import ValTown from "npm:@valtown/sdk";
import { Hono } from "npm:hono@3";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";

  );

  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    model,
    stream: true,
# Val Town AI Readme Writer

This val provides a class `ReadmeWriter` for generating readmes for vals with OpenAI. It can both draft readmes and update them directly.

PRs welcome! See **Todos** below for some ideas I have.

- `model` (optional): The model to be used for generating the readme. Defaults to `"gpt-3.5-turbo"`.
- `apiKey` (optional): An OpenAI API key. Defaults to `Deno.env.get("OPENAI_API_KEY")`.
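
A minimal construction sketch (the import path is assumed, and all `WriterOptions` fields are treated as optional here):

```ts
// Assumed import path; point this at wherever the val is hosted.
import { ReadmeWriter } from "https://esm.town/v/nbbaier/readmeGPT";

// Defaults: "gpt-3.5-turbo" and the OPENAI_API_KEY environment variable.
const writer = new ReadmeWriter({});

// Or pin a specific model:
const gpt4Writer = new ReadmeWriter({ model: "gpt-4" });
```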

#### Methods

## Todos

- [ ] Additional options to pass to the OpenAI model
- [ ] Ability to pass more instructions to the prompt to modify how the readme is constructed
import { type WriterOptions } from "https://esm.town/v/nbbaier/WriterOptions";
import { fetch } from "https://esm.town/v/std/fetch?v=4";
import OpenAI, { type ClientOptions } from "npm:openai";

export class ReadmeWriter {
  model: string;
  openai: OpenAI;
  apiKey: string;
  valtownKey: string;

  constructor(options: WriterOptions) {
    // Everything except `model` is passed straight through to the OpenAI client.
    const { model, ...openaiOptions } = options;
    this.model = model ?? "gpt-3.5-turbo";
    this.openai = new OpenAI(openaiOptions);
    this.valtownKey = Deno.env.get("valtown");
  }
  }

  private async performOpenAICall(prompt: string) {
    try {
      const response = await this.openai.chat.completions.create({
        messages: [{ role: "system", content: prompt }],
        model: this.model,
      });

      if (!response.choices || response.choices.length === 0) {
        throw new Error("No response from OpenAI");
      }

      const readme = response.choices[0]?.message?.content;

      if (!readme) {
        throw new Error("No readme returned by OpenAI. Try again.");
      }
    const { id, code } = await this.getVal(username, valName);
    const prompt = this.createPrompt(code, userPrompt);
    const readme = await this.performOpenAICall(prompt);
    return { id, readme };
  }
  },
  {
    prompt: "Write a val that uses OpenAI",
    code: `import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  "messages": [
    { "role": "user", "content": "Say hello in a creative way" },
import { basicAuth } from "https://esm.town/v/pomdtr/basicAuth?v=62";
import { fetchText } from "https://esm.town/v/stevekrouse/fetchText";
import { chat } from "https://esm.town/v/stevekrouse/openai";
import cronstrue from "npm:cronstrue";
import { Hono } from "npm:hono@3";

    await email({ subject: "Subject line", text: "Body of message" });

    // OpenAI
    import { OpenAI } from "https://esm.town/v/std/openai";
    const openai = new OpenAI();
    const completion = await openai.chat.completions.create({
      messages: [
        { role: "user", content: "Say hello in a creative way" },
6. Uses EPA's ranking to classify the severity of the score (i.e., "Unhealthy for Sensitive Groups"); a sketch of this mapping follows below.

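The classification is a simple threshold lookup over the EPA's published AQI breakpoints. A minimal sketch (illustrative; not necessarily the val's actual code):

```ts
// EPA AQI severity categories by index range (0-500 scale).
function aqiSeverity(aqi: number): string {
  if (aqi <= 50) return "Good";
  if (aqi <= 100) return "Moderate";
  if (aqi <= 150) return "Unhealthy for Sensitive Groups";
  if (aqi <= 200) return "Unhealthy";
  if (aqi <= 300) return "Very Unhealthy";
  return "Hazardous";
}
```
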
It uses blob storage to cache the OpenAI location ID for your location string, which skips a couple of steps the next time.

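A sketch of that caching pattern, assuming Val Town's `std/blob` helpers and a hypothetical cache key (the val's real key and lookup code may differ):

```ts
import { blob } from "https://esm.town/v/std/blob";

// Return the cached location ID, or compute and cache it on a miss.
async function getLocationId(
  location: string,
  lookup: (loc: string) => Promise<string>,
): Promise<string> {
  const key = `location-id:${location}`;
  const cached = await blob.getJSON(key); // undefined on a cache miss
  if (typeof cached === "string") return cached;
  const id = await lookup(location);
  await blob.setJSON(key, id);
  return id;
}
```
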
## Example usage
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

/**

/**
 * Gets the response from the OpenAI language model.
 * @param {string} prompt - The prompt for the language model.
 * @returns {Promise<string>} - The response from the language model.
 */
async function getLlmResponse(prompt: string) {
  const completion = await openai.chat.completions.create({
    "messages": [
      { "role": "user", "content": prompt },
# openAiProxy

## Overview

This val is a proxy server that interacts with the OpenAI API to generate responses based on prompts in the request body. The function handles incoming HTTP POST requests, processes the prompt, and returns a response generated by the LLM.

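As a rough sketch, a handler of this shape would satisfy the description above (illustrative only, assuming Val Town's HTTP val signature and the `std/openai` client; it is not this val's actual source):

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  if (req.method !== "POST") {
    return new Response("Only POST is supported", { status: 405 });
  }
  const { prompt } = await req.json();
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    messages: [{ role: "user", content: prompt }],
    model: "gpt-3.5-turbo",
  });
  return Response.json({ response: completion.choices[0]?.message?.content });
}
```
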
## Prerequisites

- Server-side: (optional) an active OpenAI API key
- Client-side: something that can make POST requests (browser code, Postman, cURL, another val, etc.)


```sh
curl -X POST https://ashryanio-openaiproxy.web.val.run -H "Content-Type: application/json" -d '{"prompt": "Hello, OpenAI!"}'
```
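
The same request from the browser or another val (a hypothetical client for the endpoint above):

```ts
const res = await fetch("https://ashryanio-openaiproxy.web.val.run", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ prompt: "Hello, OpenAI!" }),
});
console.log(await res.json());
```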

- **Content-Type**: application/json
- **Body**: JSON object containing the response from the OpenAI language model.

#### Example Response
# ChatGPT Implemented in Val Town

Demonstrates how to use assistants and threads with the OpenAI SDK, and how to stream the response with Server-Sent Events.

<p align=center>
/** @jsxImportSource https://esm.sh/react */
import { Hono } from "npm:hono@3";
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";

// This uses my personal API key; you'll need to provide your own if
// you fork this. We'll be adding support to the std/openai lib soon!
const openai = new OpenAI();

  });

  // Set up the SSE connection and stream back the response. OpenAI handles determining
  // which message is the correct response based on what was last read from the
  // thread. This is likely vulnerable to race conditions.
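  //
  // A sketch (assumed, not the original code) of how this wiring could look
  // with hono's streamSSE helper, forwarding each run-stream event to the client:
  //
  //   import { streamSSE } from "npm:hono/streaming";
  //   return streamSSE(c, async (stream) => {
  //     for await (const event of run) {
  //       await stream.writeSSE({ event: event.event, data: JSON.stringify(event.data) });
  //     }
  //   });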
const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:

app.post("/post-message", async (c) => {
  const message = await c.req.text();
  await openai.beta.threads.messages.create(
    c.req.query("threadId"),
    { role: "user", content: message },
    ));
  };
  const run = openai.beta.threads.runs.stream(threadId, {
    assistant_id: assistantId,
    // Make sure we only display messages we haven't seen yet.