**tomatoMinnow/README.md** (1 match)

Use the Vercel AI SDK in your Vals.

**Note**: you must add your OpenAI key to your Val Town [Env variables](https://www.val.town/settings/environment-variables) under `OPENAI_API_KEY`. If you would like to specify a different name for your API Key, you can [create a custom OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai#provider-instance) with the `createOpenAI` function.

Prefer another AI provider? Use [any supported provider](https://sdk.vercel.ai/providers/ai-sdk-providers) by changing just two lines of code!
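
As a sketch of what that two-line swap looks like (assuming the `ai` and `@ai-sdk/openai` packages; the provider import and constructor are the only lines that change):

```tsx
import { generateText } from "npm:ai";
import { createOpenAI } from "npm:@ai-sdk/openai";
// To switch providers, change only the two lines above, e.g.:
// import { createAnthropic } from "npm:@ai-sdk/anthropic";

const openai = createOpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

const { text } = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "Write a haiku about Val Town.",
});
console.log(text);
```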
**tealBadger/main.tsx** (3 matches)

```tsx
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    stream: true,
    messages: [{ role: "user", content: "Write a poem in the style of beowulf about the DMV" }],
```
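
The match cuts off mid-call; a minimal way to finish a handler like this (the model choice and the plain-text piping below are assumptions, not lines from the val):

```tsx
    model: "gpt-4o-mini", // assumed; the matched lines don't show the model
  });

  // Relay each streamed delta to the client as plain text.
  const body = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      for await (const chunk of stream) {
        controller.enqueue(encoder.encode(chunk.choices[0]?.delta?.content ?? ""));
      }
      controller.close();
    },
  });
  return new Response(body, { headers: { "Content-Type": "text/plain; charset=utf-8" } });
}
```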
**openAIStreamingExample/main.tsx** (3 matches)

```tsx
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    stream: true,
    messages: [{
```
Migrated from folder: Archive/openAIStreamingExample
```tsx
import { blob } from "https://esm.town/v/std/blob?v=12";
import OpenAI from "npm:openai";

const openai = new OpenAI();

const getCacheKey = (url: string): string => {
// …
let pageResult = "";

// 2. Do one OpenAI inference to expand that URL to a longer page description
const pageDescriptionStream = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{
// …
// 3. Generate the page
const stream = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{
```
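
Given the `blob` import and the `getCacheKey` helper in the first chunk, the generated page is presumably cached after streaming; a sketch of that pattern using Val Town's std blob API (the accumulation loop and the `setJSON` call are assumptions, not matched lines):

```tsx
// Accumulate the streamed page, then cache it under the URL's key.
for await (const chunk of stream) {
  pageResult += chunk.choices[0]?.delta?.content ?? "";
}
await blob.setJSON(getCacheKey(url), pageResult);
```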
**convertToResumeJSON/main.tsx** (1 match)

```tsx
}

const endpoint = 'https://api.openai.com/v1/chat/completions';
const model = 'gpt-4';
```
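
For context, a raw request against those two constants typically looks like this (a sketch of the public chat-completions REST API; the message payload here is illustrative, not the val's actual prompt):

```tsx
const res = await fetch(endpoint, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${Deno.env.get("OPENAI_API_KEY")}`,
  },
  body: JSON.stringify({
    model,
    messages: [{ role: "user", content: "Convert this resume to JSON." }],
  }),
});
const data = await res.json();
console.log(data.choices[0].message.content);
```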
**valTownChatGPT/README.md** (1 match)

# ChatGPT Implemented in Val Town

Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.

<p align=center>
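
The Server-Sent Events side reduces to a small pattern; a minimal sketch of streaming SSE from a Val (the payloads here are placeholders, not the val's actual assistant messages):

```tsx
export default async function(req: Request): Promise<Response> {
  const body = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      // Each SSE event is a "data: <payload>" line followed by a blank line.
      for (const word of ["Hello", "from", "SSE"]) {
        controller.enqueue(encoder.encode(`data: ${word}\n\n`));
        await new Promise((r) => setTimeout(r, 100));
      }
      controller.close();
    },
  });
  return new Response(body, {
    headers: { "Content-Type": "text/event-stream", "Cache-Control": "no-cache" },
  });
}
```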
**valTownChatGPT/main.tsx** (8 matches)

```tsx
/** @jsxImportSource https://esm.sh/react */
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";

// This uses my personal API key; you'll need to provide your own if
// you fork this. We'll be adding support to the std/openai lib soon!
const openai = new OpenAI();
import { Hono } from "npm:hono@3";
// …
});

// Setup the SSE connection and stream back the response. OpenAI handles determining
// which message is the correct response based on what was last read from the
// thread. This is likely vulnerable to race conditions.
// …
const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:
// …
app.post("/post-message", async (c) => {
  let message = await c.req.text();
  await openai.beta.threads.messages.create(
    c.req.query("threadId"),
    { role: "user", content: message },
// …
  ));
};
const run = openai.beta.threads.runs.stream(threadId, {
  assistant_id: assistantId,
  // Make sure we only display messages we haven't seen yet.
```
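
The run stream exposes event callbacks, which is how the deltas would reach the SSE connection; a sketch of that hand-off (assuming `controller` is the enclosing ReadableStream's controller; the event names follow the openai SDK's AssistantStream):

```tsx
// Forward each assistant text delta as an SSE event, then close the stream.
run
  .on("textDelta", (delta) => {
    controller.enqueue(new TextEncoder().encode(`data: ${delta.value ?? ""}\n\n`));
  })
  .on("end", () => controller.close());
```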
**openAIStreaming/README.md** (2 matches)

# OpenAI Streaming - Assistant and Threads

An example of using OpenAI to stream back a chat with an assistant. This example sends two messages to the assistant and streams back the responses as they come in.

Example response:
**openAIStreaming/main.tsx** (6 matches)

```tsx
import OpenAI from "npm:openai";
import process from "node:process";

const openai = new OpenAI();

// Define our assistant.
const assistant = await openai.beta.assistants.create({
  name: "Val Tutor",
  instructions: `You are a personal Val tutor.
// …
// Create a thread to chat in.
const thread = await openai.beta.threads.create();

// These are the messages we'll send to the assistant.
// …
}, 100);

const message = await openai.beta.threads.messages.create(
  thread.id,
  { role: "user", content: messages[i] },
);

const run = openai.beta.threads.runs.stream(thread.id, {
  assistant_id: assistant.id,
  // Make sure we only display messages we haven't seen yet.
```
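
From there, printing the streamed replies takes only a few lines; a sketch using the AssistantStream event API (illustrative, not the val's exact code):

```tsx
run
  .on("textDelta", (delta) => process.stdout.write(delta.value ?? ""))
  .on("end", () => process.stdout.write("\n"));

// Block until this run finishes before sending the next message.
await run.finalRun();
```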