1/** @jsxImportSource https://esm.sh/react */
2import { OpenAI } from "https://esm.town/v/std/openai";
3import { Hono } from "npm:hono@3";
4import { renderToString } from "npm:react-dom/server";
5
6const openai = new OpenAI();
7
8const jsxResponse = (jsx) => {
62 let thread, assistant;
63 try {
64 thread = await openai.chat.completions.create({
65 model: "gpt-3.5-turbo", // Use a faster model
66 messages: [{ role: "system", content: "Start a new thread" }],
67 });
68 assistant = await openai.chat.completions.create({
69 model: "gpt-3.5-turbo", // Use a faster model
70 messages: [{
128 let message = await c.req.text();
129 try {
130 await openai.chat.completions.create({
131 model: "gpt-3.5-turbo", // Use a faster model
132 messages: [{ role: "user", content: message }],
150 ));
151 };
152 const run = openai.chat.completions.stream({
153 model: "gpt-3.5-turbo", // Use a faster model
154 messages: [{ role: "user", content: "Continue the conversation" }],
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
4
5<p align="center">
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
4const openai = new OpenAI();
5import { Hono } from "npm:hono@3";
6
47const app = new Hono();
48app.get("/", async (c) => {
49 const thread = await openai.beta.threads.create();
50 const assistant = await openai.beta.assistants.create({
51 name: "",
52 instructions:
106 const message = c.req.query("message");
107
108 await openai.beta.threads.messages.create(
109 threadId,
110 { role: "user", content: message },
118 ));
119 };
120 const run = openai.beta.threads.runs.stream(threadId, {
121 assistant_id: assistantId,
122 // Make sure we only display messages we haven't seen yet.
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
4const openai = new OpenAI();
5import { Hono } from "npm:hono@3";
6
47const app = new Hono();
48app.get("/", async (c) => {
49 const thread = await openai.beta.threads.create();
50 const assistant = await openai.beta.assistants.create({
51 name: "",
52 instructions:
106 const message = c.req.query("message");
107
108 await openai.beta.threads.messages.create(
109 threadId,
110 { role: "user", content: message },
118 ));
119 };
120 const run = openai.beta.threads.runs.stream(threadId, {
121 assistant_id: assistantId,
122 // Make sure we only display messages we haven't seen yet.
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
4const openai = new OpenAI();
5import { Hono } from "npm:hono@3";
6
47const app = new Hono();
48app.get("/", async (c) => {
49 const thread = await openai.beta.threads.create();
50 const assistant = await openai.beta.assistants.create({
51 name: "",
52 instructions:
106 const message = c.req.query("message");
107
108 await openai.beta.threads.messages.create(
109 threadId,
110 { role: "user", content: message },
118 ));
119 };
120 const run = openai.beta.threads.runs.stream(threadId, {
121 assistant_id: assistantId,
122 // Make sure we only display messages we haven't seen yet.
61 prompt = prompt || "Summarize this article in 30 words, focusing on the primary author. Do not respond with JSON or XML or stage instructions. Only reply in text. Text:"
62 let result = await ai({
63 provider: "openai",
64 model: "gpt-3.5-turbo",
65 prompt: prompt + content
71 prompt = prompt || "Return a comma separated list of 3-4 tags that broadly describes the text. Do not repeat the given text. Just return text and commas, no other symbols. Text to categorize:"
72 let result = await ai({
73 provider: "openai",
74 model: "gpt-3.5-turbo",
75 prompt: prompt + content
80export const getEmbeddingsFn = async (content) => {
81 let result = await ai({
82 provider: "openai",
83 embed:true,
84 value: content
1import { openai } from "npm:@ai-sdk/openai";
2import { StreamingTextResponse, streamText } from "npm:ai";
3
4export default async function(req: Request): Promise<Response> {
5 const result = await streamText({
6 model: openai("gpt-4o"),
7 prompt: "Generate a fast recipe for Lasagna.",
8 });