2import { cors } from 'npm:hono/cors';
3import { stream, streamSSE } from "https://deno.land/x/hono@v4.3.11/helper.ts";
4import { OpenAI } from "npm:openai";
5import { ai } from "https://esm.town/v/yawnxyz/ai";
6
7
8const app = new Hono();
9const openai = new OpenAI();
10
11app.use('*', cors({
98 let pageResult = "";
99
100 // // 2. Do one OpenAI inference to expand that URL to a longer page description
101 const pageDescriptionStream = await togetherAI.inference("mistralai/Mixtral-8x7B-Instruct-v0.1", {
102 prompt: `
1# Use GPT to generate vals on your account!
2
3Describe the val that you need, call this function, and you'll get a new val in your workspace generated by OpenAI's API!
4
5First, ensure you have a [Val Town API Token](https://www.val.town/settings/api), then call `@andreterron.createGeneratedVal({...})` like this [example](https://www.val.town/v/andreterron.untitled_tomatoKiwi):
2import { cors } from 'npm:hono/cors';
3import { stream } from "https://deno.land/x/hono@v4.3.11/helper.ts";
4import { OpenAI } from "npm:openai";
5
6const app = new Hono();
7const openai = new OpenAI();
8
9app.use('*', cors({
22
23const SOURCE_URL = ""; // leave blank for deno deploy / native
24// const SOURCE_URL = "https://yawnxyz-openAIHonoChatStreamSample.web.val.run"; // valtown as generator - no SSE
25// const SOURCE_URL = "https://funny-crow-81.deno.dev"; // deno deploy as generator
26
31 <head>
32 <meta charset="UTF-8" />
33 <title>OpenAI Streaming Example</title>
34 <style>
35 body {
48 </head>
49 <body>
50 <h1>OpenAI Streaming Example</h1>
51 <label for="prompt">Prompt:</label>
52 <input type="text" id="prompt" value="tell me a joke" />
178 return stream(c, async (stream) => {
179 try {
180 const chatStream = await openai.chat.completions.create({
181 model: "gpt-4",
182 messages: [{ role: "user", content: prompt }],
1import OpenAI from "npm:openai";
2
3const openai = new OpenAI();
4
5export default async (req) => {
22
23 // Generate the AI response
24 const stream = await openai.chat.completions.create({
25 model: "gpt-4o",
26 messages: [{
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
4const openai = new OpenAI();
5import { Hono } from "npm:hono@3";
6
47const app = new Hono();
48app.get("/", async (c) => {
49 const thread = await openai.beta.threads.create();
50 const assistant = await openai.beta.assistants.create({
51 name: "",
52 instructions:
106 const message = c.req.query("message");
107
108 await openai.beta.threads.messages.create(
109 threadId,
110 { role: "user", content: message },
118 ));
119 };
120 const run = openai.beta.threads.runs.stream(threadId, {
121 assistant_id: assistantId,
122 // Make sure we only display messages we haven't seen yet.
1import { blob } from "https://esm.town/v/std/blob?v=12";
2import OpenAI from "npm:openai";
3
4const openai = new OpenAI();
5
6const getCacheKey = (url: string): string => {
90 let pageResult = "";
91
92 // // 2. Do one OpenAI inference to expand that URL to a longer page description
93 const pageDescriptionStream = await openai.chat.completions.create({
94 model: "gpt-4o",
95 messages: [{
127
128 // 3. Generate the page
129 const stream = await openai.chat.completions.create({
130 model: "gpt-4o",
131 messages: [{
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events
4
5Migrated from folder: Archive/valTownChatGPT
2Couldn't get streaming to work in Val Town myself!
3
4Migrated from folder: Archive/gettingOpenAiStreamingtoWork
1import { blob } from "https://esm.town/v/std/blob?v=12";
2import OpenAI from "npm:openai";
3
4const openai = new OpenAI();
5
6const getCacheKey = (url: string): string => {
90 let pageResult = "";
91
92 // // 2. Do one OpenAI inference to expand that URL to a longer page description
93 const pageDescriptionStream = await openai.chat.completions.create({
94 model: "gpt-4o",
95 messages: [{
127
128 // 3. Generate the page
129 const stream = await openai.chat.completions.create({
130 model: "gpt-4o",
131 messages: [{