1import { OpenAI } from "https://esm.town/v/std/openai?v=5"
2import { sqlite } from "https://esm.town/v/std/sqlite?v=6"
3import { Chess } from "npm:chess.js"
131 return c.html(`<body>
132 <div class='p-4'>
133 <h2 class='font-bold'>OpenAI Chess</h2>
134 <p class='pb-4'>Play chess against ChatGPT-4</p>
135 <div id="myBoard" style="width: 400px"></div>
171 chess.move(san)
172
173 const openai = new OpenAI()
174
175 let messages = []
182 args: [c.req.param().id, `Requesting response to ${san}`],
183 })
184 const completion = await openai.chat.completions.create({
185 messages: [
186 {
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
4
5<p align=center>
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
4
5// This uses my personal API key; you'll need to provide your own if
6// you fork this. We'll be adding support to the std/openai lib soon!
7const openai = new OpenAI();
8import { Hono } from "npm:hono@3";
9
38 });
39
40 // Setup the SSE connection and stream back the response. OpenAI handles determining
41 // which message is the correct response based on what was last read from the
42 // thread. This is likely vulnerable to race conditions.
58const app = new Hono();
59app.get("/", async (c) => {
60 const thread = await openai.beta.threads.create();
61 const assistant = await openai.beta.assistants.create({
62 name: "",
63 instructions:
114app.post("/post-message", async (c) => {
115 let message = await c.req.text();
116 await openai.beta.threads.messages.create(
117 c.req.query("threadId"),
118 { role: "user", content: message },
132 ));
133 };
134 const run = openai.beta.threads.runs.stream(threadId, {
135 assistant_id: assistantId,
136 // Make sure we only display messages we haven't seen yet.
1import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
2import { z } from "https://esm.sh/zod";
3import { OpenAI } from "https://esm.town/v/std/openai";
4import { discordWebhook } from "https://esm.town/v/stevekrouse/discordWebhook";
5import { socialDataSearch, Tweet } from "https://esm.town/v/stevekrouse/socialDataSearch";
11 .join(" OR ") + " " + excludes;
12
13const openai = new OpenAI();
14
15const RelevanceSchema = z.object({
29
30 try {
31 const completion = await openai.beta.chat.completions.parse({
32 model: "gpt-4o-mini",
33 messages: [
42 return completion.choices[0].message.parsed;
43 } catch (error) {
44 console.error("Error parsing OpenAI response:", error);
45 return { isRelevant: false, confidence: 0, reason: "Error in processing" };
46 }
1import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
2import { z } from "https://esm.sh/zod";
3import { OpenAI } from "https://esm.town/v/std/openai";
4import { discordWebhook } from "https://esm.town/v/stevekrouse/discordWebhook";
5import { socialDataSearch, Tweet } from "https://esm.town/v/stevekrouse/socialDataSearch";
11 .join(" OR ") + " " + excludes;
12
13const openai = new OpenAI();
14
15const RelevanceSchema = z.object({
29
30 try {
31 const completion = await openai.beta.chat.completions.parse({
32 model: "gpt-4o-mini",
33 messages: [
42 return completion.choices[0].message.parsed;
43 } catch (error) {
44 console.error("Error parsing OpenAI response:", error);
45 return { isRelevant: false, confidence: 0, reason: "Error in processing" };
46 }
1import { email } from "https://esm.town/v/std/email?v=11";
2import { OpenAI } from "npm:openai";
3
4let location = "brooklyn ny";
8).then(r => r.json());
9
10const openai = new OpenAI();
11let chatCompletion = await openai.chat.completions.create({
12 messages: [{
13 role: "user",
235 const { intervieweeResponse, interviewPosition } = await request.json();
236
237 const { OpenAI } = await import("https://esm.town/v/std/openai");
238 const openai = new OpenAI();
239
240 const completion = await openai.chat.completions.create({
241 messages: [
242 {
228 const webpageText = await webpageResponse.text();
229
230 // Use OpenAI as Claude proxy
231 const { OpenAI } = await import("https://esm.town/v/std/openai");
232 const openai = new OpenAI();
233
234 const completion = await openai.chat.completions.create({
235 messages: [
236 {
339 const { question } = await request.json();
340
341 const { OpenAI } = await import("https://esm.town/v/std/openai");
342 const openai = new OpenAI();
343
344 const completion = await openai.chat.completions.create({
345 messages: [
346 {
1import { OpenAI } from "https://esm.sh/openai";
2import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
3import { z } from "https://esm.sh/zod";
4import { zip } from "https://esm.town/v/pomdtr/sql";
42}, { concurrency: 3 });
43
44const openai = new OpenAI();
45
46const ValDescriptions = z.object({
50
51async function getDescriptions(val) {
52 const completion = await openai.beta.chat.completions.parse({
53 model: "gpt-4o-mini",
54 messages: [