Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=118&format=json
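For example, a minimal TypeScript sketch of requesting a results page as JSON. The query parameters mirror the URL above; the response schema isn't documented on this page, so the payload is treated as unknown and simply printed rather than assuming any particular fields.

// Minimal sketch: fetch one page of search results as JSON.
// The response shape is not documented here, so inspect it before relying on fields.
const query = "openai";
const searchUrl = `https://codesearch.val.run/?q=${encodeURIComponent(query)}&page=1&format=json`;

const res = await fetch(searchUrl);
if (!res.ok) throw new Error(`Search request failed with status ${res.status}`);

const payload: unknown = await res.json();
console.log(JSON.stringify(payload, null, 2));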

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
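A minimal sketch of consuming the typeahead response, assuming only what's stated above: the body is a JSON array of strings, each either "username" or "username/projectName".

// Minimal sketch: fetch typeahead suggestions and split them into user/project parts.
const res = await fetch("https://codesearch.val.run/typeahead?q=openai");
const suggestions: string[] = await res.json();

for (const suggestion of suggestions) {
  const [username, projectName] = suggestion.split("/");
  if (projectName) {
    console.log(`project: ${projectName} by @${username}`);
  } else {
    console.log(`user: @${username}`);
  }
}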

Found 1580 results for "openai" (1580ms)

purpleKangaroo README.md (1 match)

@willthereader•Updated 10 months ago
# ChatGPT Implemented in Val Town

Demonstrated how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.

<p align=center>

purpleKangaroo main.tsx (8 matches)

@willthereader•Updated 10 months ago
/** @jsxImportSource https://esm.sh/react */
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";

// This uses by personal API key, you'll need to provide your own if
// you fork this. We'll be adding support to the std/openai lib soon!
const openai = new OpenAI();
import { Hono } from "npm:hono@3";

// ...
  });

  // Setup the SSE connection and stream back the response. OpenAI handles determining
  // which message is the correct response based on what was last read from the
  // thread. This is likely vulnerable to race conditions.
// ...
const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:
// ...
app.post("/post-message", async (c) => {
  let message = await c.req.text();
  await openai.beta.threads.messages.create(
    c.req.query("threadId"),
    { role: "user", content: message },
// ...
  ));
};
const run = openai.beta.threads.runs.stream(threadId, {
  assistant_id: assistantId,
  // Make sure we only display messages we haven't seen yet.

chocolateCanid main.tsx (6 matches)

@willthereader•Updated 10 months ago
/** @jsxImportSource https://esm.sh/react */
import { OpenAI } from "https://esm.town/v/std/openai";
import { Hono } from "npm:hono@3";
import { renderToString } from "npm:react-dom/server";

const openai = new OpenAI();

const jsxResponse = (jsx) => {
// ...
  let thread, assistant;
  try {
    thread = await openai.chat.completions.create({
      model: "gpt-3.5-turbo", // Use a faster model
      messages: [{ role: "system", content: "Start a new thread" }],
    });
    assistant = await openai.chat.completions.create({
      model: "gpt-3.5-turbo", // Use a faster model
      messages: [{
// ...
  let message = await c.req.text();
  try {
    await openai.chat.completions.create({
      model: "gpt-3.5-turbo", // Use a faster model
      messages: [{ role: "user", content: message }],
// ...
  ));
  };
  const run = openai.chat.completions.stream({
    model: "gpt-3.5-turbo", // Use a faster model
    messages: [{ role: "user", content: "Continue the conversation" }],

chocolateCanid README.md (1 match)

@willthereader•Updated 10 months ago
# ChatGPT Implemented in Val Town

Demonstrated how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.

<p align=center>

coralCarp README.md (1 match)

@willthereader•Updated 10 months ago
# ChatGPT Implemented in Val Town

Demonstrated how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events

coralCarp main.tsx (6 matches)

@willthereader•Updated 10 months ago
/** @jsxImportSource https://esm.sh/react */
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const openai = new OpenAI();
import { Hono } from "npm:hono@3";

// ...
const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:
// ...
  const message = c.req.query("message");

  await openai.beta.threads.messages.create(
    threadId,
    { role: "user", content: message },
// ...
  ));
};
const run = openai.beta.threads.runs.stream(threadId, {
  assistant_id: assistantId,
  // Make sure we only display messages we haven't seen yet.

emeraldRaccoon main.tsx (6 matches)

@willthereader•Updated 10 months ago
/** @jsxImportSource https://esm.sh/react */
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const openai = new OpenAI();
import { Hono } from "npm:hono@3";

// ...
const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:
// ...
  const message = c.req.query("message");

  await openai.beta.threads.messages.create(
    threadId,
    { role: "user", content: message },
// ...
  ));
};
const run = openai.beta.threads.runs.stream(threadId, {
  assistant_id: assistantId,
  // Make sure we only display messages we haven't seen yet.

emeraldRaccoon README.md (1 match)

@willthereader•Updated 10 months ago
# ChatGPT Implemented in Val Town

Demonstrated how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events

valTownChatGPT README.md (1 match)

@willthereader•Updated 10 months ago
# ChatGPT Implemented in Val Town

Demonstrated how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events

valTownChatGPT main.tsx (6 matches)

@willthereader•Updated 10 months ago
/** @jsxImportSource https://esm.sh/react */
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const openai = new OpenAI();
import { Hono } from "npm:hono@3";

// ...
const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:
// ...
  const message = c.req.query("message");

  await openai.beta.threads.messages.create(
    threadId,
    { role: "user", content: message },
// ...
  ));
};
const run = openai.beta.threads.runs.stream(threadId, {
  assistant_id: assistantId,
  // Make sure we only display messages we haven't seen yet.

testOpenAI (1 file match)

@stevekrouse•Updated 1 day ago

testOpenAI (1 file match)

@shouser•Updated 3 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",