Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/${url}?q=openai&page=124&format=json
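
For example, a minimal TypeScript sketch of fetching one page of results (the exact search path and the shape of the JSON response are assumptions; only the q, page, and format parameters are documented here):

// Minimal sketch: request one page of search results as JSON.
// Assumes the JSON endpoint lives at the same path as the HTML search page;
// the response shape is undocumented here, so it is left as unknown.
const searchUrl = new URL("https://codesearch.val.run/");
searchUrl.searchParams.set("q", "openai");
searchUrl.searchParams.set("page", "1");
searchUrl.searchParams.set("format", "json");

const searchRes = await fetch(searchUrl);
if (!searchRes.ok) throw new Error(`Search request failed: ${searchRes.status}`);
const results: unknown = await searchRes.json();
console.log(results);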

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
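
For example, a minimal TypeScript sketch of fetching suggestions (the array-of-strings response follows the description above; the error handling is illustrative):

// Minimal sketch: fetch typeahead suggestions for a query prefix.
// The endpoint returns an array of strings such as "username" or
// "username/projectName", as described above.
const typeaheadRes = await fetch("https://codesearch.val.run/typeahead?q=openai");
if (!typeaheadRes.ok) throw new Error(`Typeahead request failed: ${typeaheadRes.status}`);
const suggestions: string[] = await typeaheadRes.json();
console.log(suggestions);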

Found 1602 results for "openai" (1411ms)

gettingOpenAiStreamingtoWork main.tsx (3 matches)

@yawnxyz•Updated 11 months ago
 1  import OpenAI from "npm:openai";
 2
 3  const openai = new OpenAI();
 4
 5  export default async (req) => {
22
23    // Generate the AI response
24    const stream = await openai.chat.completions.create({
25      model: "gpt-4o",
26      messages: [{

valTownChatGPT main.tsx (6 matches)

@stevekrouse•Updated 11 months ago
  1  /** @jsxImportSource https://esm.sh/react */
  2  import OpenAI from "npm:openai";
  3  import { renderToString } from "npm:react-dom/server";
  4  const openai = new OpenAI();
  5  import { Hono } from "npm:hono@3";
  6
 47  const app = new Hono();
 48  app.get("/", async (c) => {
 49    const thread = await openai.beta.threads.create();
 50    const assistant = await openai.beta.assistants.create({
 51      name: "",
 52      instructions:
106    const message = c.req.query("message");
107
108    await openai.beta.threads.messages.create(
109      threadId,
110      { role: "user", content: message },
118    ));
119  };
120  const run = openai.beta.threads.runs.stream(threadId, {
121    assistant_id: assistantId,
122    // Make sure we only display messages we haven't seen yet.

webgen main.tsx (5 matches)

@triptych•Updated 11 months ago
  1  import { blob } from "https://esm.town/v/std/blob?v=12";
  2  import OpenAI from "npm:openai";
  3
  4  const openai = new OpenAI();
  5
  6  const getCacheKey = (url: string): string => {
 90    let pageResult = "";
 91
 92    // // 2. Do one OpenAI inference to expand that URL to a longer page description
 93    const pageDescriptionStream = await openai.chat.completions.create({
 94      model: "gpt-4o",
 95      messages: [{
127
128    // 3. Generate the page
129    const stream = await openai.chat.completions.create({
130      model: "gpt-4o",
131      messages: [{

valTownChatGPT README.md (1 match)

@stevekrouse•Updated 11 months ago
1  # ChatGPT Implemented in Val Town
2
3  Demonstrated how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events
4
5  Migrated from folder: Archive/valTownChatGPT

gettingOpenAiStreamingtoWork README.md (1 match)

@yawnxyz•Updated 11 months ago
2  Couldn't get streaming to work in valtown myself!!
3
4  Migrated from folder: Archive/gettingOpenAiStreamingtoWork

webgen main.tsx (5 matches)

@yawnxyz•Updated 11 months ago
  1  import { blob } from "https://esm.town/v/std/blob?v=12";
  2  import OpenAI from "npm:openai";
  3
  4  const openai = new OpenAI();
  5
  6  const getCacheKey = (url: string): string => {
 90    let pageResult = "";
 91
 92    // // 2. Do one OpenAI inference to expand that URL to a longer page description
 93    const pageDescriptionStream = await openai.chat.completions.create({
 94      model: "gpt-4o",
 95      messages: [{
127
128    // 3. Generate the page
129    const stream = await openai.chat.completions.create({
130      model: "gpt-4o",
131      messages: [{

openAIStreaming main.tsx (6 matches)

@maxm•Updated 11 months ago
  1  import OpenAI from "npm:openai";
  2  const openai = new OpenAI();
  3  import process from "node:process";
  4
  5  // Define our assistant.
  6  const assistant = await openai.beta.assistants.create({
  7    name: "Val Tutor",
  8    instructions: `You are a personal Val tutor.
 14
 15  // Create a thread to chat in.
 16  const thread = await openai.beta.threads.create();
 17
 18  // These are the messages we'll send to the assistant.
 44    }, 100);
 45
 46    const message = await openai.beta.threads.messages.create(
 47      thread.id,
 48      { role: "user", content: messages[i] },
 49    );
 50
 51    const run = openai.beta.threads.runs.stream(thread.id, {
 52      assistant_id: assistant.id,
 53      // Make sure we only display messages we haven't seen yet.

webgen main.tsx (5 matches)

@thesephist•Updated 11 months ago
  1  import { blob } from "https://esm.town/v/std/blob?v=12";
  2  import OpenAI from "npm:openai";
  3
  4  const openai = new OpenAI();
  5
  6  const getCacheKey = (url: string): string => {
 90    let pageResult = "";
 91
 92    // // 2. Do one OpenAI inference to expand that URL to a longer page description
 93    const pageDescriptionStream = await openai.chat.completions.create({
 94      model: "gpt-4o",
 95      messages: [{
127
128    // 3. Generate the page
129    const stream = await openai.chat.completions.create({
130      model: "gpt-4o",
131      messages: [{

openaistreaminghtml README.md (1 match)

@stevekrouse•Updated 11 months ago
1
2
3  Migrated from folder: Archive/openaistreaminghtml

openaiStreamingDemo README.md (1 match)

@stevekrouse•Updated 11 months ago
1  null
2
3  Migrated from folder: Archive/openaiStreamingDemo

translateToEnglishWithOpenAI (1 file match)

@shlmt•Updated 23 hours ago

testOpenAI (1 file match)

@stevekrouse•Updated 2 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",