Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=56&format=json
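For example, a minimal sketch of fetching a page of results from TypeScript (the JSON response shape is not documented on this page, so it is left untyped here; the root search path and query parameters mirror the example URL above):

// Minimal sketch: request one page of search results as JSON.
const res = await fetch("https://codesearch.val.run/?q=openai&page=1&format=json");
if (!res.ok) throw new Error(`Search request failed: ${res.status}`);
const results: unknown = await res.json(); // response shape not documented here
console.log(results);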

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
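A minimal sketch of the typeahead call (the suggestions shown in the comment are placeholders, not real responses):

// Fetch typeahead suggestions; the endpoint returns an array of strings
// shaped like "username" or "username/projectName".
const res = await fetch("https://codesearch.val.run/typeahead?q=openai");
const suggestions: string[] = await res.json();
console.log(suggestions); // e.g. ["someUser", "someUser/someProject"] (illustrative)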

Found 1,621 results for "openai" (505ms)

discreetOrangeAntelope/main.tsx • 7 matches

@davincidreams • Updated 1 month ago
1 | import { openai } from "npm:@ai-sdk/openai";
2 | import ValTown from "npm:@valtown/sdk";
3 | import { generateObject, generateText } from "npm:ai";
70 |   const generateImplStart = Date.now();
71 |   const { text: implementation } = await generateText({
72 |     model: openai("gpt-4o"),
73 |     system: `
74 | # VALTOWN HTTP Cloud Function Generator Prompt
96 | 5. **AI API Integration (if applicable)**:
97 |    - If the toolDescription indicates that creative enrichment or additional processing via AI is needed, integrate an AI API call into the function.
98 |    - Import { openai } from "npm:@ai-sdk/openai" and { generateText } from "npm:ai".
99 |    - Use the model "gpt-4o-mini".
100 |    - Construct a detailed, context-appropriate prompt for the AI call and integrate its response into the function’s output.
122 |
123 | ---typescript
124 | import { openai } from "npm:@ai-sdk/openai";
125 | import { generateText } from "npm:ai";
126 |
135 |   }
136 |   const { text: analysis } = await generateText({
137 |     model: openai("gpt-4o-mini"),
138 |     system: "You are a creative code analyst.",
139 |     prompt: "Analyze the following recent changes in the code: " + body.recent_changes,
233 |
234 |   const { object: manifestoConfig } = await generateObject({
235 |     model: openai("gpt-4o"),
236 |     schema: z.object({
237 |       manifestoConfigPureJson: z.string(),
293 |
294 |   const { object: updatedManifestoConfigTypescript } = await generateObject({
295 |     model: openai("gpt-4o"),
296 |     system: promptForUpdatedManifest,
297 |     schema: z.object({

harmoniousPlumTapir/main.tsx • 1 match

@davincidreams • Updated 1 month ago
6 "description": "A sample blah manifest demonstrating various tool types and configurations.",
7 "env": {
8 "OPENAI_API_KEY": "your-openai-api-key-here",
9 },
10 "tools": [

get_country_fact/main.tsx • 2 matches

@ajax • Updated 1 month ago
1 | import { openai } from "npm:@ai-sdk/openai";
2 | import { generateText } from "npm:ai";
3 |
12 |   }
13 |   const { text: fact } = await generateText({
14 |     model: openai("gpt-4o-mini"),
15 |     system: "You are an expert in world trivia.",
16 |     prompt: `Provide an interesting and fun fact about the country: ${body.country}.`,

get_recipes_by_country/main.tsx • 2 matches

@ajax • Updated 1 month ago
1 | import { openai } from "npm:@ai-sdk/openai";
2 | import { generateText } from "npm:ai";
3 |
13 |   const countriesList = body.countries.join(", ");
14 |   const { text: recipes } = await generateText({
15 |     model: openai("gpt-4o-mini"),
16 |     system: "You are a culinary expert.",
17 |     prompt: `Provide a list of popular recipes from the following countries: ${countriesList}.`,

fallacyDetectorAI/main.tsx • 7 matches

@davincidreams • Updated 1 month ago
42 | ];
43 |
44 | // Mock fallacy detection for when OpenAI fails
45 | function mockFallacyDetection(text) {
46 |   const lowercaseText = text.toLowerCase();
95 |
96 |   try {
97 |     // Dynamically import OpenAI with error handling
98 |     const openAIModule = await import("https://esm.town/v/std/openai").catch(err => {
99 |       console.error("Failed to import OpenAI module:", err);
100 |       throw new Error("Could not load AI analysis module");
101 |     });
102 |
103 |     const OpenAI = openAIModule.OpenAI;
104 |     const openai = new OpenAI();
105 |
106 |     const response = await openai.chat.completions.create({
107 |       model: "gpt-4o-mini",
108 |       messages: [

audioAIChat/main.tsx • 5 matches

@davincidreams • Updated 1 month ago
136 | export default async function server(request: Request): Promise<Response> {
137 |   if (request.method === 'POST') {
138 |     const { OpenAI } = await import("https://esm.town/v/std/openai");
139 |     const openai = new OpenAI();
140 |
141 |     // Parse multipart form data
148 |
149 |     // Transcribe audio
150 |     const transcriptionResponse = await openai.audio.transcriptions.create({
151 |       file: base64Audio,
152 |       model: "whisper-1",
157 |
158 |     // Generate AI response
159 |     const chatCompletion = await openai.chat.completions.create({
160 |       messages: [
161 |         {
175 |
176 |     // Generate audio response
177 |     const speechResponse = await openai.audio.speech.create({
178 |       model: "tts-1",
179 |       voice: "nova",

avatarGenerator/main.tsx • 3 matches

@davincidreams • Updated 1 month ago
82 | export default async function server(request: Request): Promise<Response> {
83 |   if (request.method === 'POST') {
84 |     const { OpenAI } = await import("https://esm.town/v/std/openai");
85 |     const openai = new OpenAI();
86 |
87 |     const { theme } = await request.json();
88 |
89 |     const completion = await openai.chat.completions.create({
90 |       messages: [
91 |         {

vtuberApp/main.tsx • 3 matches

@davincidreams • Updated 1 month ago
75 | export default async function server(request: Request): Promise<Response> {
76 |   if (request.method === 'POST') {
77 |     const { OpenAI } = await import("https://esm.town/v/std/openai");
78 |     const openai = new OpenAI();
79 |
80 |     const { theme } = await request.json();
81 |
82 |     const completion = await openai.chat.completions.create({
83 |       messages: [
84 |         {

MEDIANALIZE_PRO/Medicalreport.tsx • 5 matches

@UndiscoverableERA • Updated 1 month ago
224 | export default async function server(request: Request): Promise<Response> {
225 |   if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
226 |     const { OpenAI } = await import("https://esm.town/v/std/openai");
227 |     const openai = new OpenAI();
228 |
229 |     const body = await request.json();
234 |
235 |     try {
236 |       const completion = await openai.chat.completions.create({
237 |         messages: [
238 |           {
265 |       });
266 |     } catch (error) {
267 |       console.error('OpenAI Analysis Error:', error);
268 |       return new Response(JSON.stringify({
269 |         diagnosis: 'Unable to generate analysis',
298 |
299 | function parseAnalysis(analysis: string) {
300 |   // Basic parsing of OpenAI response
301 |   const sections = analysis.split('\n\n');
302 |   return {

MEDIANALIZE_PRO/healthcare.tsx • 14 matches

@UndiscoverableERA • Updated 1 month ago
222 |   }
223 |
224 |   // Dynamically import OpenAI with error handling
225 |   let OpenAI;
226 |   try {
227 |     const module = await import("https://esm.town/v/std/openai");
228 |     OpenAI = module.OpenAI;
229 |   } catch (importError) {
230 |     console.error("OpenAI Import Error:", importError);
231 |     return new Response(
232 |       JSON.stringify({
233 |         error: "Failed to import OpenAI module",
234 |         details: String(importError),
235 |       }),
244 |   }
245 |
246 |   // Ensure OpenAI is available
247 |   if (!OpenAI) {
248 |     return new Response(
249 |       JSON.stringify({
250 |         error: "OpenAI module not found",
251 |       }),
252 |       {
260 |   }
261 |
262 |   // Create OpenAI instance
263 |   const openai = new OpenAI();
264 |
265 |   // Create OpenAI completion with comprehensive error handling
266 |   let completion;
267 |   try {
268 |     completion = await openai.chat.completions.create({
269 |       messages: body.messages,
270 |       model: "gpt-4o-mini",
272 |     });
273 |   } catch (completionError) {
274 |     console.error("OpenAI Completion Error:", completionError);
275 |     return new Response(
276 |       JSON.stringify({

translateToEnglishWithOpenAI • 1 file match

@shlmt • Updated 4 days ago

testOpenAI • 1 file match

@stevekrouse • Updated 6 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",