Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=6&format=json
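For example, a minimal sketch in TypeScript (the exact shape of the JSON response is not documented here, so treat the parsed object as an assumption):

```ts
// Fetch one page of search results as JSON.
const res = await fetch("https://codesearch.val.run/?q=openai&page=6&format=json");
if (!res.ok) throw new Error(`Search request failed: ${res.status}`);
const results = await res.json(); // response shape is an assumption, inspect it before relying on fields
console.log(results);
```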

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
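A minimal typeahead sketch along the same lines (the sample suggestions in the comment are illustrative, not real data):

```ts
// Fetch typeahead suggestions for a partial query.
const res = await fetch("https://codesearch.val.run/typeahead?q=openai");
if (!res.ok) throw new Error(`Typeahead request failed: ${res.status}`);
const suggestions: string[] = await res.json(); // e.g. ["someuser", "someuser/someProject"]
console.log(suggestions);
```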

Found 1566 results for "openai" (790ms)

Project / main.tsx (15 matches)

@Get•Updated 2 days ago
6 * Uses 'npm:pdf.js-extract' for direct PDF text extraction.
7 * Serves HTML UI & API endpoint from the same Val.
8 * OpenAI import is dynamically done inside the main server function.
9 *
10 * Based on structure from multi-agent support simulation example.
11 * Assumes 'openai' secret is set in Val Town environment variables.
12 *
13 * Last Updated: 2025-05-01 (Dashboard UI, Localization, Animation Integration)
950export default async function(req: Request) {
951 // --- Dynamic Imports (Inside Handler) ---
952 const { OpenAI } = await import("https://esm.town/v/std/openai");
953 const { z } = await import("npm:zod");
954 const { fetch } = await import("https://esm.town/v/std/fetch");
990 }
991
992 // --- Helper Function: Call OpenAI API (Unchanged) ---
993 async function callOpenAI(
994 openai: OpenAI,
995 systemPrompt: string,
996 userMessage: string,
999 ): Promise<{ role: "assistant" | "system"; content: string | object }> {
1000 try {
1001 const response = await openai.chat.completions.create({
1002 model: model,
1003 messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
1011 return { role: "assistant", content: JSON.parse(content) };
1012 } catch (parseError) {
1013 console.error("OpenAI JSON Parse Error:", parseError, "Raw Content:", content);
1014 throw new Error(`AI response was not valid JSON. Raw: ${content.substring(0, 150)}...`);
1015 }
1016 } else { return { role: "assistant", content: content }; }
1017 } catch (error) {
1018 console.error(`OpenAI API call failed. ExpectJSON: ${expectJson}. Error:`, error);
1019 let errorMessage = "Error communicating with AI model.";
1020 if (error.message) { errorMessage += ` Details: ${error.message}`; }
1021 // Check for specific error types if needed (e.g., rate limits, auth)
1022 if (error.status === 401) errorMessage = "Authentication error with OpenAI API. Check your secret key.";
1023 if (error.status === 429) errorMessage = "OpenAI API rate limit exceeded. Please try again later.";
1024
1025 return { role: "system", content: errorMessage };
1112 log: LogEntry[],
1113 ): Promise<LogEntry[]> { // Returns the completed log array
1114 const openai = new OpenAI(); // Assumes API key is in environment variables
1115
1116 log.push({ agent: "System", type: "step", message: "Starting analysis workflow." });
1217 // --- 2. Content Analysis ---
1218 log.push({ agent: "System", type: "step", message: "Starting Content Analysis..." });
1219 const analysisResponse = await callOpenAI(openai, contentAnalysisSystemPrompt, truncatedText, "gpt-4o", true); // Use gpt-4o
1220 let analysisResult: AnalysisResult | null = null;
1221 if (analysisResponse.role === "assistant" && typeof analysisResponse.content === "object") {
1237 // --- 3. Citation Extraction ---
1238 log.push({ agent: "System", type: "step", message: "Starting Citation Extraction..." });
1239 const citationResponse = await callOpenAI(openai, citationExtractionSystemPrompt, truncatedText, "gpt-4o", true); // Use gpt-4o
1240 let extractedCitations: Citation[] = [];
1241 if (
1267 message: `AI response was not the expected JSON format. Received: ${JSON.stringify(citationResponse.content).substring(0, 200)}...`,
1268 });
1269 } else { // System error from callOpenAI
1270 log.push({ agent: "Citation Extraction Agent", type: "error", message: citationResponse.content });
1271 }

myanything / main.tsx (3 matches)

@yassinreg•Updated 2 days ago
125 if (request.method === 'POST') {
126 try {
127 const { OpenAI } = await import("https://esm.town/v/std/openai");
128 const openai = new OpenAI();
129
130 const formData = await request.formData();
138 const cvText = new TextDecoder().decode(cvBuffer);
139
140 const completion = await openai.chat.completions.create({
141 messages: [
142 {

myeverything / main.tsx (3 matches)

@yassinreg•Updated 2 days ago
125 if (request.method === "POST") {
126 try {
127 const { OpenAI } = await import("https://esm.town/v/std/openai");
128 const openai = new OpenAI();
129
130 const formData = await request.formData();
138 const cvText = new TextDecoder().decode(cvBuffer);
139
140 const completion = await openai.chat.completions.create({
141 messages: [
142 {

feedback / main.tsx (3 matches)

@yassinreg•Updated 2 days ago
125 if (request.method === "POST") {
126 try {
127 const { OpenAI } = await import("https://esm.town/v/std/openai");
128 const openai = new OpenAI();
129
130 const formData = await request.formData();
138 const cvText = new TextDecoder().decode(cvBuffer);
139
140 const completion = await openai.chat.completions.create({
141 messages: [
142 {

speech / main.tsx (5 matches)

@salon•Updated 3 days ago
197 const [isInitialized, setIsInitialized] = useState(false);
198
199 // --- OpenAI Interaction (Unchanged) ---
200 // !!! IMPORTANT SECURITY WARNING & Val Town Note !!!
201 // ... (same as before)
205 if (!text) return;
206 setStatus("Sending to AI...");
207 console.log("Sending to OpenAI:", text);
208 setAiResponse(""); // Clear previous response
209 setError(null);
213 try {
214 // ---- START: Replace this block in Val Town ----
215 const response = await fetch("https://api.openai.com/v1/chat/completions", {
216 method: "POST",
217 headers: {
226 const errorData = await response.json().catch(() => ({})); // Try to get JSON error details
227 throw new Error(
228 `OpenAI API Error: ${response.status} ${response.statusText} - ${
229 errorData?.error?.message ?? "Check API key or usage limits."
230 }`,
240 speakText(reply);
241 } catch (err: any) {
242 console.error("OpenAI call failed:", err);
243 const errMsg = `AI Error: ${err.message}`;
244 setError(errMsg);

FixItWand / generate.ts (6 matches)

@wolf•Updated 3 days ago
1import OpenAI from "https://esm.sh/openai@4.96.0";
2import { search } from "./locations/mod.ts";
3
4const openai = new OpenAI();
5
6/**
39 const audioFile = new File([audioBlob], "a.mp3", { type: "audio/mp3" });
40
41 transcription = await openai.audio.transcriptions.create({
42 file: audioFile,
43 model: "whisper-1",
50 if (transcription) {
51 // Detect possible location references in the transcription
52 const locationDetectionResponse = await openai.chat.completions.create({
53 model: "gpt-4o-mini",
54 messages: [
149 });
150
151 const chatResponse = await openai.chat.completions.create({
152 model: "gpt-4o",
153 messages: messages as any, // Type assertion to fix any TypeScript issues
166`;
167
168 const subjectResponse = await openai.chat.completions.create({
169 model: "gpt-4o-mini",
170 messages: [

OpenTownie_jackson / system_prompt.txt (4 matches)

@stevekrouse•Updated 3 days ago
88Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90### OpenAI
91
92```ts
93import { OpenAI } from "https://esm.town/v/std/openai";
94const openai = new OpenAI();
95const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

guide / main.tsx (17 matches)

@salon•Updated 3 days ago
1// Val Town Script: Dynamic Character Race Carousel with OpenAI + Plane Tilt + Fanning Cards + Animated Borders
2
3// =============================================================================
5// =============================================================================
6
7// Import OpenAI library from Val Town's standard modules
8import { OpenAI } from "https://esm.town/v/std/openai"; // Ensure 'openai' secret is set
9
10// --- Configuration ---
21 borderAnimationHint?: string; // <<< NEW: Optional hint for border style
22}
23interface OpenAIResponse {
24 races: RaceInfo[];
25}
65];
66
67// --- OpenAI Generation Function ---
68async function generateRaceDataWithOpenAI(): Promise<RaceInfo[]> {
69 const openai = new OpenAI();
70 const numToRequest = Math.max(1, NUM_CARDS_TO_GENERATE);
71 const prompt = `
81
82 try {
83 console.info(`Requesting ${numToRequest} race data generation from OpenAI...`);
84 const completion = await openai.chat.completions.create({
85 model: "gpt-4o", // Or your preferred model
86 messages: [{ role: "user", content: prompt }],
90
91 const rawContent = completion.choices[0]?.message?.content;
92 if (!rawContent) throw new Error("OpenAI returned an empty response message.");
93 console.info("Received response from OpenAI. Parsing and validating JSON...");
94 const parsedJson = JSON.parse(rawContent);
95
109 )
110 ) {
111 console.warn(`OpenAI response JSON failed validation for ${numToRequest} races:`, parsedJson);
112 throw new Error("OpenAI response JSON structure, count, data types, or hint value invalid.");
113 }
114
115 const generatedData = (parsedJson as OpenAIResponse).races.map(race => ({
116 ...race,
117 // Ensure borderAnimationHint defaults to 'none' if missing from response
119 }));
120
121 console.info(`Successfully generated and validated ${generatedData.length} races from OpenAI.`);
122 return generatedData;
123 } catch (error) {
124 console.error("Error fetching or processing data from OpenAI:", error);
125 console.warn("Using fallback race data due to the error.");
126 // Ensure fallback data also has the hint, slice correctly
134// --- Main HTTP Handler (Val Town Entry Point) ---
135export default async function server(request: Request): Promise<Response> {
136 const activeRaceData = await generateRaceDataWithOpenAI();
137
138 // Define CSS Styles

policy2 / main.tsx (13 matches)

@salon•Updated 3 days ago
7 * Serves HTML UI & API endpoint from the same Val.
8 * Based on user-provided glassmorphism UI example.
9 * Assumes 'openai' secret is set in Val Town environment variables.
10 *
11 * Last Updated: 2025-05-01 (Reference Relevance & Paradigmatic Analysis Integration)
916export default async function(req: Request) {
917 // --- Dynamic Imports (Inside Handler) ---
918 const { OpenAI } = await import("https://esm.town/v/std/openai");
919 const { z } = await import("npm:zod");
920 const { fetch } = await import("https://esm.town/v/std/fetch");
944 }
945
946 // --- Helper Function: Call OpenAI API (Unchanged) ---
947 async function callOpenAI(
948 openai: OpenAI,
949 systemPrompt: string,
950 userMessage: string,
953 ): Promise<{ role: "assistant" | "system"; content: string | object }> {
954 try {
955 const response = await openai.chat.completions.create({
956 model,
957 messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
971 else { return { role: "assistant", content }; }
972 } catch (error) {
973 console.error(`OpenAI call failed (ExpectJSON: ${expectJson}):`, error);
974 let msg = "Error communicating with AI.";
975 if (error.message) msg += ` Details: ${error.message}`;
976 if (error.status === 401) msg = "OpenAI Auth Error.";
977 if (error.status === 429) msg = "OpenAI Rate Limit Exceeded.";
978 return { role: "system", content: msg };
979 }
1042 log: LogEntry[],
1043 ): Promise<LogEntry[]> {
1044 const openai = new OpenAI();
1045 log.push({ agent: "System", type: "step", message: "Workflow started." });
1046 log.push({
1115 // --- Content Analysis (Unchanged) ---
1116 log.push({ agent: "System", type: "step", message: "Analyzing content..." });
1117 const anaRes = await callOpenAI(openai, contentAnalysisSystemPrompt, truncText, "gpt-4o", true);
1118 if (anaRes.role === "assistant" && anaRes.content && (anaRes.content as AnalysisResult).summary) { // Type assertion for check
1119 log.push({ agent: "Content Analysis Agent", type: "result", message: anaRes.content });
1124 // --- NEW: Paradigmatic Analysis ---
1125 log.push({ agent: "System", type: "step", message: "Analyzing document context/paradigm..." });
1126 const paradigmRes = await callOpenAI(openai, paradigmaticAnalysisSystemPrompt, truncText, "gpt-4o", true);
1127 if (
1128 paradigmRes.role === "assistant" && paradigmRes.content && (paradigmRes.content as ParadigmaticInfo).documentType
1135 // --- MODIFIED: Reference Extraction ---
1136 log.push({ agent: "System", type: "step", message: "Extracting references..." });
1137 const refRes = await callOpenAI(openai, referenceExtractionSystemPrompt, truncText, "gpt-4o", true); // Use updated prompt
1138 let extRef: Reference[] = []; // Use Reference type
1139 // Check the correct key 'references' from the prompt

Pathway / main.tsx (15 matches)

@Get•Updated 3 days ago
2 * Multi-Agent Policy Document Analysis (Single Val Version with PDF Upload & Dashboard Style)
3 * Demonstrates document ingestion (URL, Text, PDF Upload), content analysis,
4 * citation extraction, and basic reference traversal using collaborative AI agents via OpenAI.
5 * Uses 'npm:pdf.js-extract' for direct PDF text extraction within the Val (experimental).
6 * Serves an HTML UI with a dashboard grid background and handles API requests within the same Val.
7 * OpenAI import is dynamically done inside the main server function.
8 *
9 * Based on structure from multi-agent support simulation example.
10 * Assumes 'openai' secret is set in Val Town environment variables.
11 *
12 * Last Updated: 2025-05-01 (Added PDF upload, dashboard style)
171 <p class="description">
172 Enter a document URL, paste text, or upload a PDF below. AI agents will analyze content, extract citations, and attempt reference traversal.<br>
173 Uses OpenAI via Val Town & <code>npm:pdf.js-extract</code> for PDFs. Current Date: ${
174 new Date().toLocaleDateString()
175 }
339export default async function(req: Request) {
340 // --- Dynamic Imports (Inside Handler) ---
341 const { OpenAI } = await import("https://esm.town/v/std/openai");
342 const { z } = await import("npm:zod");
343 const { fetch } = await import("https://esm.town/v/std/fetch");
397 }
398
399 // --- Helper Function: Call OpenAI API (Unchanged) ---
400 async function callOpenAI(
401 openai: OpenAI,
402 systemPrompt: string,
403 userMessage: string,
406 ): Promise<{ role: "assistant" | "system"; content: string | object }> {
407 try {
408 const response = await openai.chat.completions.create({
409 model: model,
410 messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
418 return { role: "assistant", content: JSON.parse(content) };
419 } catch (parseError) {
420 console.error("OpenAI JSON Parse Error:", parseError, "Raw Content:", content);
421 throw new Error(`AI response was not valid JSON. Raw: ${content.substring(0, 150)}...`);
422 }
423 } else { return { role: "assistant", content: content }; }
424 } catch (error) {
425 console.error(`OpenAI API call failed. ExpectJSON: ${expectJson}. Error:`, error);
426 let errorMessage = "Error communicating with AI model.";
427 if (error.message) { errorMessage += ` Details: ${error.message}`; }
486 log: LogEntry[],
487 ): Promise<LogEntry[]> {
488 const openai = new OpenAI();
489
490 log.push({ agent: "System", type: "step", message: "Starting analysis workflow." });
554
555 // Limit text length
556 const MAX_TEXT_LENGTH = 20000; // Consider limits for OpenAI context window
557 const truncatedText = documentText.substring(0, MAX_TEXT_LENGTH);
558 if (documentText.length > MAX_TEXT_LENGTH) {
566 // --- Steps 2, 3, 4, 5 (Analysis, Extraction, Traversal, Final Output) ---
567 log.push({ agent: "System", type: "step", message: "Starting Content Analysis..." });
568 const analysisResponse = await callOpenAI(openai, contentAnalysisSystemPrompt, truncatedText, "gpt-4o", true);
569 let analysisResult: AnalysisResult | null = null;
570 if (analysisResponse.role === "assistant" && typeof analysisResponse.content === "object") {
575
576 log.push({ agent: "System", type: "step", message: "Starting Citation Extraction..." });
577 const citationResponse = await callOpenAI(openai, citationExtractionSystemPrompt, truncatedText, "gpt-4o", true);
578 let extractedCitations: Citation[] = [];
579 if (

testOpenAI (1 file match)

@shouser•Updated 1 day ago

testOpenAI (1 file match)

@stevekrouse•Updated 1 day ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",