Val Town Code Search — Return to Val Town

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=56&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 1632 results for "openai" (369ms)

researchAgent / emailHandler — 2 matches

@xdstudiosmedia•Updated 1 month ago
2import process from "node:process";
3import { marked } from "npm:marked";
4import { OpenAI } from "npm:openai";
5
6function pm(...lines: string[]): string {
16 );
17
18 const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
19 const response = await client.chat.completions.create({
20 model: "sonar",

blah / main.tsx — 1 match

@davincidreams•Updated 1 month ago
6 "description": "A sample blah manifest demonstrating various tool types and configurations.",
7 "env": {
8 "OPENAI_API_KEY": Deno.env.get("OPENAI_API_KEY"),
9 },
10 "tools": [

townieBear / index.ts — 44 matches

@charmaine•Updated 1 month ago
2 * This is a Townie clone for Val Town.
3 * It creates a chat interface that helps users generate Val Town applications.
4 * Uses OpenAI's API to generate code responses in the proper format.
5 */
6/** @jsxImportSource https://esm.sh/react@18.2.0 */
91If you are changing a SQLite table's schema, you should also change the table's name so it creates a fresh table, ie by adding _2 or _3 after it everywhere. Ensure that tables are created before they are used.
92
93OPENAI
94
95Val Town includes a free, proxied OpenAI:
96
97import { OpenAI } from "https://esm.town/v/std/openai";
98const openai = new OpenAI();
99const completion = await openai.chat.completions.create({
100 messages: [
101 { role: "user", content: "Say hello in a creative way" },
176): Promise<string> {
177 try {
178 // Try using the proxied OpenAI first, if that fails, fall back to direct API call
179 try {
180 return await processWithValTownOpenAI(messages, systemPrompt, stream);
181 } catch (error) {
182 console.error("Error with Val Town OpenAI:", error);
183 console.log("Falling back to direct OpenAI API call");
184 return await processWithDirectOpenAI(messages, systemPrompt, stream);
185 }
186 } catch (error) {
191
192/**
193 * Process chat using Val Town's proxied OpenAI
194 */
195async function processWithValTownOpenAI(
196 messages: Message[],
197 systemPrompt: string,
199): Promise<string> {
200 try {
201 const { OpenAI } = await import("https://esm.town/v/std/openai");
202 const openai = new OpenAI();
203
204 // Prepare the messages array with system prompt
205 const openaiMessages = [
206 { role: "system", content: systemPrompt },
207 ...messages.map(m => ({ role: m.role, content: m.content }))
208 ];
209
210 console.log("Sending to Val Town OpenAI:", {
211 messageCount: openaiMessages.length,
212 firstUserMessage: messages[0]?.content?.substring(0, 20) + "..."
213 });
215 // If streaming is not required, get a complete response
216 if (!stream) {
217 const completion = await openai.chat.completions.create({
218 messages: openaiMessages,
219 model: "gpt-4o-mini", // Using Val Town's available model
220 temperature: 0.7,
223 return completion.choices[0]?.message?.content || "Sorry, I couldn't generate a response.";
224 } else {
225 // Streaming is not directly supported by Val Town OpenAI wrapper
226 // Falling back to direct API
227 throw new Error("Streaming not supported by Val Town OpenAI wrapper");
228 }
229 } catch (error) {
230 console.error("Error in processWithValTownOpenAI:", error);
231 throw error;
232 }
234
235/**
236 * Process chat using direct OpenAI API
237 */
238async function processWithDirectOpenAI(
239 messages: Message[],
240 systemPrompt: string,
242): Promise<string> {
243 // Get API key from environment
244 const apiKey = Deno.env.get("OPENAI_API_KEY");
245
246 if (!apiKey) {
247 throw new Error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variable.");
248 }
249
250 // Format messages for OpenAI API
251 const openaiMessages = [
252 { role: "system", content: systemPrompt },
253 ...messages.map(m => ({ role: m.role, content: m.content }))
254 ];
255
256 console.log("Sending to Direct OpenAI:", {
257 messageCount: openaiMessages.length,
258 firstUserMessage: messages[0]?.content?.substring(0, 20) + "..."
259 });
261 if (stream) {
262 // Stream the response if a stream is provided
263 return await streamChatResponse(openaiMessages, apiKey, stream);
264 } else {
265 // Otherwise, return the complete response
266 return await fetchChatResponse(openaiMessages, apiKey);
267 }
268}
269
270/**
271 * Fetch a complete chat response from OpenAI
272 */
273async function fetchChatResponse(messages: any[], apiKey: string): Promise<string> {
274 try {
275 const response = await fetch("https://api.openai.com/v1/chat/completions", {
276 method: "POST",
277 headers: {
288 if (!response.ok) {
289 const errorText = await response.text();
290 console.error("OpenAI API error response:", errorText);
291 try {
292 const errorData = JSON.parse(errorText);
293 throw new Error(`OpenAI API error: ${response.status} ${errorData.error?.message || errorText}`);
294 } catch (e) {
295 throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
296 }
297 }
306
307/**
308 * Stream a chat response from OpenAI
309 */
310async function streamChatResponse(messages: any[], apiKey: string, stream: any): Promise<string> {
311 try {
312 const response = await fetch("https://api.openai.com/v1/chat/completions", {
313 method: "POST",
314 headers: {
326 if (!response.ok) {
327 const errorText = await response.text();
328 console.error("OpenAI API streaming error:", errorText);
329 throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
330 }
331
374 status: "ok",
375 message: "Backend is working",
376 hasOpenAiKey: Boolean(Deno.env.get("OPENAI_API_KEY")),
377 hasValTownOpenAI: true
378 };
379});

summarize_solar_weather / main.tsx — 2 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
16
17 const { text: summary } = await generateText({
18 model: openai("gpt-4o-mini"),
19 system: "You are a solar weather expert.",
20 prompt: `Summarize today's solar weather based on the following data: ${JSON.stringify(todayData)}`,

code_feedback_tool / main.tsx — 2 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
12 }
13 const { text: feedback } = await generateText({
14 model: openai("gpt-4o-mini"),
15 system: "You are an expert code reviewer.",
16 prompt: "Analyze the following codebase for skill level, code quality, potential code smells, and areas for improvement: " + body.codebase,

generate_personality_profile / main.tsx — 2 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
13 const interactionsText = body.interactions.join("\n");
14 const { text: personalityProfile } = await generateText({
15 model: openai("gpt-4o-mini"),
16 system: "You are a personality analyst.",
17 prompt: "Based on the following user interactions, identify patterns and summarize the user's personality traits: " + interactionsText,

honest_feedback_tool / main.tsx — 3 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
15
16 const { text: codeAnalysis } = await generateText({
17 model: openai("gpt-4o-mini"),
18 system: "You are a code quality and complexity analyst.",
19 prompt: `Analyze the following codebase for quality, complexity, and structure: ${codebase}. Include metrics such as the number of files, lines of code, and code complexity.`,
21
22 const { text: skillAssessment } = await generateText({
23 model: openai("gpt-4o-mini"),
24 system: "You are an expert in assessing programming skills.",
25 prompt: `Based on the following questions and answers, estimate the user's skill level and experience: ${questions}. Consider topics like programming languages used, frameworks, and previous projects.`,

generate_haiku / main.tsx — 2 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
12 }
13 const { text: haiku } = await generateText({
14 model: openai("gpt-4o-mini"),
15 system: "You are a poet creating haikus.",
16 prompt: `Create a haiku about the following topic: ${body.topic}. The haiku should follow the 5-7-5 syllable structure.`,

logical_fallacy_analyzer / main.tsx — 2 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
12 }
13 const { text: analysis } = await generateText({
14 model: openai("gpt-4o-mini"),
15 system: "You are an expert in identifying logical fallacies.",
16 prompt: "Analyze the following text for common logical fallacies such as ad hominem, straw man, slippery slope, etc.: " + body.text,

suggest_animal_diet / main.tsx — 2 matches

@davincidreams•Updated 1 month ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
12 }
13 const { text: dietSuggestion } = await generateText({
14 model: openai("gpt-4o-mini"),
15 system: "You are an expert zoologist.",
16 prompt:

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 6 days ago

testOpenAI — 1 file match

@stevekrouse•Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",