Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=12&format=json
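
For example, a minimal TypeScript sketch of calling the search endpoint (assuming the standard fetch API; the JSON response shape is not documented above, so it is left untyped here):

```ts
// Fetch one page of search results as JSON.
const url = new URL("https://codesearch.val.run/");
url.searchParams.set("q", "openai");
url.searchParams.set("page", "1");
url.searchParams.set("format", "json");

const res = await fetch(url);
if (!res.ok) throw new Error(`Search request failed: ${res.status}`);
const results: unknown = await res.json(); // response shape not documented above
console.log(results);
```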

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
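
A similar sketch for typeahead; the string-array return format comes from the note above:

```ts
// Fetch typeahead suggestions for a query prefix.
const res = await fetch("https://codesearch.val.run/typeahead?q=openai");
if (!res.ok) throw new Error(`Typeahead request failed: ${res.status}`);
// Array of strings such as "username" or "username/projectName".
const suggestions: string[] = await res.json();
console.log(suggestions);
```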

Found 1581 results for "openai" (1312ms)

SimpleAudioPlayerPWA / voices.tsx (1 match)

@dcm31•Updated 1 week ago
1 // List of available voices for OpenAI TTS API
2 export const voices = [
3 { id: "alloy", name: "Alloy", description: "Neutral, versatile voice" },

SimpleAudioPlayer / index.ts (4 matches)

@dcm31•Updated 1 week ago
190 const { text } = await request.json();
191
192 // Using the OpenAI standard library for TTS
193 const { OpenAI } = await import("https://esm.town/v/std/openai");
194 const openai = new OpenAI();
195
196 const mp3 = await openai.audio.speech.create({
197 model: "tts-1",
198 voice: "alloy",

groqllms / main.tsx (1 match)

@yawnxyz•Updated 1 week ago
14 const urls = [
15 "https://console.groq.com/docs/quickstart",
16 "https://console.groq.com/docs/openai",
17 "https://console.groq.com/docs/models",
18 "https://console.groq.com/docs/rate-limits",

ItalianBrainRotGenerator / main.tsx (5 matches)

@dcm31•Updated 1 week ago
128 if (url.pathname === "/generate-character") {
129 try {
130 const { OpenAI } = await import("https://esm.town/v/std/openai");
131 const openai = new OpenAI();
132
133 // Define the Italian Brainrot character templates based on popular examples
158 Create a character that perfectly fits this absurd meme universe without mentioning any specific brands or companies in the description.`;
159
160 const nameCompletion = await openai.chat.completions.create({
161 messages: [
162 { role: "system", content: brainrotSystemPrompt },
169 const characterName = nameCompletion.choices[0].message.content.trim();
170
171 const descriptionCompletion = await openai.chat.completions.create({
172 messages: [
173 { role: "system", content: brainrotSystemPrompt },
187
188 // Get a viral catchphrase
189 const catchphraseCompletion = await openai.chat.completions.create({
190 messages: [
191 { role: "system", content: brainrotSystemPrompt },

MyStevens / sendDailyBrief.ts (9 matches)

@billogiovanni•Updated 1 week ago
1 import { Bot } from "https://deno.land/x/grammy@v1.35.0/mod.ts";
2 import { DateTime } from "https://esm.sh/luxon@3.4.4";
3 import { OpenAI } from "npm:openai";
4 import { backstory } from "../backstory.ts";
5 import {
13 } from "../memoryUtils.ts";
14
15 async function generateBriefingContent(openai, memories, today, isSunday) {
16 try {
17 const weekdaysHelp = generateWeekDays(today);
74 };
75
76 console.log("Sending prompt to openai...", userMessage);
77
78 const response = await openai.messages.create({
79 model: "claude-3-7-sonnet-latest",
80 max_tokens: 30000,
98 export async function sendDailyBriefing(chatId?: string, today?: DateTime) {
99 // Get API keys from environment
100 const apiKey = Deno.env.get("OPENAI_API_KEY");
101 const telegramToken = Deno.env.get("TELEGRAM_TOKEN");
102
107
108 if (!apiKey) {
109 console.error("openai API key is not configured.");
110 return;
111 }
121 }
122
123 // Initialize openai client
124 const openai = new OpenAI({ apiKey });
125
126 // Initialize Telegram bot
145 // Generate briefing content
146 const content = await generateBriefingContent(
147 openai,
148 memories,
149 today,

svvc / main.tsx (17 matches)

@Get•Updated 1 week ago
1 import { fetch } from "https://esm.town/v/std/fetch";
2 import { OpenAI } from "https://esm.town/v/std/openai";
3 import { z } from "npm:zod";
4
666 }
667
668 async function callOpenAI(
669 systemPrompt: string,
670 userPrompt: string,
675 log(
676 "DEBUG",
677 "callOpenAI",
678 `Calling OpenAI for task ${taskId}`,
679 { sysLen: systemPrompt.length, usrLen: userPrompt.length },
680 mandateId,
682 );
683 try {
684 const openai = new OpenAI();
685 const completion = await openai.chat.completions.create({
686 model: "gpt-4o-mini",
687 messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userPrompt }],
692 const usage = completion.usage;
693 if (!resultText) {
694 log("WARN", "callOpenAI", `OpenAI returned no content for task ${taskId}.`, { usage }, mandateId, taskId);
695 return null;
696 }
697 log(
698 "DEBUG",
699 "callOpenAI",
700 `OpenAI call successful for task ${taskId}`,
701 { resLen: resultText.length, usage },
702 mandateId,
706 } catch (error) {
707 const errorDetails = { msg: error.message, name: error.name, status: error.status, type: error.type };
708 log("ERROR", "callOpenAI", `OpenAI API call failed for task ${taskId}`, { error: errorDetails }, mandateId, taskId);
709 throw new Error(`OpenAI API error: ${error.message}`);
710 }
711 }
1096 `Please generate the ${payload.contentType} for ${payload.platform} based on the details provided in the system prompt.`;
1097 try {
1098 const generatedContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
1099 if (!generatedContent) {
1100 log("WARN", "ContentCreationAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);
1154 const userPrompt = `Please develop the marketing strategy based on the details provided in the system prompt.`;
1155 try {
1156 const strategyContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
1157 if (!strategyContent) {
1158 log("WARN", "MarketingStrategyAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);
1217 const userPrompt = `Generate logo concepts for "${payload.businessName}" based on the details in the system prompt.`;
1218 try {
1219 const conceptContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
1220 if (!conceptContent) {
1221 log("WARN", "LogoConceptAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);
1283 `Build the brand identity guide for "${payload.businessName}" based on the details in the system prompt.`;
1284 try {
1285 const identityContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
1286 if (!identityContent) {
1287 log("WARN", "BrandIdentityAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);
1333 const userPrompt = `Run the ${payload.simulationType} simulation using the parameters provided in the system prompt.`;
1334 try {
1335 const simContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
1336 if (!simContent) {
1337 log("WARN", "SimulationAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);
1384 `Generate or refine the system prompt based on my request provided in the system prompt context.`;
1385 try {
1386 const genSysPrompt = await callOpenAI(systemPromptForMeta, userPromptForMeta, mandateId, taskId, log);
1387 if (!genSysPrompt) {
1388 log("WARN", "MetaPromptAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);

svvc / main.tsx (18 matches)

@salon•Updated 1 week ago
1 import { fetch } from "https://esm.town/v/std/fetch";
2 import { OpenAI } from "https://esm.town/v/std/openai";
3 import { z } from "npm:zod";
4
396 }
397
398 async function callOpenAI(sysPrompt: string, userPrompt: string, mid: string, tid: string, lg: LogFn): Promise<string | null> {
399 lg("DEBUG", "callOpenAI", `Initiating OpenAI call tid=${tid}`, { spLen: sysPrompt.length, upLen: userPrompt.length }, mid, tid);
400 try {
401 if (!process.env.OPENAI_API_KEY) {
402 lg("ERROR", "callOpenAI", "OpenAI API key missing.", undefined, mid, tid);
403 throw new Error("OpenAI API key not configured.");
404 }
405 const openai = new OpenAI();
406 const completion = await openai.chat.completions.create({
407 model: "gpt-4o-mini",
408 messages: [{ role: "system", content: sysPrompt }, { role: "user", content: userPrompt }],
412 const usage = completion.usage;
413 if (!resText) {
414 lg("WARN", "callOpenAI", `OpenAI call returned no text tid=${tid}.`, { usage: usage, finishReason: completion.choices[0]?.finish_reason }, mid, tid);
415 return null;
416 }
417 lg("INFO", "callOpenAI", `OpenAI call OK tid=${tid}`, { resLen: resText.length, usage: usage, finishReason: completion.choices[0]?.finish_reason }, mid, tid);
418 return resText.trim();
419 } catch (err: any) {
420 const errDtls = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
421 lg("ERROR", "callOpenAI", `OpenAI API call failed tid=${tid}`, { error: errDtls }, mid, tid);
422 throw new Error(`OpenAI API failed: ${err.message}` + (err.code ? ` (Code: ${err.code})` : ''));
423 }
424 }
626 const userP = `Generate the ${p.cType} for the specified platform based on the context provided in the system prompt.`;
627 try {
628 const genContent = await callOpenAI(sysP, userP, mid, tid, lg);
629 if (!genContent) { lg("WARN", "CntAgent", `LLM returned no content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { content: "" }, e: "AI failed to generate content." }; }
630 const output: CntOut = { content: genContent };
644 const userP = `Develop the marketing strategy based on the system prompt context & framework.`;
645 try {
646 const stratContent = await callOpenAI(sysP, userP, mid, tid, lg);
647 if (!stratContent) { lg("WARN", "StratAgent", `LLM no content for strat tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { strategy: "" }, e: "AI failed strategy." }; }
648 let adIdeas: string[] = [];
664 const userP = `Generate 2-3 distinct logo concepts for "${p.bName}" based on the system prompt. Provide descriptions and AI prompts.`;
665 try {
666 const conceptContent = await callOpenAI(sysP, userP, mid, tid, lg);
667 if (!conceptContent) { lg("WARN", "LogoAgent", `LLM no content for logo tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { concepts: [], prompts: [] }, e: "AI failed logo concepts." }; }
668 const concepts: string[] = []; const prompts: string[] = [];
690 const userP = `Develop foundational brand ID guide for "${p.bName}" based on system prompt context/instructions. Ensure cohesive & practical.`;
691 try {
692 const idContent = await callOpenAI(sysP, userP, mid, tid, lg);
693 if (!idContent) { lg("WARN", "BrandAgent", `LLM no content for brand ID tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { bVoice: "", kMsgs: [], cPalSugs: "", typo: "" }, e: "AI failed brand ID guide." }; }
694 const voiceM = idContent.match(/## Brand Voice & Personality\n([\s\S]*?)(?=\n##|$)/i);
718 const userP = `Run simulation type '${p.simType}' based on system prompt context/params. Follow format.`;
719 try {
720 const simContent = await callOpenAI(sysP, userP, mid, tid, lg);
721 if (!simContent) { lg("WARN", "SimAgent", `LLM no content for sim tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { simResult: "" }, e: "AI failed sim." }; }
722 let simResult = simContent; let analysis: string | undefined = undefined;
741 const userP = `Based on my request in system context, generate/refine the system prompt per guidelines. Output only resulting prompt text.`;
742 try {
743 const genSysP = await callOpenAI(sysP, userP, mid, tid, lg);
744 if (!genSysP) { lg("WARN", "MetaAgent", `LLM no content for meta-prompt tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { sysPrompt: "" }, e: "AI failed to generate system prompt." }; }
745 const finalP = genSysP.replace(/^```(markdown|text)?\s*/i, '').replace(/\s*```$/, '').trim();

stevensDemo / .cursorrules (4 matches)

@brenwildt42•Updated 1 week ago
100 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
101
102 ### OpenAI
103 ```ts
104 import { OpenAI } from "https://esm.town/v/std/openai";
105 const openai = new OpenAI();
106 const completion = await openai.chat.completions.create({
107 messages: [
108 { role: "user", content: "Say hello in a creative way" },

prompt / main.tsx (7 matches)

@salon•Updated 1 week ago
1 import { fetch } from "https://esm.town/v/std/fetch";
2 import { OpenAI } from "https://esm.town/v/std/openai"; // Assuming OpenAI library is available
3 import { z } from "npm:zod"; // Using Zod for input validation
4
340 // LLM Interaction Function (Simplified)
341 const llm = async (systemPrompt: string, userPrompt: string, reqId: string, log: LogFn): Promise<string> => {
342 log("DEBUG", "LLM", `OpenAI Call`, { sysL: systemPrompt.length, usrL: userPrompt.length }, reqId);
343 try {
344 // IMPORTANT: Replace with your actual OpenAI API key mechanism (e.g., environment variable)
345 const oa = new OpenAI(); // Assumes OPENAI_API_KEY env var is set
346 const completion = await oa.chat.completions.create({
347 // Consider using a more powerful model for prompt generation, maybe cheaper for testing
354 const usage = completion.usage;
355 if (!txt) {
356 log("WARN", "LLM", `OpenAI returned no content.`, { usage }, reqId);
357 throw new Error("LLM returned no content.");
358 }
359 log("DEBUG", "LLM", `OpenAI Success`, { resL: txt.length, usage }, reqId);
360 return txt.trim();
361 } catch (err: any) {
362 const errD = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
363 log("ERROR", "LLM", `OpenAI API call failed`, { error: errD }, reqId);
364 throw new Error(`LLM API error: ${err.message}${err.code ? ` (Code: ${err.code})` : ''}`);
365 }

med / main.tsx (7 matches)

@salon•Updated 1 week ago
1 import { fetch } from "https://esm.town/v/std/fetch";
2 import { OpenAI } from "https://esm.town/v/std/openai";
3 import { z } from "npm:zod";
4
114
115 const llm = async (sysPrompt: string, userPrompt: string, mid: string, tid: string, log: LogFn): Promise<string | null> => {
116 log("DEBUG", "LLM", `Call OpenAI task ${tid}`, { sysL: sysPrompt.length, usrL: userPrompt.length }, mid, tid);
117 const finalSys = sysPrompt + `\n\nREMINDER: Info only, not medical advice. Verify & use clinical judgment.`;
118 try {
119 const oa = new OpenAI(); // Assumes API key is set in environment
120 const completion = await oa.chat.completions.create({
121 model: "gpt-4o-mini", // Use appropriate model
125 const txt = completion.choices[0]?.message?.content;
126 const usage = completion.usage;
127 if (!txt) { log("WARN", "LLM", `OpenAI task ${tid} no content.`, { usage }, mid, tid); return null; }
128 log("DEBUG", "LLM", `OpenAI task ${tid} success`, { resL: txt.length, usage }, mid, tid);
129 return txt.trim();
130 } catch (err: any) {
131 const errD = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
132 log("ERROR", "LLM", `OpenAI API task ${tid} failed`, { error: errD }, mid, tid);
133 throw new Error(`OpenAI API error: ${err.message}${err.code ? ` (Code: ${err.code})` : ''}`);
134 }
135 }

testOpenAI (1 file match)

@stevekrouse•Updated 1 day ago

testOpenAI (1 file match)

@shouser•Updated 3 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",