Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=13&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 2237 results for "openai" (2929ms)

intent/main.tsx — 11 matches

@legal • Updated 1 week ago
1import { fetch } from "https://esm.town/v/std/fetch";
2import { OpenAI } from "https://esm.town/v/std/openai";
3import { PDFExtract, PDFExtractOptions } from "npm:pdf.js-extract";
4
173}
174
175async function callOpenAI(
176 openaiInstance: OpenAI,
177 systemPrompt: string,
178 userMessage: string,
182 agentName: string,
183): Promise<object | string> {
184 log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
185 try {
186 const response = await openaiInstance.chat.completions.create({
187 model: model,
188 messages: [
222 }
223 } catch (error) {
224 console.error(agentName, "OpenAI API call error:", error);
225 let errMsg = "AI communication error.";
226 if (error.message) errMsg += ` Message: ${error.message}`;
1072 <li>**No client-side persistence of documents and analyses**</li>
1073 </ul>
1074 <p>This application uses OpenAI's GPT models for its AI capabilities. Data submitted will be processed by OpenAI.</p>
1075 </div>
1076 \`;
1085
1086export default async function(req: Request) {
1087 const openai = new OpenAI();
1088 const url = new URL(req.url);
1089 const format = url.searchParams.get("format");
1154 const suggPrompt = legalTaskSuggestionSystemPromptTemplate.replace("%%DOCUMENT_TEXT%%", suggText);
1155 const suggAgent = "Task Suggestion AI (LLM1)";
1156 const suggRes = await callOpenAI(
1157 openai,
1158 suggPrompt,
1159 "Generate task suggestions based on the provided document text.",
1303
1304 const analysisAgent = "Legal Analysis AI (LLM2)";
1305 const aiRes = await callOpenAI(openai, finalPrompt, docToAnalyze, "gpt-4o", true, log, analysisAgent);
1306
1307 if (typeof aiRes === "object" && (aiRes as any).error) {

lotusReflections/main.tsx — 14 matches

@aidanlotus • Updated 1 week ago
4
5// Environment Variables to set in Val.Town:
6// - OPENAI_KEY: Your OpenAI API Key
7// - NOTION_KEY: Your Notion API Key
8// - NOTION_DATABASE_ID: The Database ID for your "Reflections" database (submissions)
114
115 console.log("Main Submission Handler: Starting AI Analysis for:", userNumberFromForm);
116 const OPENAI_KEY = getEnv("OPENAI_KEY") as string;
117 let aiAnalysisResult = {
118 summary: "AI summary not generated.",
173 const userMessage =
174 `Please analyze the following user reflection:\n\nUser Reflection Text:\n\"\"\"\n${combinedResponsesText}\n\"\"\"\n\nProvide your analysis as a single JSON object.`;
175 const openAIPayload = {
176 model: "gpt-3.5-turbo",
177 messages: [{ role: "system", content: systemMessage }, { role: "user", content: userMessage }],
180 };
181 try {
182 const openAIResponse = await fetch("https://api.openai.com/v1/chat/completions", {
183 method: "POST",
184 headers: { "Content-Type": "application/json", "Authorization": `Bearer ${OPENAI_KEY}` },
185 body: JSON.stringify(openAIPayload),
186 });
187 if (!openAIResponse.ok) {
188 const errorBody = await openAIResponse.text();
189 console.error("Main Submission Handler: OpenAI API Error:", openAIResponse.status, errorBody);
190 }
191 else {
192 const openAIData = await openAIResponse.json();
193 const aiContent = openAIData.choices[0]?.message?.content;
194 if (aiContent) {
195 try {
210 } catch (parseError) {
211 console.error(
212 "Main Submission Handler: Failed to parse OpenAI JSON response:",
213 parseError,
214 "Raw AI content:",
217 }
218 } else {
219 console.warn("Main Submission Handler: OpenAI response content was empty.");
220 }
221 }
222 } catch (aiError) {
223 console.error("Main Submission Handler: Error calling OpenAI API:", aiError);
224 }
225

lotusReflectionsGetUser/main.tsx — 15 matches

@aidanlotus • Updated 1 week ago
76 const NOTION_USERS_DB_ID = getEnv("NOTION_USERS_DATABASE_ID") as string;
77 const NOTION_REFLECTIONS_DB_ID = getEnv("NOTION_DATABASE_ID") as string;
78 const OPENAI_KEY = getEnv("OPENAI_KEY") as string;
79
80 let isFirstTimeUser = true;
187Output ONLY the welcome message text.`;
188
189 const openAIPayload = {
190 model: "gpt-3.5-turbo",
191 messages: [{ role: "system", content: systemPrompt }],
194 };
195
196 console.log("getUserReflectionContext: Calling OpenAI for personalized welcome...");
197 try {
198 const openAIResponse = await fetch("https://api.openai.com/v1/chat/completions", {
199 method: "POST",
200 headers: { "Content-Type": "application/json", "Authorization": `Bearer ${OPENAI_KEY}` },
201 body: JSON.stringify(openAIPayload),
202 });
203
204 if (!openAIResponse.ok) {
205 const errorBody = await openAIResponse.text();
206 console.error("getUserReflectionContext: OpenAI API Error:", openAIResponse.status, errorBody);
207 welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
208 } else {
209 const openAIData = await openAIResponse.json();
210 if (openAIData.choices && openAIData.choices[0] && openAIData.choices[0].message) {
211 welcomeMessage = openAIData.choices[0].message.content.trim();
212 console.log("getUserReflectionContext: OpenAI generated welcome:", welcomeMessage);
213 } else {
214 welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
215 console.warn(
216 "getUserReflectionContext: OpenAI response structure unexpected, using default welcome.",
217 );
218 }
219 }
220 } catch (aiError) {
221 console.error("getUserReflectionContext: Error calling OpenAI:", aiError);
222 welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
223 }

SON-GOKU/AIToolsList.tsx — 4 matches

@Itssongoku • Updated 1 week ago
11const aiTools: AITool[] = [
12 // AI Assistants & Chatbots
 13 { name: "ChatGPT", url: "https://chatgpt.com/", category: "Assistant", description: "OpenAI's conversational AI assistant" },
14 { name: "Claude", url: "https://claude.ai/", category: "Assistant", description: "Anthropic's AI assistant for various tasks and conversations" },
 15 { name: "Gemini", url: "https://gemini.google.com/", category: "Assistant", description: "Google's advanced AI assistant" },
27 // Image Generation & Editing
28 { name: "Midjourney", url: "https://www.midjourney.com/", category: "Images", description: "AI art and image generation platform" },
 29 { name: "DALL·E 3", url: "https://openai.com/dall-e-3", category: "Images", description: "OpenAI's advanced image generation model" },
30 { name: "Stable Diffusion", url: "https://stability.ai/", category: "Images", description: "Open-source AI image generation" },
31 { name: "Lexica", url: "https://lexica.art/", category: "Images", description: "AI art search engine and generator" },
73 { name: "Runway", url: "https://runwayml.com/", category: "Video", description: "AI video editing and generation tools" },
74 { name: "Synthesia", url: "https://www.synthesia.io/", category: "Video", description: "AI video generation with virtual avatars" },
 75 { name: "Sora", url: "https://openai.com/sora", category: "Video", description: "OpenAI's text-to-video generation model" },
76 { name: "Kling", url: "https://klingai.com/", category: "Video", description: "AI video generation platform" },
77 { name: "Hailuo", url: "https://hailuo.ai/", category: "Video", description: "AI video creation tool" },
318
319 // AI Research & Platforms
320 { name: "OpenAI", url: "https://openai.com/", category: "Research", description: "AI research and deployment company" },
321 { name: "Anthropic", url: "https://www.anthropic.com/", category: "Research", description: "AI safety research company" },
 322 { name: "xAI", url: "https://x.ai/", category: "Research", description: "Elon Musk's AI company" },

Townie/system_prompt.txt — 4 matches

@valdottown • Updated 1 week ago
88Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90### OpenAI
91
92```ts
93import { OpenAI } from "https://esm.town/v/std/openai";
94const openai = new OpenAI();
95const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

researchAgent/emailHandler — 2 matches

@charmaine • Updated 1 week ago
2import process from "node:process";
3import { marked } from "npm:marked";
4import { OpenAI } from "npm:openai";
5
6function pm(...lines: string[]): string {
16 );
17
18 const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
19 const response = await client.chat.completions.create({
20 model: "sonar",

svvc/main.tsx — 18 matches

@legal • Updated 1 week ago
1import { fetch } from "https://esm.town/v/std/fetch";
2import { OpenAI } from "https://esm.town/v/std/openai";
3import { z } from "npm:zod";
4
449}
450
451async function callOpenAI(
452 sysPrompt: string,
453 userPrompt: string,
458 lg(
459 "DEBUG",
460 "callOpenAI",
461 `Initiating OpenAI call tid=${tid}`,
462 { spLen: sysPrompt.length, upLen: userPrompt.length },
463 mid,
465 );
466 try {
467 const openai = new OpenAI();
468 const completion = await openai.chat.completions.create({
469 model: "gpt-4o-mini",
470 messages: [{ role: "system", content: sysPrompt }, { role: "user", content: userPrompt }],
477 lg(
478 "WARN",
479 "callOpenAI",
480 `OpenAI call returned no text tid=${tid}.`,
481 { usage: usage, finishReason: completion.choices[0]?.finish_reason },
482 mid,
487 lg(
488 "INFO",
489 "callOpenAI",
490 `OpenAI call OK tid=${tid}`,
491 { resLen: resText.length, usage: usage, finishReason: completion.choices[0]?.finish_reason },
492 mid,
496 } catch (err: any) {
497 const errDtls = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
498 lg("ERROR", "callOpenAI", `OpenAI API call failed tid=${tid}`, { error: errDtls }, mid, tid);
499 throw new Error(`OpenAI API failed: ${err.message}` + (err.code ? ` (Code: ${err.code})` : ""));
500 }
501}
910 `Generate the ${p.cType} for the specified platform based on the context provided in the system prompt.`;
911 try {
912 const genContent = await callOpenAI(sysP, userP, mid, tid, lg);
913 if (!genContent) {
914 lg("WARN", "CntAgent", `LLM returned no content tid=${tid}.`, undefined, mid, tid);
956 const userP = `Develop the marketing strategy based on the system prompt context & framework.`;
957 try {
958 const stratContent = await callOpenAI(sysP, userP, mid, tid, lg);
959 if (!stratContent) {
960 lg("WARN", "StratAgent", `LLM no content for strat tid=${tid}.`, undefined, mid, tid);
1007 `Generate 2-3 distinct logo concepts for "${p.bName}" based on the system prompt. Provide descriptions and AI prompts.`;
1008 try {
1009 const conceptContent = await callOpenAI(sysP, userP, mid, tid, lg);
1010 if (!conceptContent) {
1011 lg("WARN", "LogoAgent", `LLM no content for logo tid=${tid}.`, undefined, mid, tid);
1084 `Develop foundational brand ID guide for "${p.bName}" based on system prompt context/instructions. Ensure cohesive & practical.`;
1085 try {
1086 const idContent = await callOpenAI(sysP, userP, mid, tid, lg);
1087 if (!idContent) {
1088 lg("WARN", "BrandAgent", `LLM no content for brand ID tid=${tid}.`, undefined, mid, tid);
1148 const userP = `Run simulation type '${p.simType}' based on system prompt context/params. Follow format.`;
1149 try {
1150 const simContent = await callOpenAI(sysP, userP, mid, tid, lg);
1151 if (!simContent) {
1152 lg("WARN", "SimAgent", `LLM no content for sim tid=${tid}.`, undefined, mid, tid);
1196 `Based on my request in system context, generate/refine the system prompt per guidelines. Output only resulting prompt text.`;
1197 try {
1198 const genSysP = await callOpenAI(sysP, userP, mid, tid, lg);
1199 if (!genSysP) {
1200 lg("WARN", "MetaAgent", `LLM no content for meta-prompt tid=${tid}.`, undefined, mid, tid);

Contextual/README.md — 1 match

@c15r • Updated 1 week ago
14
15### Core Cognitive Tools
16- **AI-Enhanced Thought Forking**: Automatically generate parallel explorations using OpenAI
17- **Goal Tracking**: Create, update, and monitor goals with hierarchical structure
18- **Task Management**: Break down goals into actionable tasks with state tracking

os/data.json — 5 matches

@dinavinter • Updated 1 week ago
4 "projects": [
5 {
6 "raw": "# bolt.diy\r\n\r\n### Repository URL\r\n\r\nhttps://github.com/stackblitz-labs/bolt.diy\r\n\r\n### Project Description\r\n\r\nbolt.diy is an open-source platform that enables users to prompt, run, edit, and deploy full-stack web applications directly in the browser using various Large Language Models (LLMs). It supports integration with multiple LLM providers, including OpenAI, Anthropic, HuggingFace, and more. The platform offers features like an integrated terminal, code versioning, and the ability to attach images to prompts, facilitating a seamless AI-powered development experience. \r\n\r\n\r\n### Potential Contribution Areas\r\n\r\nAgent Collaboration: Enhance bolt.diy to support MCP tool access and A2A-based agent collaboration, fostering better interoperability between SAP agents and bolt.diy projects.\r\nSAP Integration: Develop adapters or plugins to integrate bolt.diy with SAP's App Router or Cloud Foundry, enabling seamless deployment of applications within the SAP ecosystem.\r\n\r\n### Estimated Time Commitment\r\n\r\nMedium (4-8 hours/week)\r\n\r\n### Required Skills\r\n\r\nTypeScript\r\n\r\n### License\r\n\r\nMIT\r\n\r\n### Additional Information\r\n\r\n_No response_",
7 "id": 22,
8 "title": "bolt.diy",
39 "projectName": "",
40 "repositoryUrl": "https://github.com/stackblitz-labs/bolt.diy",
41 "description": "bolt.diy is an open-source platform that enables users to prompt, run, edit, and deploy full-stack web applications directly in the browser using various Large Language Models (LLMs). It supports integration with multiple LLM providers, including OpenAI, Anthropic, HuggingFace, and more. The platform offers features like an integrated terminal, code versioning, and the ability to attach images to prompts, facilitating a seamless AI-powered development experience.",
42 "contribution": "Agent Collaboration: Enhance bolt.diy to support MCP tool access and A2A-based agent collaboration, fostering better interoperability between SAP agents and bolt.diy projects.\r\nSAP Integration: Develop adapters or plugins to integrate bolt.diy with SAP's App Router or Cloud Foundry, enabling seamless deployment of applications within the SAP ecosystem.",
43 "timeCommitment": "Medium (4-8 hours/week)",
222 },
223 {
224 "raw": "### Project Name\n\nLLM Proxy Server for SAP AI Core\n\n### Repository URL\n\nhttps://github.com/sap-samples/llm-proxy-sap-ai-core\n\n### Project Description\n\nThe LLM Proxy Server for SAP AI Core is an open-source project designed to serve as a lightweight, performant, and extensible intermediary between SAP AI Core and a variety of large language model (LLM) backends. Inspired by solutions like LightLLM, this proxy server standardizes interactions with different model providers (OpenAI, Anthropic, HuggingFace, etc.), handles rate limiting and caching, and simplifies integration for enterprise use cases. Its purpose is to provide a scalable, multi-tenant LLM gateway that plugs seamlessly into the SAP AI Core inference pipeline.\n\n### Potential Contribution Areas\n\n- Model Adapter Layer: Implement new adapters to support additional LLM providers or fine-tuned models hosted on SAP AI Core.\r\n- Request Routing & Optimization: Improve routing logic, caching strategies, and load balancing between LLM providers.\r\n- Security & Multi-Tenancy: Enhance authentication, logging, and quota management to support secure, tenant-aware deployments.\r\n- SAP Integration: Develop SDK components or API contracts to enable easy consumption of the proxy within SAP BTP-based applications.\n\n### Estimated Time Commitment\n\nMedium (4-8 hours/week)\n\n### Required Skills\n\n_No response_\n\n### License\n\n_No response_\n\n### Additional Information\n\n_No response_",
225 "id": 23,
226 "title": "LLM Proxy Server for SAP AI Core",
256 "projectName": "LLM Proxy Server for SAP AI Core",
257 "repositoryUrl": "https://github.com/sap-samples/llm-proxy-sap-ai-core",
258 "description": "The LLM Proxy Server for SAP AI Core is an open-source project designed to serve as a lightweight, performant, and extensible intermediary between SAP AI Core and a variety of large language model (LLM) backends. Inspired by solutions like LightLLM, this proxy server standardizes interactions with different model providers (OpenAI, Anthropic, HuggingFace, etc.), handles rate limiting and caching, and simplifies integration for enterprise use cases. Its purpose is to provide a scalable, multi-tenant LLM gateway that plugs seamlessly into the SAP AI Core inference pipeline.",
259 "contribution": "- Model Adapter Layer: Implement new adapters to support additional LLM providers or fine-tuned models hosted on SAP AI Core.\r\n- Request Routing & Optimization: Improve routing logic, caching strategies, and load balancing between LLM providers.\r\n- Security & Multi-Tenancy: Enhance authentication, logging, and quota management to support secure, tenant-aware deployments.\r\n- SAP Integration: Develop SDK components or API contracts to enable easy consumption of the proxy within SAP BTP-based applications.",
260 "timeCommitment": "Medium (4-8 hours/week)",
746 },
747 {
748 "raw": "# Open WebUI \r\n ---\r\n \r\n ## πŸ”— Repository URL \r\n https://github.com/open-webui/open-webui\r\n \r\n ## 🧠 Project Description \r\n Local web UI for interacting with LLMs like OpenAI, Ollama, LM Studio, and others.\r\n \r\n ## 🧩 Interoperability: Agent & Tool Protocol Fit \r\n Could integrate MCP to allow access to external tool APIs and A2A for backend AI agent interaction.\r\n \r\n ## πŸ› οΈ How AI Guild Can Contribute \r\n - Add SAP API Hub wrapper as MCP server\r\n - Build agent-to-agent pipeline that connects support tooling to dev tools\r\n \r\n ## πŸ•’ Estimated Time Commitment \r\n Medium (4-8 hours/week)\r\n\r\n \r\n ## πŸ§ͺ Required Skills \r\n TypeScript, Python, LLM APIs, Web UI Development\r\n \r\n ## βš–οΈ License \r\n \r\n \r\n ---\r\n \r\n ## πŸ’¬ Additional Information \r\n \r\n \r\n πŸ“Ž Linked source: `Open WebUI.json`\r\n ",
749 "id": 9,
750 "title": "Open WebUI",

Change-Logs-Generator/process-commits.tsx — 3 matches

@hussufo • Updated 1 week ago
5import { discordWebhook } from "https://esm.town/v/stevekrouse/discordWebhook";
6import { Octokit } from "npm:octokit";
7import { OpenAI } from "https://esm.town/v/std/openai";
8
9// Environment variables, set them in the left sidebar
25 */
26async function generateUserFocusedSummary(fullCommitMessage: string, commitType: string): Promise<string> {
27 const openai = new OpenAI();
28
29 const prompt = `You are writing release notes for a developer tool. Based on this commit message, write a concise one-liner.
51
52 try {
53 const completion = await openai.chat.completions.create({
54 messages: [{ role: "user", content: prompt }],
55 model: "gpt-4o-mini",

openai-client — 1 file match

@cricks_unmixed4u • Updated 1 week ago

openai_enrichment — 6 file matches

@stevekrouse • Updated 1 week ago
kwhinnery_openai
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai"; import { sqlite } from "https://esm.town/v/stevekrouse/sqlite"; /** * Practical Implementation of Collective Content Intelligence * Bridging advanced AI with collaborative content creation */ exp