Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/$2?q=openai&page=16&format=json
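
For example, a minimal fetch sketch in TypeScript (the root search path and the shape of the parsed JSON response are assumptions here, not documented above):

```ts
// Sketch: request a page of search results as JSON.
// The root path and the parsed response shape are assumptions.
const params = new URLSearchParams({ q: "openai", page: "16", format: "json" });

const res = await fetch(`https://codesearch.val.run/?${params}`);
if (!res.ok) throw new Error(`Search request failed: ${res.status}`);

const results = await res.json();
console.log(results);
```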

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
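
For instance, a small TypeScript sketch of calling the typeahead endpoint (the helper name and error handling are illustrative assumptions):

```ts
// Sketch: fetch typeahead suggestions for a query prefix.
// Each returned string is "username" or "username/projectName", per the note above.
async function typeahead(prefix: string): Promise<string[]> {
  const res = await fetch(
    `https://codesearch.val.run/typeahead?q=${encodeURIComponent(prefix)}`,
  );
  if (!res.ok) throw new Error(`Typeahead request failed: ${res.status}`);
  return await res.json() as string[];
}

console.log(await typeahead("openai"));
```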

Found 2240 results for "openai" (4748ms)

SpanishPanda_bot/main.tsx • 5 matches

@orguetta • Updated 1 week ago
1 import { OpenAI } from "https://esm.town/v/std/openai";
2 import { telegramSendMessage } from "https://esm.town/v/vtdocs/telegramSendMessage?v=5";
3 import {
15 console.log(`received: ${text}`)
16 if (text) {
17 const response = await translateToSpanishWithOpenAI(text);
18 console.log(`translated to: ${response}`);
19 ctx.reply(response);
33
34
35 async function translateToSpanishWithOpenAI(text: string) {
36 const openai = new OpenAI();
37 const completion = await openai.chat.completions.create({
38 messages: [
39 {

Legalhelp/main.tsx • 12 matches

@Get • Updated 1 week ago
2 * Legal AI Document Analysis (Single Val Version with PDF Upload & Dashboard Style)
3 * Ingests documents (URL, Text, PDF Upload), takes a user-defined legal task query,
4 * and uses a Legal AI Agent (via OpenAI) to analyze the content.
5 * The Legal AI Agent outputs a standardized JSON structure.
6 * Uses 'npm:pdf.js-extract' for direct PDF text extraction within the Val.
620 // --- Main Request Handler (Server Code) ---
621 export default async function(req: Request) {
622 const { OpenAI } = await import("https://esm.town/v/std/openai");
623 // Zod is imported globally via import statement at the top of the script
624 const { fetch } = await import("https://esm.town/v/std/fetch"); // Using std/fetch
676 }
677
678 // --- Helper Function: Call OpenAI API ---
679 async function callOpenAI(
680 openai: OpenAI,
681 systemPrompt: string,
682 userMessage: string,
686 log.push({ agent, type: "step", message: CONFIG.statusMessages.aiAnalysisInProgress(model) });
687 try {
688 const response = await openai.chat.completions.create({
689 messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
690 temperature: 0.2, // REFACTOR: Slightly lower for more precision
732 message: `AI response was not valid JSON. Raw: ${content.substring(0, 250)}...`,
733 });
734 console.error("OpenAI JSON Parse Error:", parseError, "Raw Content:", content);
735 return {
736 error: "AI_JSON_PARSE_ERROR",
740 }
741 } catch (error) {
742 console.error(`OpenAI API call failed for model ${model}. Error:`, error);
743 let errorMessage = `Error communicating with AI model (${model}).`;
744 if (error.message) { errorMessage += ` Details: ${error.message}`; }
762 },
763 ): Promise<{ finalResult?: LegalAIResponse | object; log: LogEntry[] }> {
764 const openai = new OpenAI();
765
766 log.push({ agent: "System", type: "step", message: "Legal AI analysis workflow initiated." });
878 .replace("%%AI_MODEL_USED%%", input.aiModel);
879
880 const aiResponseOrError = await callOpenAI(openai, finalSystemPrompt, truncatedText, input.aiModel);
881
882 if (typeof aiResponseOrError === "object" && (aiResponseOrError as any).error) {
883 // Error already logged by callOpenAI
884 log.push({ agent: "System", type: "final", message: "Workflow finished with AI processing errors." });
885 return { finalResult: aiResponseOrError, log };
886 }
887 // At this point, Zod validation passed in callOpenAI, so it's LegalAIResponse
888 const aiResponse = aiResponseOrError as LegalAIResponse;
889

ai/main.tsx • 17 matches

@rups • Updated 1 week ago
2 import { Hono } from "npm:hono@3";
3 import { cors } from "npm:hono/cors";
4 import { createOpenAI } from "npm:@ai-sdk/openai";
5 import { createAnthropic } from "npm:@ai-sdk/anthropic@0.0.48";
6 import { google, createGoogleGenerativeAI } from 'npm:@ai-sdk/google';
30 });
31
32 const openai = createOpenAI({
33 // apiKey = Deno.env.get("OPENAI_API_KEY");
34 apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET")
35 });
36
37
38 const groq = createOpenAI({
39 baseURL: 'https://api.groq.com/openai/v1',
40 apiKey: Deno.env.get("GROQ_API_KEY"),
41 });
42
43 const perplexity = createOpenAI({
44 apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? '',
45 baseURL: 'https://api.perplexity.ai/',
57 this.memories = options.memories || [];
58 this.messages = options.messages || [];
59 this.defaultProvider = options.provider || 'openai';
60 this.defaultModel = options.model;
61 this.defaultMaxTokens = options.maxTokens;
122 let result;
123 switch (provider) {
124 case 'openai':
125 result = await this.generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, ...additionalSettings });
126 break;
127 case 'anthropic':
171 }
172
173 async generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, embed, value, dimensions, user, ...additionalSettings }) {
174 const modelId = model || 'gpt-3.5-turbo';
175
176 if (embed) {
177 let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
178 // console.log('embed!', result)
179 return result
181
182 const options = {
183 model: openai(modelId),
184 system,
185 temperature,
235 }
236
237 async generateOpenAIEmbedding({ model, value, dimensions, user }) {
238 const modelId = model || 'text-embedding-3-large';
239 const options = {
240 model: openai.embedding(modelId, {
241 dimensions,
242 user,
491
492 app.get('/generate', async (c) => {
493 const provider = c.req.query('provider') || 'openai';
494 const model = c.req.query('model');
495 const prompt = c.req.query('prompt');
523 console.log("post/generate", { mode: 'post/generate', prompt, provider, model });
524 const response = await modelProvider.gen({
525 provider: provider || 'openai',
526 model,
527 prompt,

voicemessages/voicenotes.ts • 3 matches

@cameronpak • Updated 1 week ago
1 import { type Context, Hono } from "https://esm.sh/hono@3.11.7";
2 import { blob } from "https://esm.town/v/std/blob";
3 import { OpenAI } from "https://esm.town/v/std/openai";
4 import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
5 import Groq from "npm:groq-sdk";
16
17 const app = new Hono();
18 const openai = new OpenAI();
19
20 // Get all voice notes (for admin/dashboard)
153 async function transcribeAudio(voiceNoteId: string, audioBuffer: ArrayBuffer) {
154 try {
155 // Convert ArrayBuffer to File for OpenAI
156 const audioFile = new File([audioBuffer], "audio.webm", { type: "audio/webm" });
157

Townie/system_prompt.txt • 4 matches

@shecthrives • Updated 1 week ago
88 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90 ### OpenAI
91
92 ```ts
93 import { OpenAI } from "https://esm.town/v/std/openai";
94 const openai = new OpenAI();
95 const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

Townie/.cursorrules • 4 matches

@shecthrives • Updated 1 week ago
94 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
95
96 ### OpenAI
97
98 ```ts
99 import { OpenAI } from "https://esm.town/v/std/openai";
100 const openai = new OpenAI();
101 const completion = await openai.chat.completions.create({
102 messages: [
103 { role: "user", content: "Say hello in a creative way" },

Townie/system_prompt.txt • 4 matches

@KhadijahAleeyuA • Updated 1 week ago
88 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90 ### OpenAI
91
92 ```ts
93 import { OpenAI } from "https://esm.town/v/std/openai";
94 const openai = new OpenAI();
95 const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

Townie/.cursorrules • 4 matches

@KhadijahAleeyuA • Updated 1 week ago
94 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
95
96 ### OpenAI
97
98 ```ts
99 import { OpenAI } from "https://esm.town/v/std/openai";
100 const openai = new OpenAI();
101 const completion = await openai.chat.completions.create({
102 messages: [
103 { role: "user", content: "Say hello in a creative way" },

Townie/system_prompt.txt • 4 matches

@claudiaowusu • Updated 1 week ago
88 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90 ### OpenAI
91
92 ```ts
93 import { OpenAI } from "https://esm.town/v/std/openai";
94 const openai = new OpenAI();
95 const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

Townie/.cursorrules • 4 matches

@claudiaowusu • Updated 1 week ago
94 Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
95
96 ### OpenAI
97
98 ```ts
99 import { OpenAI } from "https://esm.town/v/std/openai";
100 const openai = new OpenAI();
101 const completion = await openai.chat.completions.create({
102 messages: [
103 { role: "user", content: "Say hello in a creative way" },

openai-client • 1 file match

@cricks_unmixed4u • Updated 1 week ago

openai_enrichment • 6 file matches

@stevekrouse • Updated 1 week ago
kwhinnery_openai
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
/**
 * Practical Implementation of Collective Content Intelligence
 * Bridging advanced AI with collaborative content creation
 */
exp