Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=84&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 1670 results for "openai" (1071ms)

jarvisPrototype — main.tsx — 3 matches

@pashaabhi•Updated 3 months ago
243export default async function server(request: Request): Promise<Response> {
244 if (request.method === "POST") {
245 const { OpenAI } = await import("https://esm.town/v/std/openai");
246 const openai = new OpenAI();
247
248 const body = await request.json();
249 const messages = body.messages || [];
250
251 const completion = await openai.chat.completions.create({
252 messages: messages,
253 model: "gpt-4o-mini",

OpenRouterChatCompletion — main.tsx — 1 match

@rozek•Updated 3 months ago
114 }
115
116 return await OpenAIChatCompletion(Request)
117 }
118

OpenRouterChatCompletionLead — main.tsx — 1 match

@rozek•Updated 3 months ago
2 import { floatingQuotaTracker } from 'https://esm.town/v/rozek/floatingQuotaTracker'
3
4/**** how long until "OpenAIChatCompletion" becomes available again? ****/
5
6 export default async function (Request:Request):Promise<Response> {

translator — main.tsx — 8 matches

@AIWB•Updated 3 months ago
2import { Hono } from "npm:hono@3";
3import { html } from "npm:hono@3/html";
4import { OpenAI } from "npm:openai";
5
6const app = new Hono();
7const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY_VOICE") });
8
9class TranscriptionService {
10 async transcribeAudio(audioFile) {
11 try {
12 const transcription = await openai.audio.transcriptions.create({
13 file: audioFile,
14 model: "whisper-1",
18 return transcription;
19 } catch (error) {
20 console.error("OpenAI API error:", error);
21 throw error;
22 }
403
404 try {
405 const response = await openai.chat.completions.create({
406 model: "gpt-4o-mini",
407 messages: [
421 return c.text(translation);
422 } catch (error) {
423 console.error("OpenAI API error:", error);
424 return c.text("Error occurred during translation", 500);
425 }
436
437 try {
438 const mp3 = await openai.audio.speech.create({
439 model: "tts-1",
440 voice: voice,
448 });
449 } catch (error) {
450 console.error("OpenAI API error:", error);
451 return c.text("Error occurred during speech generation", 500);
452 }

translator — README.md — 1 match

@AIWB•Updated 3 months ago
3The app is set up so you can easily have a conversation between two people. The app will translate between the two selected languages, in each voice, as the speakers talk.
4
5Add your OpenAI API Key, and make sure to open in a separate window for Mic to work.

API_URL — main.tsx — 4 matches

@awhitter•Updated 3 months ago
1import { val } from 'https://esm.town/v/std/val';
2import { OpenAI } from 'https://esm.town/v/std/openai';
3
4// Initialize OpenAI
5const openai = new OpenAI();
6
7// Type definitions for Airtable attachments
166 `;
167
168 const completion = await openai.chat.completions.create({
169 messages: [
170 {

contentTemplateApp — main.tsx — 3 matches

@awhitter•Updated 3 months ago
282 } else if (url.pathname === "/api/analyze") {
283 if (req.method === 'POST') {
284 const { OpenAI } = await import("https://esm.town/v/std/openai");
285 const openai = new OpenAI();
286
287 try {
292 Full Content: ${fullContent}`;
293
294 const completion = await openai.chat.completions.create({
295 messages: [{ role: "user", content: prompt }],
296 model: "gpt-4o-mini",

ai — main.tsx — 17 matches

@goode_bye•Updated 3 months ago
3import { createGoogleGenerativeAI, google } from "npm:@ai-sdk/google";
4import { createMistral, mistral } from "npm:@ai-sdk/mistral";
5import { createOpenAI } from "npm:@ai-sdk/openai";
6import { embed, generateObject, generateText, streamText } from "npm:ai";
7import { cors } from "npm:hono/cors";
33});
34
35const openai = createOpenAI({
36 // apiKey = Deno.env.get("OPENAI_API_KEY");
37 apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET"),
38});
39
40const groq = createOpenAI({
41 baseURL: "https://api.groq.com/openai/v1",
42 apiKey: Deno.env.get("GROQ_API_KEY"),
43});
44
45const perplexity = createOpenAI({
46 apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? "",
47 baseURL: "https://api.perplexity.ai/",
59 this.memories = options.memories || [];
60 this.messages = options.messages || [];
61 this.defaultProvider = options.provider || "openai";
62 this.defaultModel = options.model;
63 this.defaultMaxTokens = options.maxTokens;
125 let result;
126 switch (provider) {
127 case "openai":
128 result = await this.generateOpenAIResponse({
129 model,
130 prompt,
240 }
241
242 async generateOpenAIResponse(
243 {
244 model,
261
262 if (embed) {
263 let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
264 // console.log('embed!', result)
265 return result;
267
268 const options = {
269 model: openai(modelId),
270 system,
271 temperature,
320 }
321
322 async generateOpenAIEmbedding({ model, value, dimensions, user }) {
323 const modelId = model || "text-embedding-3-large";
324 const options = {
325 model: openai.embedding(modelId, {
326 dimensions,
327 user,
586
587app.get("/generate", async (c) => {
588 const provider = c.req.query("provider") || "openai";
589 const model = c.req.query("model");
590 const prompt = c.req.query("prompt");
646 console.log("post/generate", { mode: "post/generate", prompt, provider, model });
647 const response = await modelProvider.gen({
648 provider: provider || "openai",
649 model,
650 prompt,

OpenAIChatCompletion — README.md — 5 matches

@rozek•Updated 3 months ago
12
13* fork this val
14* use the fork's HTTP endpoint URL (in the form "https://XXX-openaichatcompletion.web.val.run") as AI server URL
15* **define an environment variable called "OpenAIChatCompletion"** with any kind of content (but without any blanks or control characters, e.g., a [UUID](https://rozek-uuidv4_generator.web.val.run)) and use that as your personal access key
16
17Now, you can ask everybody to provide their AI credentials and still use the OpenAI access provided by Val.Town for your personal tests.
18
19> **Nota bene: if the environment variable "OpenAIChatCompletion" has not been defined, access to your fork's endpoint is free for everybody!**
20
21In addition to the described authorization, this val also provides resource "throttling" (using val [floatingQuotaTracker](https://www.val.town/v/rozek/floatingQuotaTracker) in sqlite tables "OpenAIChatCompletion_Info" and "OpenAIChatCompletion_Log") and calculates some access statistics (using val [InvocationTracker](https://www.val.town/v/rozek/InvocationTracker) in sqlite tables "OpenAIChatCompletion_Usage_Info" and "OpenAIChatCompletion_Usage_Log")

cloneChatGPT — main.tsx — 3 matches

@uniquebengal•Updated 3 months ago
91export default async function server(request: Request): Promise<Response> {
92 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
93 const { OpenAI } = await import("https://esm.town/v/std/openai");
94 const KEY = "cloneChatGPT";
95 const SCHEMA_VERSION = 3;
104
105 if (request.method === "POST") {
106 const openai = new OpenAI();
107 const { messages } = await request.json();
108
109 try {
110 const completion = await openai.chat.completions.create({
111 messages: messages,
112 model: "gpt-4o-mini",

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 1 week ago

testOpenAI — 1 file match

@stevekrouse•Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",