Val Town Code SearchReturn to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=1&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1632 results for "openai" (579ms)

r1 — main.tsx — 2 matches

@sboesen•Updated 3 months ago
1import { encode } from "https://deno.land/std@0.203.0/encoding/base64.ts";
2import OpenAI from "https://esm.sh/openai@4.24.1";
3import { email } from "https://esm.town/v/std/email";
4import { pdfText } from "jsr:@pdf/pdftext";
134async function sendRequestToFireworks(prompt, apiKey, model) {
135 try {
136 const client = new OpenAI({
137 baseURL: "https://api.fireworks.ai/inference/v1",
138 apiKey: apiKey,

OpenRouterChatCompletion_Test — main.tsx — 2 matches

@rozek•Updated 3 months ago
2
3 export default async function (Request:Request):Promise<Response> {
4 const OpenAIRequest = {
5 messages: [
6 { role:'system', content:'please answer the following question' },
14 method: 'POST',
15 headers:{ 'Content-Type':'application/json' },
16 body: JSON.stringify(OpenAIRequest)
17 });
18

jarvisPrototype — main.tsx — 3 matches

@pashaabhi•Updated 3 months ago
243export default async function server(request: Request): Promise<Response> {
244 if (request.method === "POST") {
245 const { OpenAI } = await import("https://esm.town/v/std/openai");
246 const openai = new OpenAI();
247
248 const body = await request.json();
249 const messages = body.messages || [];
250
251 const completion = await openai.chat.completions.create({
252 messages: messages,
253 model: "gpt-4o-mini",

OpenRouterChatCompletion — main.tsx — 1 match

@rozek•Updated 3 months ago
114 }
115
116 return await OpenAIChatCompletion(Request)
117 }
118

OpenRouterChatCompletionLead — main.tsx — 1 match

@rozek•Updated 3 months ago
2 import { floatingQuotaTracker } from 'https://esm.town/v/rozek/floatingQuotaTracker'
3
4/**** how long until "OpenAIChatCompletion" becomes available again? ****/
5
6 export default async function (Request:Request):Promise<Response> {

translator — main.tsx — 8 matches

@AIWB•Updated 3 months ago
2import { Hono } from "npm:hono@3";
3import { html } from "npm:hono@3/html";
4import { OpenAI } from "npm:openai";
5
6const app = new Hono();
7const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY_VOICE") });
8
9class TranscriptionService {
10 async transcribeAudio(audioFile) {
11 try {
12 const transcription = await openai.audio.transcriptions.create({
13 file: audioFile,
14 model: "whisper-1",
18 return transcription;
19 } catch (error) {
20 console.error("OpenAI API error:", error);
21 throw error;
22 }
403
404 try {
405 const response = await openai.chat.completions.create({
406 model: "gpt-4o-mini",
407 messages: [
421 return c.text(translation);
422 } catch (error) {
423 console.error("OpenAI API error:", error);
424 return c.text("Error occurred during translation", 500);
425 }
436
437 try {
438 const mp3 = await openai.audio.speech.create({
439 model: "tts-1",
440 voice: voice,
448 });
449 } catch (error) {
450 console.error("OpenAI API error:", error);
451 return c.text("Error occurred during speech generation", 500);
452 }

translator — README.md — 1 match

@AIWB•Updated 3 months ago
3The app is set up so you can easily have a conversation between two people. The app will translate between the two selected languages, in each voice, as the speakers talk.
4
5Add your OpenAI API Key, and make sure to open in a separate window for Mic to work.

API_URL — main.tsx — 4 matches

@awhitter•Updated 3 months ago
1import { val } from 'https://esm.town/v/std/val';
2import { OpenAI } from 'https://esm.town/v/std/openai';
3
4// Initialize OpenAI
5const openai = new OpenAI();
6
7// Type definitions for Airtable attachments
166 `;
167
168 const completion = await openai.chat.completions.create({
169 messages: [
170 {

contentTemplateApp — main.tsx — 3 matches

@awhitter•Updated 3 months ago
282 } else if (url.pathname === "/api/analyze") {
283 if (req.method === 'POST') {
284 const { OpenAI } = await import("https://esm.town/v/std/openai");
285 const openai = new OpenAI();
286
287 try {
292 Full Content: ${fullContent}`;
293
294 const completion = await openai.chat.completions.create({
295 messages: [{ role: "user", content: prompt }],
296 model: "gpt-4o-mini",

ai — main.tsx — 17 matches

@goode_bye•Updated 3 months ago
3import { createGoogleGenerativeAI, google } from "npm:@ai-sdk/google";
4import { createMistral, mistral } from "npm:@ai-sdk/mistral";
5import { createOpenAI } from "npm:@ai-sdk/openai";
6import { embed, generateObject, generateText, streamText } from "npm:ai";
7import { cors } from "npm:hono/cors";
33});
34
35const openai = createOpenAI({
36 // apiKey = Deno.env.get("OPENAI_API_KEY");
37 apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET"),
38});
39
40const groq = createOpenAI({
41 baseURL: "https://api.groq.com/openai/v1",
42 apiKey: Deno.env.get("GROQ_API_KEY"),
43});
44
45const perplexity = createOpenAI({
46 apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? "",
47 baseURL: "https://api.perplexity.ai/",
59 this.memories = options.memories || [];
60 this.messages = options.messages || [];
61 this.defaultProvider = options.provider || "openai";
62 this.defaultModel = options.model;
63 this.defaultMaxTokens = options.maxTokens;
125 let result;
126 switch (provider) {
127 case "openai":
128 result = await this.generateOpenAIResponse({
129 model,
130 prompt,
240 }
241
242 async generateOpenAIResponse(
243 {
244 model,
261
262 if (embed) {
263 let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
264 // console.log('embed!', result)
265 return result;
267
268 const options = {
269 model: openai(modelId),
270 system,
271 temperature,
320 }
321
322 async generateOpenAIEmbedding({ model, value, dimensions, user }) {
323 const modelId = model || "text-embedding-3-large";
324 const options = {
325 model: openai.embedding(modelId, {
326 dimensions,
327 user,
586
587app.get("/generate", async (c) => {
588 const provider = c.req.query("provider") || "openai";
589 const model = c.req.query("model");
590 const prompt = c.req.query("prompt");
646 console.log("post/generate", { mode: "post/generate", prompt, provider, model });
647 const response = await modelProvider.gen({
648 provider: provider || "openai",
649 model,
650 prompt,

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 6 days ago

testOpenAI — 1 file match

@stevekrouse•Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",