Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=80&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1626 results for "openai" (530ms)

translator — README.md — 1 match

@AIWB•Updated 3 months ago
3The app is set up so you can easily have a conversation between two people. The app will translate between the two selected languages, in each voice, as the speakers talk.
4
5Add your OpenAI API Key, and make sure to open in a separate window for Mic to work.

API_URL — main.tsx — 4 matches

@awhitter•Updated 3 months ago
1import { val } from 'https://esm.town/v/std/val';
2import { OpenAI } from 'https://esm.town/v/std/openai';
3
4// Initialize OpenAI
5const openai = new OpenAI();
6
7// Type definitions for Airtable attachments
166 `;
167
168 const completion = await openai.chat.completions.create({
169 messages: [
170 {

contentTemplateApp — main.tsx — 3 matches

@awhitter•Updated 3 months ago
282 } else if (url.pathname === "/api/analyze") {
283 if (req.method === 'POST') {
284 const { OpenAI } = await import("https://esm.town/v/std/openai");
285 const openai = new OpenAI();
286
287 try {
292 Full Content: ${fullContent}`;
293
294 const completion = await openai.chat.completions.create({
295 messages: [{ role: "user", content: prompt }],
296 model: "gpt-4o-mini",

ai — main.tsx — 17 matches

@goode_bye•Updated 3 months ago
3import { createGoogleGenerativeAI, google } from "npm:@ai-sdk/google";
4import { createMistral, mistral } from "npm:@ai-sdk/mistral";
5import { createOpenAI } from "npm:@ai-sdk/openai";
6import { embed, generateObject, generateText, streamText } from "npm:ai";
7import { cors } from "npm:hono/cors";
33});
34
35const openai = createOpenAI({
36 // apiKey = Deno.env.get("OPENAI_API_KEY");
37 apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET"),
38});
39
40const groq = createOpenAI({
41 baseURL: "https://api.groq.com/openai/v1",
42 apiKey: Deno.env.get("GROQ_API_KEY"),
43});
44
45const perplexity = createOpenAI({
46 apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? "",
47 baseURL: "https://api.perplexity.ai/",
59 this.memories = options.memories || [];
60 this.messages = options.messages || [];
61 this.defaultProvider = options.provider || "openai";
62 this.defaultModel = options.model;
63 this.defaultMaxTokens = options.maxTokens;
125 let result;
126 switch (provider) {
127 case "openai":
128 result = await this.generateOpenAIResponse({
129 model,
130 prompt,
240 }
241
242 async generateOpenAIResponse(
243 {
244 model,
261
262 if (embed) {
263 let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
264 // console.log('embed!', result)
265 return result;
267
268 const options = {
269 model: openai(modelId),
270 system,
271 temperature,
320 }
321
322 async generateOpenAIEmbedding({ model, value, dimensions, user }) {
323 const modelId = model || "text-embedding-3-large";
324 const options = {
325 model: openai.embedding(modelId, {
326 dimensions,
327 user,
586
587app.get("/generate", async (c) => {
588 const provider = c.req.query("provider") || "openai";
589 const model = c.req.query("model");
590 const prompt = c.req.query("prompt");
646 console.log("post/generate", { mode: "post/generate", prompt, provider, model });
647 const response = await modelProvider.gen({
648 provider: provider || "openai",
649 model,
650 prompt,

OpenAIChatCompletion — README.md — 5 matches

@rozek•Updated 3 months ago
12
13* fork this val
14* use the fork's HTTP endpoint URL (in the form "https://XXX-openaichatcompletion.web.val.run") as AI server URL
15* **define an environment variable called "OpenAIChatCompletion"** with any kind of content (but without any blanks or control characters, e.g., a [UUID](https://rozek-uuidv4_generator.web.val.run)) and use that as your personal access key
16
17Now, you can ask everybody to provide their AI credentials and still use the OpenAI access provided by Val.Town for your personal tests.
18
19> **Nota bene: if the environment variable "OpenAIChatCompletion" has not been defined, access to your fork's endpoint is free for everybody!**
20
21In addition to the described authorization, this val also provides resource "throttling" (using val [floatingQuotaTracker](https://www.val.town/v/rozek/floatingQuotaTracker) in sqlite tables "OpenAIChatCompletion_Info" and "OpenAIChatCompletion_Log") and calculates some access statistics (using val [InvocationTracker](https://www.val.town/v/rozek/InvocationTracker) in sqlite tables "OpenAIChatCompletion_Usage_Info" and "OpenAIChatCompletion_Usage_Log")

cloneChatGPT — main.tsx — 3 matches

@uniquebengal•Updated 3 months ago
91export default async function server(request: Request): Promise<Response> {
92 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
93 const { OpenAI } = await import("https://esm.town/v/std/openai");
94 const KEY = "cloneChatGPT";
95 const SCHEMA_VERSION = 3;
104
105 if (request.method === "POST") {
106 const openai = new OpenAI();
107 const { messages } = await request.json();
108
109 try {
110 const completion = await openai.chat.completions.create({
111 messages: messages,
112 model: "gpt-4o-mini",

cerebrasTemplate — README.md — 2 matches

@h123•Updated 3 months ago
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"

cerebrasTemplate — main.tsx — 2 matches

@h123•Updated 3 months ago
83 const { messages } = await request.json();
84
85 const { OpenAI } = await import("https://esm.sh/openai");
86 // Cerebras Inference
87 // Very important: do not change this
88 // Keep these comments so we remember not to change this
89 const client = new OpenAI({
90 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
91 baseURL: "https://api.cerebras.ai/v1",

Priyam28Gpt — main.tsx — 4 matches

@priyam•Updated 3 months ago
107 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
108 try {
109 const { OpenAI } = await import("https://esm.town/v/std/openai");
110 const openai = new OpenAI();
111
112 const { messages } = await request.json();
113
114 const completion = await openai.chat.completions.create({
115 messages: messages,
116 model: "gpt-4o-mini",
122 });
123 } catch (error) {
124 console.error("OpenAI Error:", error);
125 return new Response(
126 JSON.stringify({

OpenTownie — generateCode.ts — 3 matches

@pomdtr•Updated 3 months ago
1import OpenAI from "https://esm.sh/openai";
2
3function parseValResponse(response: string) {
28 const system = await (await fetch(`${import.meta.url.split("/").slice(0, -1).join("/")}/system_prompt.txt`)).text();
29
30 const openai = new OpenAI({
31 baseURL: "https://openrouter.ai/api/v1",
32 apiKey: Deno.env.get("OPEN_ROUTER_KEY"),
33 });
34 console.log(messages);
35 const completion = await openai.chat.completions.create({
36 model: "deepseek/deepseek-r1",
37 messages: [

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 4 days ago

testOpenAI — 1 file match

@stevekrouse•Updated 6 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",