Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1632 results for "openai" (937ms)

InstantQueryApp — 2 matches

@vawogbemi — Updated 3 months ago
8
9 if (req.method == "POST") {
10 const { OpenAI } = await import("https://esm.town/v/std/openai");
11 const { text } = await req.json();
12 const client = new OpenAI();
13
14 const schema = await getInstantSchema(Deno.env.get("INSTANT_APP_ID"), Deno.env.get("INSTANT_ADMIN_TOKEN"));

translateToEnglishWithOpenAI / main.tsx — 5 matches

@Hadarhubara — Updated 3 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2
3export default async function translateToEnglishWithOpenAI(text: string) {
4 const openai = new OpenAI();
5 const completion = await openai.chat.completions.create({
6 messages: [
7 {
32}
33
34console.log(await translateToEnglishWithOpenAI("שלום"));

boldWhiteDinosaur / artisticScarletScorpion — 3 matches

@Vicodin — Updated 3 months ago
224
225export default async function server(request: Request): Promise<Response> {
226 const { OpenAI } = await import("https://esm.town/v/std/openai");
227 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
228
269 `, [JSON.stringify(messages)]);
270
271 const openai = new OpenAI();
272 const systemPrompt = "Ты дружелюбный и умный ассистент. Отвечай четко и по существу.";
273
277 ];
278
279 const stream = await openai.chat.completions.create({
280 model: "gpt-4o-mini",
281 messages: modifiedMessages,

Clone_CHATGPT / bubblyGreenBat — 3 matches

@Custodia — Updated 3 months ago
168
169export default async function server(request: Request): Promise<Response> {
170 const { OpenAI } = await import("https://esm.town/v/std/openai");
171 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
172
213 `, [JSON.stringify(messages)]);
214
215 const openai = new OpenAI();
216 const systemPrompt = "Ты дружелюбный и умный ассистент. Отвечай четко и по существу.";
217
221 ];
222
223 const stream = await openai.chat.completions.create({
224 model: "gpt-4o-mini",
225 messages: modifiedMessages,

Cloneklon / main.tsx — 3 matches

@Custodia — Updated 3 months ago
224
225export default async function server(request: Request): Promise<Response> {
226 const { OpenAI } = await import("https://esm.town/v/std/openai");
227 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
228
269 `, [JSON.stringify(messages)]);
270
271 const openai = new OpenAI();
272 const systemPrompt = "Ты дружелюбный и умный ассистент. Отвечай четко и по существу.";
273
277 ];
278
279 const stream = await openai.chat.completions.create({
280 model: "gpt-4o-mini",
281 messages: modifiedMessages,

enchantingScarletChickadee / main.tsx — 3 matches

@Custodia — Updated 3 months ago
224
225export default async function server(request: Request): Promise<Response> {
226 const { OpenAI } = await import("https://esm.town/v/std/openai");
227 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
228
269 `, [JSON.stringify(messages)]);
270
271 const openai = new OpenAI();
272 const systemPrompt = "Ты дружелюбный и умный ассистент. Отвечай четко и по существу.";
273
277 ];
278
279 const stream = await openai.chat.completions.create({
280 model: "gpt-4o-mini",
281 messages: modifiedMessages,

LlmDashboard / model-management — 2 matches

@prashamtrivedi — Updated 3 months ago
5import { AnthropicProvider } from "./llm-providers/llm-provider-anthropic";
6import { GoogleProvider } from "./llm-providers/llm-provider-gemini";
7import { OpenAIProvider } from "./llm-providers/llm-provider-openai";
8
9function ProgressBar({ progress }: { progress: number }) {
338 // Initialize providers
339 const providers = [
340 new OpenAIProvider(),
341 new AnthropicProvider(),
342 new GoogleProvider(),
184 ): Promise<CompletionResponse> {
185 // Implement function calling for Google
186 // Note: Gemini has different function calling format than OpenAI
187 throw new Error("Function calling not yet implemented for Google provider");
188 }

LlmDashboard / llm-provider-openai — 24 matches

@prashamtrivedi — Updated 3 months ago
1/**
2 * llm-provider-openai.ts
3 * OpenAI-specific provider implementation
4 */
5
6import { OpenAI } from "https://esm.town/v/std/openai";
7import type {
8 BaseModel,
16
17/**
18 * OpenAI-specific parameter extensions
19 */
20export interface OpenAIParams {
21 reasoningEffort?: number; // For models with reasoning capabilities
22 jsonMode?: boolean; // Force JSON output
25
26/**
27 * OpenAI model configuration with provider-specific params
28 */
29export type OpenAIConfig = WithProviderParams<BaseModelConfig, OpenAIParams>;
30
31/**
32 * OpenAI-specific model interface
33 */
34interface OpenAIModel extends BaseModel {
35 providerSpecific: OpenAIParams;
36}
37
38export class OpenAIProvider implements BaseProvider {
39 readonly id = "openai";
40 readonly name = "OpenAI";
41 private client: OpenAI;
42
43 constructor() {
44 this.client = new OpenAI();
45 }
46
47 models: OpenAIModel[] = [
48 {
49 id: "gpt-4",
123 prompt: string | Message[],
124 modelId?: string,
125 config?: OpenAIConfig,
126 ): Promise<CompletionResponse> {
127 const model = this.models.find(m => m.id === (modelId || this.defaultModel));
167 };
168 } catch (error) {
169 throw new Error(`OpenAI API error: ${error.message}`);
170 }
171 }
175 functions: FunctionDefinition[],
176 modelId?: string,
177 config?: OpenAIConfig,
178 ): Promise<CompletionResponse> {
179 const model = this.models.find(m => m.id === (modelId || this.defaultModel));
223 };
224 } catch (error) {
225 throw new Error(`OpenAI API error: ${error.message}`);
226 }
227 }
228
229 private calculateCost(
230 model: OpenAIModel,
231 inputTokens: number,
232 outputTokens: number,
241 prompt: string | Message[],
242 modelId?: string,
243 config?: OpenAIConfig,
244 ): AsyncIterableIterator<CompletionResponse> {
245 const model = this.models.find(m => m.id === (modelId || this.defaultModel));
282 }
283 } catch (error) {
284 throw new Error(`OpenAI API error: ${error.message}`);
285 }
286 }
287}
288
289export default OpenAIProvider;

chatGPTClone / main.tsx — 4 matches

@Cpk — Updated 3 months ago
89export default async function server(request: Request): Promise<Response> {
90 if (request.method === 'POST' && new URL(request.url).pathname === '/chat') {
91 const { OpenAI } = await import("https://esm.town/v/std/openai");
92 const openai = new OpenAI();
93
94 try {
95 const { messages } = await request.json();
96
97 const completion = await openai.chat.completions.create({
98 messages: messages,
99 model: "gpt-4o-mini",
107 });
108 } catch (error) {
109 console.error('OpenAI Error:', error);
110 return new Response(JSON.stringify({
111 message: 'Sorry, there was an error processing your request.'

translateToEnglishWithOpenAI — 1 file match

@shlmt — Updated 6 days ago

testOpenAI — 1 file match

@stevekrouse — Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",