Val Town Code SearchReturn to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=1&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 2472 results for "openai"(8110ms)

weatherDashboardREADME.md1 match

@poorvis885•Updated 3 months ago
12
13Frontend: React 18, TailwindCSS
14APIs: Open-Meteo, OpenAI GPT-4o
15Hosting: ESM-based module imports

basemain.tsx8 matches

@Learn•Updated 3 months ago
360
361export default async function server(request: Request): Promise<Response> {
362 const { OpenAI } = await import("https://esm.town/v/std/openai");
363
364 // Enhanced server-side logging
375 }
376
377 const openai = new OpenAI();
378
379 const url = new URL(request.url);
421
422 // Call AgentA to create the tool definition
423 const agentACompletion = await openai.chat.completions.create({
424 model: "gpt-4o-mini",
425 response_format: { type: "json_object" },
470 `;
471
472 const agentBCompletion = await openai.chat.completions.create({
473 model: "gpt-4o-mini",
474 response_format: { type: "json_object" },
519 });
520
521 // Use AgentB (a separate OpenAI instance) to process the tool request
522 const agentBPrompt = `
523 You are AgentB, a specialized tool agent designed to process specific information requests.
538 `;
539
540 const agentBCompletion = await openai.chat.completions.create({
541 model: "gpt-4o-mini",
542 response_format: { type: "json_object" },
582 });
583
584 // Fallback response if OpenAI fails
585 let agentAResponse: AgentResponse = {
586 actionType: "direct_response",
627 `;
628
629 const agentACompletion = await openai.chat.completions.create({
630 model: "gpt-4o-mini",
631 response_format: { type: "json_object" },

basemain.tsx8 matches

@web•Updated 3 months ago
248
249export default async function server(request: Request): Promise<Response> {
250 const { OpenAI } = await import("https://esm.town/v/std/openai");
251 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
252
276 };
277
278 const openai = new OpenAI();
279 const KEY = extractKey(new URL(import.meta.url));
280 const SCHEMA_VERSION = 3; // Increment schema version for tool support
531
532 // Call AgentA to create the tool definition
533 const agentACompletion = await openai.chat.completions.create({
534 model: "gpt-4o-mini",
535 response_format: { type: "json_object" },
596 `;
597
598 const agentBCompletion = await openai.chat.completions.create({
599 model: "gpt-4o-mini",
600 response_format: { type: "json_object" },
710 }
711
712 // Use AgentB (a separate OpenAI instance) to process the tool request
713 const agentBPrompt = `
714 You are AgentB, a specialized tool agent designed to process specific information requests.
729 `;
730
731 const agentBCompletion = await openai.chat.completions.create({
732 model: "gpt-4o-mini",
733 response_format: { type: "json_object" },
794 });
795
796 // Fallback response if OpenAI fails
797 let agentAResponse: AgentResponse = {
798 actionType: "direct_response",
841 `;
842
843 const agentACompletion = await openai.chat.completions.create({
844 model: "gpt-4o-mini",
845 response_format: { type: "json_object" },

componentmain.tsx3 matches

@web•Updated 3 months ago
65
66export default async function server(request: Request): Promise<Response> {
67 const { OpenAI } = await import("https://esm.town/v/std/openai");
68 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
69
70 const openai = new OpenAI();
71 const url = new URL(request.url);
72 const path = url.pathname.split("/").filter(Boolean);
128 `;
129
130 const completion = await openai.chat.completions.create({
131 model: "gpt-4o-mini",
132 messages: [{ role: "user", content: componentPrompt }],

blah_migratedmain.tsx1 match

@ajax•Updated 3 months ago
6 "description": "A sample blah manifest demonstrating various tool types and configurations.",
7 "env": {
8 "OPENAI_API_KEY": Deno.env.get("OPENAI_API_KEY"),
9 },
10 "tools": [

talk_about_conspiracymain.tsx2 matches

@ajax•Updated 3 months ago
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
3
8 try {
9 const { text: fact } = await generateText({
10 model: openai("gpt-4o-mini"),
11 system: "You are an expert in conspiracy.",
12 prompt: `Provide an interesting conspiracy for fun`,
askSMHI

askSMHIREADME.md4 matches

@ljus•Updated 3 months ago
1# askSMHI
2Using OpenAI chat completion with function calls to the [SMHI](https://en.wikipedia.org/wiki/Swedish_Meteorological_and_Hydrological_Institute) API
3
4The API is instructed to use the current time in Europe/Stockholm timezone.
8## Relevant API documentation
9* [SMHI, forecast documentation](https://opendata.smhi.se/apidocs/metfcst/get-forecast.html)
10* [OPEN AI, GPT function calling documentation](https://platform.openai.com/docs/guides/function-calling?api-mode=chat&lang=javascript)
11
12## How to use this endpoint
41
42## Environment variables
43* OPENAI_CHAT: Needs to be authorized to write chat completions and to the moderation API.
44## Packages used
45* openai: For typesafe API request and responses
46* valibot: for describing the SMHI API response and function API input
47* valibot/to-json-schema: Transform the schema to json schema (readable by the GPT API)
askSMHI

askSMHIservice6 matches

@ljus•Updated 3 months ago
3import { offset, removeOffset } from "npm:@formkit/tempo";
4import { isWithinTokenLimit } from "npm:gpt-tokenizer/model/gpt-4o";
5import { OpenAI } from "npm:openai";
6import * as v from "npm:valibot";
7import { openAIModeration } from "./gpt/moderation";
8import { getWeatherAtCoordinate } from "./smhi/forecast/service";
9import { getSmhiForecastResponseZodSchema } from "./smhi/schema";
18 return { error: "Too many tokens in question" };
19 }
20 const { flagged } = await openAIModeration([question]);
21 if (flagged) {
22 return { error: "Be nice in your question" };
42 }[],
43 };
44 const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
45
46 const completion = await openai.chat.completions.create({
47 model: completionOptions.model,
48 store: completionOptions.store,
130 }]
131 : [];
132 const formattedFunctionResponseData = await openai.chat.completions.create({
133 model: completionOptions.model,
134 store: completionOptions.store,
askSMHI

askSMHImoderation6 matches

@ljus•Updated 3 months ago
1import { fetch } from "https://esm.town/v/std/fetch";
2import process from "node:process";
3import { OpenAI } from "npm:openai";
4
5export async function openAIModeration(publicMessages: string[]) {
6 const openAIKey = process.env.OPENAI_CHAT;
7 const headers = {
8 "Content-Type": "application/json",
9 Authorization: `Bearer ${openAIKey}`,
10 };
11 const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
12 const responseData = await openai.moderations.create({ input: publicMessages });
13 return { flagged: responseData.results.some((r) => r.flagged) };
14}

researchAgentemailHandler2 matches

@xdstudiosmedia•Updated 3 months ago
2import process from "node:process";
3import { marked } from "npm:marked";
4import { OpenAI } from "npm:openai";
5
6function pm(...lines: string[]): string {
16 );
17
18 const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
19 const response = await client.chat.completions.create({
20 model: "sonar",

openai2 file matches

@wangqiao1234•Updated 2 days ago

openaiproxy2 file matches

@wangqiao1234•Updated 2 days ago
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai"; import { sqlite } from "https://esm.town/v/stevekrouse/sqlite"; /** * Practical Implementation of Collective Content Intelligence * Bridging advanced AI with collaborative content creation */ exp
kwhinnery_openai