Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=76&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 1632 results for "openai" (1042ms)

festiveRosePeacock/main.tsx — 3 matches

@Tanveer•Updated 3 months ago
108export default async function server(request: Request): Promise<Response> {
109 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
110 const { OpenAI } = await import("https://esm.town/v/std/openai");
111 const openai = new OpenAI();
112
113 const { messages } = await request.json();
114
115 try {
116 const completion = await openai.chat.completions.create({
117 messages: messages,
118 model: "gpt-4o-mini",

telegramWebhookEchoMessage/main.tsx — 3 matches

@ynonp•Updated 3 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2import { telegramSendMessage } from "https://esm.town/v/vtdocs/telegramSendMessage?v=5";
3import translateToEnglishWithOpenAI from "https://esm.town/v/ynonp/translateToEnglishWithOpenAI";
4
5export const telegramWebhookEchoMessage = async (req: Request) => {
21 const chatId: number = body.message.chat.id;
22
23 const translated = await translateToEnglishWithOpenAI(text);
24 await telegramSendMessage(Deno.env.get("TELEGRAM_BOT_TOKEN"), { chat_id: chatId, text: translated });
25

trustingBronzeHedgehog/main.tsx — 3 matches

@ynonp•Updated 3 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2import { telegramSendMessage } from "https://esm.town/v/vtdocs/telegramSendMessage?v=5";
3import translateToEnglishWithOpenAI from "https://esm.town/v/ynonp/translateToEnglishWithOpenAI";
4
5export const telegramWebhookEchoMessage = async (req: Request) => {
21 const chatId: number = body.message.chat.id;
22
23 const translated = await translateToEnglishWithOpenAI(text);
24 await telegramSendMessage(Deno.env.get("TELEGRAM_BOT_TOKEN"), { chat_id: chatId, text: translated });
25

SpeakEnglishToMe_bot/main.tsx — 3 matches

@ynonp•Updated 3 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2import { telegramSendMessage } from "https://esm.town/v/vtdocs/telegramSendMessage?v=5";
3import translateToEnglishWithOpenAI from "https://esm.town/v/ynonp/translateToEnglishWithOpenAI";
4
5export const telegramWebhookEchoMessage = async (req: Request) => {
21 const chatId: number = body.message.chat.id;
22
23 const translated = await translateToEnglishWithOpenAI(text);
24 await telegramSendMessage(Deno.env.get("TELEGRAM_BOT_TOKEN"), { chat_id: chatId, text: translated });
25

admirableWhiteBird/main.tsx — 3 matches

@Tanveer•Updated 3 months ago
108export default async function server(request: Request): Promise<Response> {
109 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
110 const { OpenAI } = await import("https://esm.town/v/std/openai");
111 const openai = new OpenAI();
112
113 const { messages } = await request.json();
114
115 try {
116 const completion = await openai.chat.completions.create({
117 messages: messages,
118 model: "gpt-4o-mini",

caringCoralMoose/main.tsx — 3 matches

@Tanveer•Updated 3 months ago
108export default async function server(request: Request): Promise<Response> {
109 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
110 const { OpenAI } = await import("https://esm.town/v/std/openai");
111 const openai = new OpenAI();
112
113 const { messages } = await request.json();
114
115 try {
116 const completion = await openai.chat.completions.create({
117 messages: messages,
118 model: "gpt-4o-mini",

cerebrasTemplate/README.md — 2 matches

@daisuke•Updated 3 months ago
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"

cerebrasTemplate/main.tsx — 2 matches

@daisuke•Updated 3 months ago
83 const { messages } = await request.json();
84
85 const { OpenAI } = await import("https://esm.sh/openai");
86 // Cerebras Inference
87 // Very important: do not change this
88 // Keep these comments so we remember not to change this
89 const client = new OpenAI({
90 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
91 baseURL: "https://api.cerebras.ai/v1",

AlwaysHere/main.tsx — 2 matches

@AIWB•Updated 3 months ago
309
310 // Cerebras LLM for response generation
311 const { OpenAI } = await import("https://esm.sh/openai");
312 const llmClient = new OpenAI({
313 apiKey: Deno.env.get("FIREWORKS_API"),
314 baseURL: "https://api.fireworks.ai/inference/v1",

AlwaysHere/README.md — 2 matches

@AIWB•Updated 3 months ago
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 6 days ago

testOpenAI — 1 file match

@stevekrouse•Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",