Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=97&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1610 results for "openai" (518ms)

Ms_Spangler — main.tsx — 4 matches

@jidun•Updated 5 months ago
287
288 try {
289 const { OpenAI } = await import("https://esm.town/v/std/openai");
290 const openai = new OpenAI();
291
292 const completion = await openai.chat.completions.create({
293 messages: [
294 {
307 });
308 } catch (error) {
309 console.error("OpenAI error:", error);
310 return c.json({ response: "Neural networks malfunctioning. Try again, human." });
311 }

multilingualchatroom — main.tsx — 3 matches

@lloydpearsoniv•Updated 5 months ago
498 const url = new URL(request.url);
499 const { blob } = await import("https://esm.town/v/std/blob");
500 const { OpenAI } = await import("https://esm.town/v/std/openai");
501
502 const openai = new OpenAI();
503
504 // Simple rate limiting
537
538 try {
539 const completion = await openai.chat.completions.create({
540 messages: [
541 {

HermesTresmegistus — main.tsx — 4 matches

@jidun•Updated 5 months ago
287
288 try {
289 const { OpenAI } = await import("https://esm.town/v/std/openai");
290 const openai = new OpenAI();
291
292 const completion = await openai.chat.completions.create({
293 messages: [
294 {
307 });
308 } catch (error) {
309 console.error("OpenAI error:", error);
310 return c.json({ response: "Neural networks malfunctioning. Try again, human." });
311 }

VALLE — main.tsx — 3 matches

@ubixsnow•Updated 5 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2const openai = new OpenAI();
3const completion = await openai.chat.completions.create({
4 messages: [
5 { role: "user", content: "Say hello in a creative way" },

VALLE — README.md — 1 match

@ubixsnow•Updated 5 months ago
6* Fork this val to your own profile.
7* Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in `tempValsParentFolderId`.
8* If you want to use OpenAI models you need to set the `OPENAI_API_KEY` [env var](https://www.val.town/settings/environment-variables).
9* If you want to use Anthropic models you need to set the `ANTHROPIC_API_KEY` [env var](https://www.val.town/settings/environment-variables).
10* Create a [Val Town API token](https://www.val.town/settings/api), open the browser preview of this val, and use the API token as the password to log in.

webpage_summarizer — main.tsx — 4 matches

@ubixsnow•Updated 5 months ago
104 const webpageText = await webpageResponse.text();
105
106 // Use OpenAI as Claude proxy
107 const { OpenAI } = await import("https://esm.town/v/std/openai");
108 const openai = new OpenAI();
109
110 const completion = await openai.chat.completions.create({
111 messages: [
112 {

travelSurveyApp — main.tsx — 3 matches

@ntontischris•Updated 5 months ago
47 async function generateRecommendation() {
48 try {
49 const { OpenAI } = await import("https://esm.town/v/std/openai");
50 const openai = new OpenAI();
51
52 const completion = await openai.chat.completions.create({
53 messages: [
54 {

lang_code — main.tsx — 6 matches

@faseeu•Updated 5 months ago
6
7// Updated LangChain-compatible imports
8import { OpenAI } from "https://esm.town/v/std/openai";
9
10// Custom LangChain-like Workflow
11class CodeGenerationWorkflow {
12 private openai: OpenAI;
13
14 constructor() {
15 this.openai = new OpenAI();
16 }
17
39 const fullPrompt = baseCodePrompt.format({ prompt });
40
41 const completion = await this.openai.chat.completions.create({
42 messages: [{ role: "user", content: fullPrompt }],
43 model: "gpt-4o-mini",
66 const fullPrompt = reviewPrompt.format({ baseCode });
67
68 const completion = await this.openai.chat.completions.create({
69 messages: [{ role: "user", content: fullPrompt }],
70 model: "gpt-4o-mini",
91 const fullPrompt = docPrompt.format({ code });
92
93 const completion = await this.openai.chat.completions.create({
94 messages: [{ role: "user", content: fullPrompt }],
95 model: "gpt-4o-mini",

tidyRedWhale — main.tsx — 3 matches

@websrai•Updated 5 months ago
84export default async function server(request: Request): Promise<Response> {
85 if (request.method === 'POST' && new URL(request.url).pathname === '/chat') {
86 const { OpenAI } = await import("https://esm.town/v/std/openai");
87 const openai = new OpenAI();
88
89 try {
90 const { messages } = await request.json();
91
92 const completion = await openai.chat.completions.create({
93 model: "gpt-4o-mini",
94 messages: messages,

generateImageSummary — main.tsx — 3 matches

@jackd•Updated 5 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2import { fileToDataURL } from "https://esm.town/v/stevekrouse/fileToDataURL";
3
4export async function generateImageSummary(image: File): Promise<string[]> {
5 const openai = new OpenAI();
6 const dataURL = await fileToDataURL(image);
7 const response = await openai.chat.completions.create({
8 model: "gpt-4-vision-preview",
9 messages: [

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 2 days ago

testOpenAI — 1 file match

@stevekrouse•Updated 4 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",