Val Town Code SearchReturn to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=93&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 1639 results for "openai" (1016ms)

LampBlogs main.tsx — 4 matches

@AppleLamps — Updated 4 months ago
191 && new URL(request.url).pathname === "/generate-blog"
192 ) {
193 const { OpenAI } = await import("https://esm.town/v/std/openai");
194 const openai = new OpenAI();
195
196 try {
199
200 if (topic === "RANDOM") {
201 const randomTopicCompletion = await openai.chat.completions.create({
202 messages: [
203 {
220 }
221
222 const completion = await openai.chat.completions.create({
223 messages: [
224 {

weatherGPT main.tsx — 3 matches

@lisazz — Updated 4 months ago
1import { email } from "https://esm.town/v/std/email?v=11";
2import { OpenAI } from "npm:openai";
3
4let location = "Asia, Taipei";
8).then(r => r.json());
9
10const openai = new OpenAI();
11let chatCompletion = await openai.chat.completions.create({
12 messages: [{
13 role: "user",

getModelBuilder main.tsx — 14 matches

@lisazz — Updated 4 months ago
5export async function getModelBuilder(spec: {
6 type?: "llm" | "chat" | "embedding";
7 provider?: "openai" | "huggingface";
8} = { type: "llm", provider: "openai" }, options?: any) {
9 // 3. 從 lodash-es 中導入函數
10 const { extend, cond, matches, invoke } = await import("npm:lodash-es");
22 // 5. 為每個提供者設置 API 密鑰
23 const args = extend({ callbacks }, options);
24 if (spec?.provider === "openai")
25 args.openAIApiKey = process.env.OPENAI;
26 else if (spec?.provider === "huggingface")
27 args.apiKey = process.env.HUGGINGFACE;
30 const setup = cond([
31 [
32 matches({ type: "llm", provider: "openai" }),
33 async () => {
34 const { OpenAI } = await import("npm:langchain/llms/openai");
35 return new OpenAI(args);
36 },
37 ],
38 [
39 matches({ type: "chat", provider: "openai" }),
40 async () => {
41 const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
42 return new ChatOpenAI(args);
43 },
44 ],
45 [
46 matches({ type: "embedding", provider: "openai" }),
47 async () => {
48 const { OpenAIEmbeddings } = await import(
49 "npm:langchain/embeddings/openai"
50 );
51 return new OpenAIEmbeddings(args);
52 },
53 ],

LampChat main.tsx — 4 matches

@AppleLamps — Updated 4 months ago
287export default async function server(request) {
288 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
289 const { OpenAI } = await import("https://esm.town/v/std/openai");
290 const openai = new OpenAI();
291
292 try {
339 }
340
341 const completion = await openai.chat.completions.create({
342 messages: messages,
343 model: imageFiles.length > 0 ? "chatgpt-4o-latest" : "gpt-4o",
350 );
351 } catch (error) {
352 console.error("OpenAI API error:", error);
353 return new Response(
354 JSON.stringify({ error: "Failed to get AI response" }),

GROKPROMPT main.tsx — 11 matches

@AppleLamps — Updated 4 months ago
526 * Server-side logic for generating prompts.
527 * The server expects a POST request to "/generate-prompts" with either text or image form data.
528 * It uses OpenAI API to generate detailed and concise prompts.
529 */
530export default async function server(request: Request): Promise<Response> {
531 if (request.method === "POST" && new URL(request.url).pathname === "/generate-prompts") {
532 try {
533 const { OpenAI } = await import("https://esm.town/v/std/openai");
534 const openai = new OpenAI();
535 const formData = await request.formData();
536 const inputType = formData.get("inputType") as string;
627 }
628
629 console.log("Sending request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
630
631 const completion = await openai.chat.completions.create({
632 model: "chatgpt-4o-latest",
633 messages: analysisMessages,
635 });
636
637 console.log("Received response from OpenAI:", JSON.stringify(completion, null, 2));
638
639 const content = completion.choices[0].message.content || "";
695 if (request.method === "POST" && new URL(request.url).pathname === "/clarify-prompts") {
696 try {
697 const { OpenAI } = await import("https://esm.town/v/std/openai");
698 const openai = new OpenAI();
699 const { inputType, originalInput, clarificationAnswer } = await request.json();
700
749 const analysisMessages = [systemMessage, userMessage];
750
751 console.log("Sending clarification request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
752
753 const completion = await openai.chat.completions.create({
754 model: "chatgpt-4o-latest",
755 messages: analysisMessages,
757 });
758
759 console.log("Received clarification response from OpenAI:", JSON.stringify(completion, null, 2));
760
761 const content = completion.choices[0].message.content || "";

VEOPROMPTER main.tsx — 5 matches

@AppleLamps — Updated 4 months ago
307 const url = new URL(request.url);
308 if (url.pathname === "/generate-prompts") {
309 const { OpenAI } = await import("https://esm.sh/openai@4.11.1");
310 const openai = new OpenAI({
311 apiKey: Deno.env.get("OPENAI_API_KEY"),
312 });
313
316
317 try {
318 const completion = await openai.chat.completions.create({
319 model: "chatgpt-4o-latest", // Changed from "gpt-4o-latest" to "chatgpt-4o-latest"
320 messages: [
364
365 if (!response) {
366 throw new Error("No response from OpenAI");
367 }
368

chat main.tsx — 6 matches

@lisazz — Updated 4 months ago
5 options = {},
6) => {
7 // Initialize OpenAI API stub
8 const { Configuration, OpenAIApi } = await import(
9 "https://esm.sh/openai@3.3.0"
10 );
11 const configuration = new Configuration({
12 apiKey: process.env.OPENAI,
13 });
14 const openai = new OpenAIApi(configuration);
15 // Request chat completion
16 const messages = typeof prompt === "string"
17 ? [{ role: "user", content: prompt }]
18 : prompt;
19 const { data } = await openai.createChatCompletion({
20 model: "gpt-3.5-turbo-0613",
21 messages,

byob README.md — 1 match

@yawnxyz — Updated 4 months ago
11
12### Toolings
13* Llms can uses [tools](https://platform.openai.com/docs/guides/function-calling), meaning you can make this an agent and a whole lot more useful.
14
15

byob main.tsx — 3 matches

@yawnxyz — Updated 4 months ago
1import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
2import { z } from "https://esm.sh/zod";
3import { email } from "https://esm.town/v/std/email";
4import { OpenAI } from "https://esm.town/v/std/openai";
5
6export default async function(e: Email) {
7 const client = new OpenAI();
8
9 const Messages = z.object({

chatgptchess main.tsx — 4 matches

@nguyenquangdinh82 — Updated 4 months ago
1import { OpenAI } from "https://esm.town/v/std/openai?v=5"
2import { sqlite } from "https://esm.town/v/std/sqlite?v=6"
3import { Chess } from "npm:chess.js"
131 return c.html(`<body>
132 <div class='p-4'>
133 <h2 class='font-bold'>OpenAI Chess</h2>
134 <p class='pb-4'>Play chess against ChatGPT-4</p>
135 <div id="myBoard" style="width: 400px"></div>
171 chess.move(san)
172
173 const openai = new OpenAI()
174
175 let messages = []
182 args: [c.req.param().id, `Requesting response to ${san}`],
183 })
184 const completion = await openai.chat.completions.create({
185 messages: [
186 {

translateToEnglishWithOpenAI — 1 file match

@shlmt — Updated 1 week ago

testOpenAI — 1 file match

@stevekrouse — Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",