Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=90&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1614 results for "openai" (847ms)

Test00_getModelBuilder — main.tsx — 11 matches

@lisazz — Updated 4 months ago
2export async function getModelBuilder(spec: {
3 type?: "llm" | "chat" | "embedding";
4 provider?: "openai" | "huggingface";
5} = { type: "llm", provider: "openai" }, options?: any) {
6 // 2. 使用動態導入以確保兼容性
7 const { extend, cond, matches } = await import("https://esm.sh/lodash-es");
16 const setup = cond([
17 [
18 matches({ type: "llm", provider: "openai" }),
19 async () => {
20 const { OpenAI } = await import("https://esm.sh/langchain/llms/openai");
21 return new OpenAI(args);
22 },
23 ],
24 [
25 matches({ type: "chat", provider: "openai" }),
26 async () => {
27 const { ChatOpenAI } = await import("https://esm.sh/langchain/chat_models/openai");
28 return new ChatOpenAI(args);
29 },
30 ],
31 [
32 matches({ type: "embedding", provider: "openai" }),
33 async () => {
34 const { OpenAIEmbeddings } = await import("https://esm.sh/langchain/embeddings/openai");
35 return new OpenAIEmbeddings(args);
36 },
37 ],

MILLENCHAT — main.tsx — 16 matches

@LucasMillen — Updated 4 months ago
2// {
3// "name": "AI Chat Assistant",
4// "description": "A chat assistant using OpenAI's API",
5// "permissions": ["env"]
6// }
88}
89
90async function callOpenAI(userMessage: string): Promise<string> {
91 const apiKey = Deno.env.get("OPENAI_API_KEY");
92
93 if (!apiKey) {
94 throw new Error("OpenAI API key is not configured. Please set the OPENAI_API_KEY environment variable.");
95 }
96
97 try {
98 const response = await fetch('https://api.openai.com/v1/chat/completions', {
99 method: 'POST',
100 headers: {
117 if (!response.ok) {
118 const errorBody = await response.text();
119 throw new Error(`OpenAI API error: ${response.status} - ${errorBody}`);
120 }
121
124 "I'm not sure how to respond to that.";
125 } catch (error) {
126 console.error("OpenAI API Call Error:", error);
127 throw error;
128 }
140 const [input, setInput] = useState('');
141 const [isLoading, setIsLoading] = useState(false);
142 const [openAIError, setOpenAIError] = useState<string | null>(null);
143 const messagesEndRef = useRef<HTMLDivElement>(null);
144
167 setInput('');
168 setIsLoading(true);
169 setOpenAIError(null);
170
171 try {
172 const botReply = await callOpenAI(userMessage);
173 addMessage(botReply, 'bot');
174 } catch (error) {
179 : String(error);
180
181 setOpenAIError(errorMessage);
182 const fallbackResponse = generateFallbackResponse(userMessage);
183 addMessage(`Sorry, AI service error: ${errorMessage}. Fallback response: ${fallbackResponse}`, 'bot');
195 backgroundColor: '#f4f4f4'
196 }}>
197 {openAIError && (
198 <div style={{
199 backgroundColor: '#f8d7da',
203 borderRadius: '5px'
204 }}>
205 OpenAI Error: {openAIError}
206 </div>
207 )}
279
280export default async function server(request: Request): Promise<Response> {
281 // Check if OpenAI API key is configured
282 const apiKey = Deno.env.get("OPENAI_API_KEY");
283
284 if (!apiKey) {
313 <div class="error-container">
314 <h1>🚨 Configuration Error</h1>
315 <p>OpenAI API key is not configured. Please set the OPENAI_API_KEY environment variable.</p>
316 <p>Contact the val owner to resolve this issue.</p>
317 </div>

openai_svg — main.tsx — 4 matches

@stevekrouse — Updated 4 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2
3export default async function (req: Request): Promise<Response> {
117 }
118
119 // Generate SVG using OpenAI
120 const openai = new OpenAI();
121 const completion = await openai.chat.completions.create({
122 messages: [
123 { role: "system", content: "You are a helpful assistant that generates simple SVG images based on text input. Return only valid SVG code wrapped in ```xml tags without any explanation." },

dreamInterpreterApp — main.tsx — 3 matches

@Bilelghrsalli — Updated 4 months ago
68export default async function server(request: Request): Promise<Response> {
69 if (request.method === "POST" && new URL(request.url).pathname === "/interpret") {
70 const { OpenAI } = await import("https://esm.town/v/std/openai");
71 const openai = new OpenAI();
72
73 const { dream } = await request.json();
74
75 try {
76 const completion = await openai.chat.completions.create({
77 messages: [
78 {

openai_svg — README.md — 1 match

@stevekrouse — Updated 4 months ago
1# OpenAI SVG
2
3Generates an SVG based on the ?text input param

LampBlogs — main.tsx — 4 matches

@AppleLamps — Updated 4 months ago
191 && new URL(request.url).pathname === "/generate-blog"
192 ) {
193 const { OpenAI } = await import("https://esm.town/v/std/openai");
194 const openai = new OpenAI();
195
196 try {
199
200 if (topic === "RANDOM") {
201 const randomTopicCompletion = await openai.chat.completions.create({
202 messages: [
203 {
220 }
221
222 const completion = await openai.chat.completions.create({
223 messages: [
224 {

weatherGPT — main.tsx — 3 matches

@lisazz — Updated 4 months ago
1import { email } from "https://esm.town/v/std/email?v=11";
2import { OpenAI } from "npm:openai";
3
4let location = "Asia, Taipei";
8).then(r => r.json());
9
10const openai = new OpenAI();
11let chatCompletion = await openai.chat.completions.create({
12 messages: [{
13 role: "user",

getModelBuilder — main.tsx — 14 matches

@lisazz — Updated 4 months ago
5export async function getModelBuilder(spec: {
6 type?: "llm" | "chat" | "embedding";
7 provider?: "openai" | "huggingface";
8} = { type: "llm", provider: "openai" }, options?: any) {
9 // 3. 從 lodash-es 中導入函數
10 const { extend, cond, matches, invoke } = await import("npm:lodash-es");
22 // 5. 為每個提供者設置 API 密鑰
23 const args = extend({ callbacks }, options);
24 if (spec?.provider === "openai")
25 args.openAIApiKey = process.env.OPENAI;
26 else if (spec?.provider === "huggingface")
27 args.apiKey = process.env.HUGGINGFACE;
30 const setup = cond([
31 [
32 matches({ type: "llm", provider: "openai" }),
33 async () => {
34 const { OpenAI } = await import("npm:langchain/llms/openai");
35 return new OpenAI(args);
36 },
37 ],
38 [
39 matches({ type: "chat", provider: "openai" }),
40 async () => {
41 const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
42 return new ChatOpenAI(args);
43 },
44 ],
45 [
46 matches({ type: "embedding", provider: "openai" }),
47 async () => {
48 const { OpenAIEmbeddings } = await import(
49 "npm:langchain/embeddings/openai"
50 );
51 return new OpenAIEmbeddings(args);
52 },
53 ],

LampChat — main.tsx — 4 matches

@AppleLamps — Updated 4 months ago
287export default async function server(request) {
288 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
289 const { OpenAI } = await import("https://esm.town/v/std/openai");
290 const openai = new OpenAI();
291
292 try {
339 }
340
341 const completion = await openai.chat.completions.create({
342 messages: messages,
343 model: imageFiles.length > 0 ? "chatgpt-4o-latest" : "gpt-4o",
350 );
351 } catch (error) {
352 console.error("OpenAI API error:", error);
353 return new Response(
354 JSON.stringify({ error: "Failed to get AI response" }),

GROKPROMPT — main.tsx — 11 matches

@AppleLamps — Updated 4 months ago
526 * Server-side logic for generating prompts.
527 * The server expects a POST request to "/generate-prompts" with either text or image form data.
528 * It uses OpenAI API to generate detailed and concise prompts.
529 */
530export default async function server(request: Request): Promise<Response> {
531 if (request.method === "POST" && new URL(request.url).pathname === "/generate-prompts") {
532 try {
533 const { OpenAI } = await import("https://esm.town/v/std/openai");
534 const openai = new OpenAI();
535 const formData = await request.formData();
536 const inputType = formData.get("inputType") as string;
627 }
628
629 console.log("Sending request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
630
631 const completion = await openai.chat.completions.create({
632 model: "chatgpt-4o-latest",
633 messages: analysisMessages,
635 });
636
637 console.log("Received response from OpenAI:", JSON.stringify(completion, null, 2));
638
639 const content = completion.choices[0].message.content || "";
695 if (request.method === "POST" && new URL(request.url).pathname === "/clarify-prompts") {
696 try {
697 const { OpenAI } = await import("https://esm.town/v/std/openai");
698 const openai = new OpenAI();
699 const { inputType, originalInput, clarificationAnswer } = await request.json();
700
749 const analysisMessages = [systemMessage, userMessage];
750
751 console.log("Sending clarification request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
752
753 const completion = await openai.chat.completions.create({
754 model: "chatgpt-4o-latest",
755 messages: analysisMessages,
757 });
758
759 console.log("Received clarification response from OpenAI:", JSON.stringify(completion, null, 2));
760
761 const content = completion.choices[0].message.content || "";

translateToEnglishWithOpenAI — 1 file match

@shlmt — Updated 3 days ago

testOpenAI — 1 file match

@stevekrouse — Updated 5 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",