async function fetchAIResponse(query: string): Promise<string> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const completion = await openai.chat.completions.create({
    messages: [
      { role: "system", content: "You are a helpful voice assistant. Keep responses concise and friendly." },
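The helper above is cut off after the system message. A minimal sketch of how it could be completed, assuming the user's query is sent as a second message and the reply is returned as plain text (the model name, token limit, and fallback string are illustrative, not from the original val):

```ts
// Sketch only: a completed fetchAIResponse-style helper using std/openai.
async function fetchAIResponseSketch(query: string): Promise<string> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const completion = await openai.chat.completions.create({
    messages: [
      { role: "system", content: "You are a helpful voice assistant. Keep responses concise and friendly." },
      { role: "user", content: query },
    ],
    model: "gpt-4o-mini", // illustrative choice
    max_tokens: 200,
  });

  // Fall back to a fixed reply if the model returns no content.
  return completion.choices[0]?.message?.content ?? "Sorry, I couldn't come up with a reply.";
}
```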
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const { email: sendEmail } = await import("https://esm.town/v/std/email");
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const KEY = "bhaavsynthlandipgpage";
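For reference, a minimal sketch of how the two std utilities imported above are typically called; the table name, columns, and email text below are illustrative, not taken from the original val:

```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
import { email as sendEmail } from "https://esm.town/v/std/email";

// Illustrative table; the original val's schema is not shown above.
await sqlite.execute(`CREATE TABLE IF NOT EXISTS demo_log_1 (id INTEGER PRIMARY KEY AUTOINCREMENT, note TEXT)`);
await sqlite.execute({
  sql: `INSERT INTO demo_log_1 (note) VALUES (?)`,
  args: ["landing page visit"],
});

// std/email sends to the val's owner by default; subject and text are placeholders.
await sendEmail({ subject: "New activity", text: "Someone interacted with the landing page." });
```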
import { readFile } from "https://esm.town/v/std/utils@71-main/index.ts";
import { createOpenAI } from "npm:@ai-sdk/openai";
import { streamText, tool } from "npm:ai";
import { z } from "npm:zod";
// ...
});

const openai = createOpenAI({
  baseURL: "https://std-openaiproxy.web.val.run/v1",
  apiKey: Deno.env.get("valtown"),
  organization: undefined,
// ...
  try {
    const result = await streamText({
      model: openai("gpt-4o-mini"),
      messages,
      tools: {
        // ...
    });
  } catch (error) {
    console.error("OpenAI API error:", error);
    return new Response("An error occurred during the analysis.", { status: 500 });
  }
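The `tools` map passed to `streamText` is elided above. For context, a sketch of what a single entry typically looks like with the `ai` SDK's `tool()` helper and a zod schema; the tool name and behavior are invented for illustration and are not the original val's tools (note: in ai v5 the schema field is `inputSchema` rather than `parameters`):

```ts
import { tool } from "npm:ai";
import { z } from "npm:zod";

// Hypothetical tool definition (not from the original val).
const getWeather = tool({
  description: "Get the current weather for a city",
  parameters: z.object({
    city: z.string().describe("City name, e.g. Berlin"),
  }),
  // `execute` runs when the model calls the tool; its return value is sent back to the model.
  execute: async ({ city }) => ({ city, forecast: "sunny", temperatureC: 21 }),
});

// Passed to streamText as: tools: { getWeather }
```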
// Import a deep cloning library for safe state updates
import cloneDeep from "https://esm.sh/lodash-es@4.17.21/cloneDeep";
// Use Val Town's std/openai as shown in the example
import { OpenAI } from "https://esm.town/v/std/openai";

// --- TypeScript Interfaces: Defining the AI's Structured Output ---
// ...
  console.log(`Analyzing: "${sentence.substring(0, 50)}..."`);

  // --- OpenAI Setup using std/openai ---
  // NOTE: Using std/openai as shown in the example; API key handling is managed by Val Town's infrastructure.
  // If issues arise, consider switching back to `npm:openai` with an explicit API key secret.
  console.log("Using std/openai client");
  const openai = new OpenAI(); // std/openai handles auth/key implicitly

  // --- System Prompt for Structured Tree Output ---
  const systemPrompt =
    `You are an AI language structuring expert... [PROMPT REMAINS THE SAME AS PREVIOUS VERSION - asking for AnalysisTree JSON with effects, IDs, options etc, using double quotes in JSON output] ... Output ONLY the raw, valid JSON object using double quotes for all JSON keys and string values.`;

  console.log("Sending prompt to OpenAI (gpt-4o)...");
  const response = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "system", content: systemPrompt }],
    // ...
    max_tokens: 3000,
  });
  console.log(`OpenAI Response. Finish: ${response.choices[0]?.finish_reason}.`);

  const aiResponseContent = response.choices[0]?.message?.content;
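The note above mentions falling back to `npm:openai` with an explicit API key secret. A minimal sketch of that alternative, assuming an `OPENAI_API_KEY` secret is configured for the val:

```ts
import OpenAI from "npm:openai";

// Explicit-key alternative to std/openai; requires an OPENAI_API_KEY environment secret.
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

const response = await openai.chat.completions.create({
  model: "gpt-4o",
  // The word "JSON" must appear in the prompt when json_object mode is enabled.
  messages: [{ role: "system", content: "Return ONLY a valid JSON object." }],
  response_format: { type: "json_object" },
  max_tokens: 3000,
});
console.log(response.choices[0]?.message?.content);
```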
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

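A short sketch of that table-renaming pattern with Val Town's sqlite helper; the table name and columns are illustrative:

```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";

// The old schema lived in `todos_1`; after adding a column, bump the suffix so a
// fresh table is created rather than conflicting with the existing one.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS todos_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    title TEXT NOT NULL,
    done INTEGER DEFAULT 0,
    due_date TEXT -- new column that motivated the version bump
  )
`);
```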
### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
// --- Backend Server (Val Town) ---
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");

  if (request.method === "OPTIONS") {
    // ...
  if (request.method === "POST" && new URL(request.url).pathname === "/grade-card") {
    try {
      // Check for OpenAI API Key in Val Town secrets
      const openai = new OpenAI(); // Assumes OPENAI_API_KEY is set in Val Town secrets
      const { imageBase64 } = await request.json();
      if (!imageBase64 || typeof imageBase64 !== "string") {
        // ...
      }

      console.log("Received image data, preparing request to OpenAI...");

      const modelChoice = "gpt-4o"; // Or "gpt-4o-mini"

      // --- Construct the detailed prompt for OpenAI ---
      const prompt = `
You are an expert collectible card grader simulating a PSA-style inspection based SOLELY on the provided image. Analyze the card image and provide a detailed N-point inspection report in JSON format.
...
`;

      const response = await openai.chat.completions.create({
        model: modelChoice,
        response_format: { type: "json_object" },
        // ...
      });

      console.log("Received response from OpenAI.");

      const jsonString = response.choices[0]?.message?.content;
      if (!jsonString) {
        throw new Error("OpenAI did not return valid response content.");
      }

      // ...
        console.log("Successfully parsed JSON response.");
      } catch (parseError) {
        console.error("Failed to parse JSON from OpenAI:", jsonString);
        throw new Error("AI returned improperly formatted JSON data.");
      }
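The `messages` array that carries the image is elided from the request above. For reference, a sketch of how a base64-encoded image is typically attached to a vision-capable chat completion, reusing `openai`, `prompt`, `modelChoice`, and `imageBase64` from the handler; the JPEG data-URL prefix and token limit are assumptions:

```ts
// Sketch: attaching the base64 image alongside the grading prompt.
const response = await openai.chat.completions.create({
  model: modelChoice,
  response_format: { type: "json_object" },
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: prompt },
        { type: "image_url", image_url: { url: `data:image/jpeg;base64,${imageBase64}` } },
      ],
    },
  ],
  max_tokens: 1500,
});
```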
// --- Backend Server (Val Town) ---
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");

  if (request.method === "OPTIONS") {
    // ...
  if (request.method === "POST" && new URL(request.url).pathname === "/grade-card") {
    try {
      // Check for OpenAI API Key in Val Town secrets
      const openai = new OpenAI();
      const { imageBase64 } = await request.json();
      if (!imageBase64 || typeof imageBase64 !== "string") {
        // ...
      }

      console.log("Received image data, preparing request to OpenAI..."); // Log start

      const modelChoice = "gpt-4o"; // Use GPT-4o for better vision capabilities; "gpt-4o-mini" is cheaper and faster, but potentially less accurate.

      // --- Construct the detailed prompt for OpenAI ---
      const prompt = `
You are an expert collectible card grader simulating a PSA-style inspection based SOLELY on the provided image. Analyze the card image and provide a detailed N-point inspection report in JSON format.
...
`;

      const response = await openai.chat.completions.create({
        model: modelChoice,
        response_format: { type: "json_object" }, // Enforce JSON output
        // ...
      });

      console.log("Received response from OpenAI."); // Log success

      const jsonString = response.choices[0]?.message?.content;
      if (!jsonString) {
        throw new Error("OpenAI did not return valid response content.");
      }

      // Parse the JSON response from OpenAI
      let parsedResponse;
      try {
        parsedResponse = JSON.parse(jsonString);
        console.log("Successfully parsed JSON response.");
      } catch (parseError) {
        console.error("Failed to parse JSON from OpenAI:", jsonString);
        throw new Error("AI returned improperly formatted JSON data.");
      }
export default async function server(request: Request): Promise<Response> {
  // Standard Val Town imports (ensure these versions are suitable)
  const { OpenAI } = await import("https://esm.town/v/std/openai"); // Switch to an npm import if tighter version control is needed
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite"); // Stick with esm.town for sqlite unless issues arise

  // Use an environment variable for the API key (Val Town handles this automatically)
  const openai = new OpenAI(); // std/openai picks up credentials automatically

  // --- Database Setup ---
  // ...

  console.log(`Generating study material for: ${topic}`);
  const studyMaterialResponse = await openai.chat.completions.create({
    model: "gpt-4o-mini", // Or choose another appropriate model
    messages: [
      // ...
  const studyMaterial = studyMaterialResponse.choices[0]?.message?.content;
  if (!studyMaterial) {
    throw new Error("OpenAI did not return study material content.");
  }
  // ...

  console.log(`Generating quiz for: ${topic}`);
  const quizResponse = await openai.chat.completions.create({
    model: "gpt-4o-mini", // Or choose another appropriate model
    response_format: { type: "json_object" }, // Request JSON output
    // ...
  const jsonString = quizResponse.choices[0]?.message?.content;
  if (!jsonString) {
    throw new Error("OpenAI did not return quiz content.");
  }

  // Parse the JSON response from OpenAI
  let parsedQuiz;
  try {
    parsedQuiz = JSON.parse(jsonString);
  } catch (parseError) {
    console.error("Failed to parse JSON from OpenAI:", jsonString);
    throw new Error("Failed to parse quiz data from AI response.");
  }
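The "Database Setup" section and any writes are elided above. A sketch of how the generated material and quiz could be persisted with the sqlite helper, reusing `sqlite`, `topic`, `studyMaterial`, and `parsedQuiz` from the handler; the table name and columns are illustrative, not the original schema:

```ts
// Illustrative persistence step (not the original val's schema).
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS study_sessions_1 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    topic TEXT NOT NULL,
    study_material TEXT,
    quiz_json TEXT,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);
await sqlite.execute({
  sql: `INSERT INTO study_sessions_1 (topic, study_material, quiz_json) VALUES (?, ?, ?)`,
  args: [topic, studyMaterial, JSON.stringify(parsedQuiz)],
});
```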

export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const openai = new OpenAI();
  const KEY = "homerepair";

  // ...
  const { agentId, command, inputs } = await request.json();

  // Simulate command execution with OpenAI
  const messages = [
    {
      // ...

  try {
    const completion = await openai.chat.completions.create({
      messages: messages,
      model: "gpt-4o-mini",
      // ...
      headers: { "Content-Type": "application/json" },
    });
  } catch (openAIError) {
    console.error("OpenAI API Error:", {
      message: openAIError.message,
      name: openAIError.name,
      stack: openAIError.stack,
      response: openAIError.response
        ? await openAIError.response.text()
        : "No response body",
    });

    return new Response(
      JSON.stringify({
        error: "OpenAI API Error",
        details: openAIError.message,
        stackTrace: openAIError.stack,
      }),
      {
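The fragment ends mid-construction of the error response. A sketch of how such a response is typically finished; the 500 status and headers here are assumptions, not shown in the original:

```ts
// Sketch: completing the error response in the catch block.
return new Response(
  JSON.stringify({
    error: "OpenAI API Error",
    details: openAIError.message,
    stackTrace: openAIError.stack,
  }),
  {
    status: 500,
    headers: { "Content-Type": "application/json" },
  },
);
```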