7* Uses 'npm:pdf.js-extract' for PDF extraction.
8* Serves HTML UI & API endpoint from the same Val.
9* Assumes 'openai' secret is set in Val Town environment variables.
10*
11* Last Updated: {{current_date}} (Templated Version)
18* max_pdf_size_mb: {{max_pdf_size_mb}}, // e.g., 10
19* text_truncation_length: {{text_truncation_length}}, // e.g., 25000
20* openai_model_name: "{{openai_model_name}}", // e.g., "gpt-4o"
21* contact_form_placeholders_en: { name: "Your Name", email: "Your Email", message: "Message" },
22* contact_form_placeholders_es: { name: "Tu Nombre", email: "Tu Correo", message: "Mensaje" },
639export default async function(req: Request) {
640// --- Dynamic Imports (Unchanged) ---
641const { OpenAI } = await import("https://esm.town/v/std/openai");
642// const { z } = await import("npm:zod"); // Zod might be optional if config is trusted
643const { fetch } = await import("https://esm.town/v/std/fetch");
645646// --- CONFIGURATION (These would be replaced by the template variables at generation time) ---
647const APP_CONFIG = `\{{\app_config_json}}`; // e.g., { openai_model_name: "gpt-4o", text_truncation_length: 25000, ... }
648const ANALYSIS_AGENTS = `\{\{analysis_agents_json}}`; // Array of agent objects
649651async function extractPdfTextNative(data: ArrayBuffer, fileName: string, log: LogEntry[]): Promise<string | null> { /* ... original ... */ }
652653// --- Helper Function: Call OpenAI API (Uses APP_CONFIG for model) ---
654async function callOpenAI(
655openai: OpenAI,
656systemPrompt: string,
657userMessage: string,
658modelFromConfig = APP_CONFIG.openai_model_name || "gpt-4o", // Use configured model
659expectJson = false,
660): Promise<{ role: "assistant" | "system"; content: string | object }> {
661/* ... original logic, but use modelFromConfig ... */
662const model = modelFromConfig;
663// ... rest of the original callOpenAI function
664try {
665const response = await openai.chat.completions.create({
666model,
667messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
701log: LogEntry[],
702): Promise<LogEntry[]> {
703const openai = new OpenAI();
704log.push({ agent: "System", type: "step", message: "Workflow started." });
705// ... initial logging of input type ...
730// If chaining is needed, {{previous_output}} could be another placeholder in prompts.
731732const agentResult = await callOpenAI(
733openai,
734agentSystemPrompt, // The agent's specific prompt
735truncText, // User message is the doc text itself, or could be empty if prompt is self-contained
736APP_CONFIG.openai_model_name,
737agentConfig.expects_json
738);
830* 1. Define Application Configuration:
831* Fill in the \`{{app_config_json}}\` placeholder with general settings for your app
832* (e.g., OpenAI model, max file size, default language).
833*
834* 2. Define Analysis Agents:
836* - `agent_id`: A unique machine-readable ID.
837* - `agent_name_en`/`agent_name_es`: Human-readable names for UI and logs.
838* - `system_prompt`: The OpenAI prompt for this agent. Can use `{{document_text}}`.
839* - `expects_json`: Boolean indicating whether the prompt asks OpenAI to return JSON output.
840* - `ui_display_info`: How to render this agent's results:
841* - `card_title_en`/`card_title_es`: Title for the results card.
857* and `{{app_config.document_format_accepted_label}}` (e.g. "PDF") for UI text.
858*
859* 5. OpenAI API Key:
860* Ensure your environment (e.g., Val Town secrets) has the `OPENAI_API_KEY` (or the appropriate
861* environment variable name for the `OpenAI` library) set.
862*
863* 6. Deployment:
74### Tech Stack
75- **Backend**: Hono.js for API routing
76- **AI**: OpenAI GPT-4o-mini for content analysis
77- **Web Scraping**: Cheerio for HTML parsing
78- **Frontend**: Vanilla JavaScript with TailwindCSS
91## 🔧 Environment Setup
9293The analyzer uses OpenAI's API, which is automatically configured in Val Town. No additional setup is required!
9495## 📊 What It Analyzes
1import { Hono } from "https://esm.sh/hono@3.11.7";
2import { OpenAI } from "https://esm.town/v/std/openai";
3import { readFile } from "https://esm.town/v/std/utils/index.ts";
4import { analyzeWebPage } from "./analyzer.ts";
111}
112
113if (error.message.includes("OpenAI") || error.message.includes("API key")) {
114return c.json({ error: "AI analysis service requires API key configuration. Please contact the administrator." }, 503);
115}
4// SERVER-SIDE LOGIC (TypeScript)
5// =============================================================================
6import { OpenAI } from "https://esm.town/v/std/openai";
78// --- Configuration ---
23maskSrc?: string;
24}
25interface OpenAIResponse {
26races: RaceInfo[];
27}
105];
106107// --- OpenAI Generation Function ---
108async function generateRaceDataWithOpenAI(): Promise<RaceInfo[]> {
109const openai = new OpenAI();
110const numToRequest = Math.max(1, NUM_CARDS_TO_GENERATE);
111const prompt =
126Return STRICTLY as a single JSON object: { "races": [ { race1 }, { race2 }, ... ] }. No introductory text or explanations outside the JSON structure.`;
127try {
128console.info(`Requesting ${numToRequest} race data generation from OpenAI...`);
129const completion = await openai.chat.completions.create({
130model: "gpt-4o",
131messages: [{ role: "user", content: prompt }],
134});
135const rawContent = completion.choices[0]?.message?.content;
136if (!rawContent) throw new Error("OpenAI returned an empty response message.");
137138let parsedJson;
140parsedJson = JSON.parse(rawContent);
141} catch (parseError) {
142console.error("Failed to parse OpenAI JSON response:", parseError);
143console.error("Raw OpenAI response:", rawContent);
144throw new Error(`JSON Parsing Error: ${parseError.message}`);
145}
160) {
161console.warn(
162`OpenAI response JSON failed validation for ${numToRequest} races:`,
163JSON.stringify(parsedJson, null, 2),
164);
165throw new Error(
166"OpenAI response JSON structure, count, data types, color format, hint value, or mask URL invalid.",
167);
168}
169170const generatedData = (parsedJson as OpenAIResponse).races.map(race => ({
171...race,
172borderAnimationHint: race.borderAnimationHint || "none",
173}));
174console.info(`Successfully generated and validated ${generatedData.length} races from OpenAI.`);
175return generatedData;
176} catch (error) {
177console.error("Error fetching or processing data from OpenAI:", error);
178console.warn("Using fallback race data due to the error.");
179return fallbackRaceData.slice(0, numToRequest).map(race => ({
186// --- Main HTTP Handler (Val Town Entry Point) ---
187export default async function server(request: Request): Promise<Response> {
188const activeRaceData = await generateRaceDataWithOpenAI();
189190const css = `
x402-demoserver.tsx3 matches
1import { OpenAI } from "https://esm.town/v/std/openai";
2import { type Context, Hono } from "npm:hono";
3import { paymentMiddleware } from "npm:x402-hono";
45const openai = new OpenAI();
67const app = new Hono();
2829app.get("/jokes", async (c: Context) => {
30const completion = await openai.chat.completions.create({
31messages: [
32{ role: "user", content: "Tell a punny programming joke" },
stevensDemo.cursorrules4 matches
100Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
101102### OpenAI
103```ts
104import { OpenAI } from "https://esm.town/v/std/openai";
105const openai = new OpenAI();
106const completion = await openai.chat.completions.create({
107messages: [
108{ role: "user", content: "Say hello in a creative way" },
8- **SQLite Database**: Execute queries and manage database operations
9- **Email**: Send emails through Val Town's email service
10- **OpenAI Integration**: Access OpenAI API through Val Town's service
11- **File Operations**: Read and list project files
12- **Environment Variables**: Access environment configuration
40- `email_send` - Send emails
41424. **OpenAI**
43- `openai_chat` - Generate text with OpenAI
44455. **File Operations**
6import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
7import { email } from "https://esm.town/v/std/email";
8import { OpenAI } from "https://esm.town/v/std/openai";
9import { readFile, listFiles } from "https://esm.town/v/std/utils@85-main/index.ts";
10import { MCPTool, MCPToolResult } from "./types.ts";
85},
86{
87name: "openai_chat",
88description: "Generate text using OpenAI's chat completion API",
89inputSchema: {
90type: "object",
102}
103},
104model: { type: "string", default: "gpt-4o-mini", description: "OpenAI model to use" },
105max_tokens: { type: "number", description: "Maximum tokens to generate" },
106temperature: { type: "number", description: "Sampling temperature" }
156case "email_send":
157return await handleEmailSend(args);
158case "openai_chat":
159return await handleOpenAIChat(args);
160case "file_read":
161return await handleFileRead(args);
285}
286287async function handleOpenAIChat(args: any): Promise<MCPToolResult> {
288validateRequired(args, ["messages"]);
289
290const openai = new OpenAI();
291const options: any = {
292messages: args.messages,
297if (args.temperature !== undefined) options.temperature = args.temperature;
298
299const completion = await openai.chat.completions.create(options);
300
301return {