import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { PDFExtract, PDFExtractOptions } from "npm:pdf.js-extract";

// …
}

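// Shared helper for this val: sends a system prompt plus a user message to the
// Chat Completions API, records each step via `log.push`, and resolves to either
// the model output or an error description (hence the `Promise<object | string>`
// return type).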
async function callOpenAI(
  openaiInstance: OpenAI,
  systemPrompt: string,
  userMessage: string,
  // …
  agentName: string,
): Promise<object | string> {
  log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
  try {
    const response = await openaiInstance.chat.completions.create({
      model: model,
      messages: [
        // …
    }
  } catch (error) {
    console.error(agentName, "OpenAI API call error:", error);
    let errMsg = "AI communication error.";
    if (error.message) errMsg += ` Message: ${error.message}`;
// …
        <li><strong>No client-side persistence of documents and analyses</strong></li>
      </ul>
      <p>This application uses OpenAI's GPT models for its AI capabilities. Data submitted will be processed by OpenAI.</p>
    </div>
`;
// …

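// HTTP entry point of the val: creates the std OpenAI client, reads the `format`
// query parameter from the request URL, and drives the task-suggestion and
// legal-analysis calls shown below.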
export default async function(req: Request) {
  const openai = new OpenAI();
  const url = new URL(req.url);
  const format = url.searchParams.get("format");
  // …
  const suggPrompt = legalTaskSuggestionSystemPromptTemplate.replace("%%DOCUMENT_TEXT%%", suggText);
  const suggAgent = "Task Suggestion AI (LLM1)";
  const suggRes = await callOpenAI(
    openai,
    suggPrompt,
    "Generate task suggestions based on the provided document text.",
    // …

  const analysisAgent = "Legal Analysis AI (LLM2)";
  const aiRes = await callOpenAI(openai, finalPrompt, docToAnalyze, "gpt-4o", true, log, analysisAgent);

  if (typeof aiRes === "object" && (aiRes as any).error) {
// …

// Environment Variables to set in Val.Town:
// - OPENAI_KEY: Your OpenAI API Key
// - NOTION_KEY: Your Notion API Key
// - NOTION_DATABASE_ID: The Database ID for your "Reflections" database (submissions)
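// The excerpts below read these variables through a `getEnv` helper whose
// definition is not shown. A minimal sketch of such a helper, assuming the
// standard Deno runtime that Val.Town provides (the val's actual helper may
// differ, e.g. by throwing when a variable is missing):
function getEnv(name: string): string | undefined {
  return Deno.env.get(name);
}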
// …
  console.log("Main Submission Handler: Starting AI Analysis for:", userNumberFromForm);
  const OPENAI_KEY = getEnv("OPENAI_KEY") as string;
  let aiAnalysisResult = {
    summary: "AI summary not generated.",
    // …
  const userMessage =
    `Please analyze the following user reflection:\n\nUser Reflection Text:\n\"\"\"\n${combinedResponsesText}\n\"\"\"\n\nProvide your analysis as a single JSON object.`;
  const openAIPayload = {
    model: "gpt-3.5-turbo",
    messages: [{ role: "system", content: systemMessage }, { role: "user", content: userMessage }],
    // …
  };
  try {
    const openAIResponse = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: { "Content-Type": "application/json", "Authorization": `Bearer ${OPENAI_KEY}` },
      body: JSON.stringify(openAIPayload),
    });
    if (!openAIResponse.ok) {
      const errorBody = await openAIResponse.text();
      console.error("Main Submission Handler: OpenAI API Error:", openAIResponse.status, errorBody);
    }
    else {
      const openAIData = await openAIResponse.json();
      const aiContent = openAIData.choices[0]?.message?.content;
      if (aiContent) {
        try {
          // …
        } catch (parseError) {
          console.error(
            "Main Submission Handler: Failed to parse OpenAI JSON response:",
            parseError,
            "Raw AI content:",
            // …
        }
      } else {
        console.warn("Main Submission Handler: OpenAI response content was empty.");
      }
    }
  } catch (aiError) {
    console.error("Main Submission Handler: Error calling OpenAI API:", aiError);
  }

// …
  const NOTION_USERS_DB_ID = getEnv("NOTION_USERS_DATABASE_ID") as string;
  const NOTION_REFLECTIONS_DB_ID = getEnv("NOTION_DATABASE_ID") as string;
  const OPENAI_KEY = getEnv("OPENAI_KEY") as string;

  let isFirstTimeUser = true;
  // …
Output ONLY the welcome message text.`;

  const openAIPayload = {
    model: "gpt-3.5-turbo",
    messages: [{ role: "system", content: systemPrompt }],
    // …
  };

  console.log("getUserReflectionContext: Calling OpenAI for personalized welcome...");
  try {
    const openAIResponse = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: { "Content-Type": "application/json", "Authorization": `Bearer ${OPENAI_KEY}` },
      body: JSON.stringify(openAIPayload),
    });

    if (!openAIResponse.ok) {
      const errorBody = await openAIResponse.text();
      console.error("getUserReflectionContext: OpenAI API Error:", openAIResponse.status, errorBody);
      welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
    } else {
      const openAIData = await openAIResponse.json();
      if (openAIData.choices && openAIData.choices[0] && openAIData.choices[0].message) {
        welcomeMessage = openAIData.choices[0].message.content.trim();
        console.log("getUserReflectionContext: OpenAI generated welcome:", welcomeMessage);
      } else {
        welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
        console.warn(
          "getUserReflectionContext: OpenAI response structure unexpected, using default welcome.",
        );
      }
    }
  } catch (aiError) {
    console.error("getUserReflectionContext: Error calling OpenAI:", aiError);
    welcomeMessage = DEFAULT_RETURNING_USER_MESSAGE;
  }
// …
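// The `aiTools` list below is typed as `AITool[]`; the interface declaration is
// not shown here. A minimal shape consistent with the entries (an assumption,
// not the file's actual declaration) would be:
interface AITool {
  name: string;
  url: string;
  category: string;
  description: string;
}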
const aiTools: AITool[] = [
  // AI Assistants & Chatbots
  { name: "ChatGPT", url: "https://chatgpt.com/", category: "Assistant", description: "OpenAI's conversational AI assistant" },
  { name: "Claude", url: "https://claude.ai/", category: "Assistant", description: "Anthropic's AI assistant for various tasks and conversations" },
  { name: "Gemini", url: "https://gemini.google.com/", category: "Assistant", description: "Google's advanced AI assistant" },
  // …
  // Image Generation & Editing
  { name: "Midjourney", url: "https://www.midjourney.com/", category: "Images", description: "AI art and image generation platform" },
  { name: "DALL·E 3", url: "https://openai.com/dall-e-3", category: "Images", description: "OpenAI's advanced image generation model" },
  { name: "Stable Diffusion", url: "https://stability.ai/", category: "Images", description: "Open-source AI image generation" },
  { name: "Lexica", url: "https://lexica.art/", category: "Images", description: "AI art search engine and generator" },
  // …
  { name: "Runway", url: "https://runwayml.com/", category: "Video", description: "AI video editing and generation tools" },
  { name: "Synthesia", url: "https://www.synthesia.io/", category: "Video", description: "AI video generation with virtual avatars" },
  { name: "Sora", url: "https://openai.com/sora", category: "Video", description: "OpenAI's text-to-video generation model" },
  { name: "Kling", url: "https://klingai.com/", category: "Video", description: "AI video generation platform" },
  { name: "Hailuo", url: "https://hailuo.ai/", category: "Video", description: "AI video creation tool" },
  // …
  // AI Research & Platforms
  { name: "OpenAI", url: "https://openai.com/", category: "Research", description: "AI research and deployment company" },
  { name: "Anthropic", url: "https://www.anthropic.com/", category: "Research", description: "AI safety research company" },
  { name: "xAI", url: "https://x.ai/", category: "Research", description: "Elon Musk's AI company" },
  // …
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod";

// …
}

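// Helper used by the agents further down: logs the outgoing request, calls
// gpt-4o-mini via the std OpenAI client, warns when the completion contains no
// text, and rethrows API failures with the status/code details attached.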
async function callOpenAI(
  sysPrompt: string,
  userPrompt: string,
  // …
  lg(
    "DEBUG",
    "callOpenAI",
    `Initiating OpenAI call tid=${tid}`,
    { spLen: sysPrompt.length, upLen: userPrompt.length },
    mid,
    // …
  );
  try {
    const openai = new OpenAI();
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [{ role: "system", content: sysPrompt }, { role: "user", content: userPrompt }],
      // …
      lg(
        "WARN",
        "callOpenAI",
        `OpenAI call returned no text tid=${tid}.`,
        { usage: usage, finishReason: completion.choices[0]?.finish_reason },
        mid,
        // …
      lg(
        "INFO",
        "callOpenAI",
        `OpenAI call OK tid=${tid}`,
        { resLen: resText.length, usage: usage, finishReason: completion.choices[0]?.finish_reason },
        mid,
        // …
  } catch (err: any) {
    const errDtls = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
    lg("ERROR", "callOpenAI", `OpenAI API call failed tid=${tid}`, { error: errDtls }, mid, tid);
    throw new Error(`OpenAI API failed: ${err.message}` + (err.code ? ` (Code: ${err.code})` : ""));
  }
}
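// The excerpts below show six agents (CntAgent, StratAgent, LogoAgent,
// BrandAgent, SimAgent, MetaAgent) all routing through the callOpenAI helper
// above: each supplies its own system prompt plus a short user prompt, and logs
// a WARN when the model returns no content.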
// …
    `Generate the ${p.cType} for the specified platform based on the context provided in the system prompt.`;
  try {
    const genContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!genContent) {
      lg("WARN", "CntAgent", `LLM returned no content tid=${tid}.`, undefined, mid, tid);
      // …
  const userP = `Develop the marketing strategy based on the system prompt context & framework.`;
  try {
    const stratContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!stratContent) {
      lg("WARN", "StratAgent", `LLM no content for strat tid=${tid}.`, undefined, mid, tid);
      // …
    `Generate 2-3 distinct logo concepts for "${p.bName}" based on the system prompt. Provide descriptions and AI prompts.`;
  try {
    const conceptContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!conceptContent) {
      lg("WARN", "LogoAgent", `LLM no content for logo tid=${tid}.`, undefined, mid, tid);
      // …
    `Develop foundational brand ID guide for "${p.bName}" based on system prompt context/instructions. Ensure cohesive & practical.`;
  try {
    const idContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!idContent) {
      lg("WARN", "BrandAgent", `LLM no content for brand ID tid=${tid}.`, undefined, mid, tid);
      // …
  const userP = `Run simulation type '${p.simType}' based on system prompt context/params. Follow format.`;
  try {
    const simContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!simContent) {
      lg("WARN", "SimAgent", `LLM no content for sim tid=${tid}.`, undefined, mid, tid);
      // …
    `Based on my request in system context, generate/refine the system prompt per guidelines. Output only resulting prompt text.`;
  try {
    const genSysP = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!genSysP) {
      lg("WARN", "MetaAgent", `LLM no content for meta-prompt tid=${tid}.`, undefined, mid, tid);