import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod";

}

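// Note: the `LogFn` type used below is not shown in this excerpt. A minimal sketch of a
// compatible signature, inferred from how it is invoked (an assumption, not the original type):
type LogFn = (
  level: "DEBUG" | "INFO" | "WARN" | "ERROR",
  component: string,
  message: string,
  details?: Record<string, unknown>,
  mandateId?: string,
  taskId?: string,
) => void;
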
async function callOpenAI(
  systemPrompt: string,
  userPrompt: string,
  mandateId: string,
  taskId: string,
  log: LogFn,
): Promise<string | null> {
  log(
    "DEBUG",
    "callOpenAI",
    `Calling OpenAI for task ${taskId}`,
    { sysLen: systemPrompt.length, usrLen: userPrompt.length },
    mandateId,
    taskId,
  );
  try {
    const openai = new OpenAI();
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userPrompt }],
    });
    // Extract the assistant's reply and token usage from the completion.
    const resultText = completion.choices[0]?.message?.content;
    const usage = completion.usage;
    if (!resultText) {
      log("WARN", "callOpenAI", `OpenAI returned no content for task ${taskId}.`, { usage }, mandateId, taskId);
      return null;
    }
    log(
      "DEBUG",
      "callOpenAI",
      `OpenAI call successful for task ${taskId}`,
      { resLen: resultText.length, usage },
      mandateId,
      taskId,
    );
    return resultText.trim();
  } catch (error: any) {
    const errorDetails = { msg: error.message, name: error.name, status: error.status, type: error.type };
    log("ERROR", "callOpenAI", `OpenAI API call failed for task ${taskId}`, { error: errorDetails }, mandateId, taskId);
    throw new Error(`OpenAI API error: ${error.message}`);
  }
}
    `Please generate the ${payload.contentType} for ${payload.platform} based on the details provided in the system prompt.`;
  try {
    const generatedContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
    if (!generatedContent) {
      log("WARN", "ContentCreationAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);

  const userPrompt = `Please develop the marketing strategy based on the details provided in the system prompt.`;
  try {
    const strategyContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
    if (!strategyContent) {
      log("WARN", "MarketingStrategyAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);

  const userPrompt = `Generate logo concepts for "${payload.businessName}" based on the details in the system prompt.`;
  try {
    const conceptContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
    if (!conceptContent) {
      log("WARN", "LogoConceptAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);

    `Build the brand identity guide for "${payload.businessName}" based on the details in the system prompt.`;
  try {
    const identityContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
    if (!identityContent) {
      log("WARN", "BrandIdentityAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);

  const userPrompt = `Run the ${payload.simulationType} simulation using the parameters provided in the system prompt.`;
  try {
    const simContent = await callOpenAI(systemPrompt, userPrompt, mandateId, taskId, log);
    if (!simContent) {
      log("WARN", "SimulationAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);

    `Generate or refine the system prompt based on my request provided in the system prompt context.`;
  try {
    const genSysPrompt = await callOpenAI(systemPromptForMeta, userPromptForMeta, mandateId, taskId, log);
    if (!genSysPrompt) {
      log("WARN", "MetaPromptAgent", `LLM returned no content for task ${taskId}`, undefined, mandateId, taskId);
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod";

}

async function callOpenAI(sysPrompt: string, userPrompt: string, mid: string, tid: string, lg: LogFn): Promise<string | null> {
  lg("DEBUG", "callOpenAI", `Initiating OpenAI call tid=${tid}`, { spLen: sysPrompt.length, upLen: userPrompt.length }, mid, tid);
  try {
    if (!process.env.OPENAI_API_KEY) {
      lg("ERROR", "callOpenAI", "OpenAI API key missing.", undefined, mid, tid);
      throw new Error("OpenAI API key not configured.");
    }
    const openai = new OpenAI();
    const completion = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [{ role: "system", content: sysPrompt }, { role: "user", content: userPrompt }],
    });
    // Pull out the reply text and usage stats; a missing reply is treated as a soft failure.
    const resText = completion.choices[0]?.message?.content;
    const usage = completion.usage;
    if (!resText) {
      lg("WARN", "callOpenAI", `OpenAI call returned no text tid=${tid}.`, { usage, finishReason: completion.choices[0]?.finish_reason }, mid, tid);
      return null;
    }
    lg("INFO", "callOpenAI", `OpenAI call OK tid=${tid}`, { resLen: resText.length, usage, finishReason: completion.choices[0]?.finish_reason }, mid, tid);
    return resText.trim();
  } catch (err: any) {
    const errDtls = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
    lg("ERROR", "callOpenAI", `OpenAI API call failed tid=${tid}`, { error: errDtls }, mid, tid);
    throw new Error(`OpenAI API failed: ${err.message}` + (err.code ? ` (Code: ${err.code})` : ""));
  }
}
  const userP = `Generate the ${p.cType} for the specified platform based on the context provided in the system prompt.`;
  try {
    const genContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!genContent) { lg("WARN", "CntAgent", `LLM returned no content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { content: "" }, e: "AI failed to generate content." }; }
    const output: CntOut = { content: genContent };

  const userP = `Develop the marketing strategy based on the system prompt context and framework.`;
  try {
    const stratContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!stratContent) { lg("WARN", "StratAgent", `LLM returned no strategy content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { strategy: "" }, e: "AI failed to generate strategy." }; }
    let adIdeas: string[] = [];

  const userP = `Generate 2-3 distinct logo concepts for "${p.bName}" based on the system prompt. Provide descriptions and AI prompts.`;
  try {
    const conceptContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!conceptContent) { lg("WARN", "LogoAgent", `LLM returned no logo content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { concepts: [], prompts: [] }, e: "AI failed to generate logo concepts." }; }
    const concepts: string[] = []; const prompts: string[] = [];

  const userP = `Develop a foundational brand identity guide for "${p.bName}" based on the system prompt context and instructions. Ensure it is cohesive and practical.`;
  try {
    const idContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!idContent) { lg("WARN", "BrandAgent", `LLM returned no brand identity content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { bVoice: "", kMsgs: [], cPalSugs: "", typo: "" }, e: "AI failed to generate brand identity guide." }; }
    const voiceM = idContent.match(/## Brand Voice & Personality\n([\s\S]*?)(?=\n##|$)/i);

  const userP = `Run simulation type '${p.simType}' based on the system prompt context and parameters. Follow the required format.`;
  try {
    const simContent = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!simContent) { lg("WARN", "SimAgent", `LLM returned no simulation content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { simResult: "" }, e: "AI failed to run simulation." }; }
    let simResult = simContent; let analysis: string | undefined = undefined;

  const userP = `Based on my request in the system context, generate or refine the system prompt per the guidelines. Output only the resulting prompt text.`;
  try {
    const genSysP = await callOpenAI(sysP, userP, mid, tid, lg);
    if (!genSysP) { lg("WARN", "MetaAgent", `LLM returned no meta-prompt content tid=${tid}.`, undefined, mid, tid); return { mid, cid: tid, p: { sysPrompt: "" }, e: "AI failed to generate system prompt." }; }
    const finalP = genSysP.replace(/^```(markdown|text)?\s*/i, '').replace(/\s*```$/, '').trim();
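    // The chained replace() calls strip an optional leading ```markdown / ```text fence and a
    // trailing ``` fence, so a fenced reply such as "```markdown\nYou are an expert copywriter.\n```"
    // reduces to the bare prompt text (here, "You are an expert copywriter.") before it is returned.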
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai"; // Assuming OpenAI library is available
import { z } from "npm:zod"; // Using Zod for input validation

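// The zod import above is used for input validation, but the schema itself is not shown in this
// excerpt. A minimal sketch of what such validation could look like (the `PromptRequest` name and
// fields are assumptions, not the original schema):
const PromptRequest = z.object({
  reqId: z.string(),
  task: z.string().min(1),
  context: z.string().optional(),
});
type PromptRequest = z.infer<typeof PromptRequest>;
// Usage sketch: const parsed = PromptRequest.safeParse(await req.json());
// if (!parsed.success) { /* respond 400 with parsed.error.flatten() */ }
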
// LLM Interaction Function (Simplified)
const llm = async (systemPrompt: string, userPrompt: string, reqId: string, log: LogFn): Promise<string> => {
  log("DEBUG", "LLM", `OpenAI Call`, { sysL: systemPrompt.length, usrL: userPrompt.length }, reqId);
  try {
    // IMPORTANT: Replace with your actual OpenAI API key mechanism (e.g., an environment variable).
    const oa = new OpenAI(); // Assumes OPENAI_API_KEY env var is set
    const completion = await oa.chat.completions.create({
      // Consider a more powerful model for prompt generation; a cheaper one may do for testing.
      model: "gpt-4o-mini", // Model not shown in this excerpt; assumed to match the sibling files.
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userPrompt }],
    });
    const txt = completion.choices[0]?.message?.content;
    const usage = completion.usage;
    if (!txt) {
      log("WARN", "LLM", `OpenAI returned no content.`, { usage }, reqId);
      throw new Error("LLM returned no content.");
    }
    log("DEBUG", "LLM", `OpenAI Success`, { resL: txt.length, usage }, reqId);
    return txt.trim();
  } catch (err: any) {
    const errD = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
    log("ERROR", "LLM", `OpenAI API call failed`, { error: errD }, reqId);
    throw new Error(`LLM API error: ${err.message}${err.code ? ` (Code: ${err.code})` : ""}`);
  }
};
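
// Usage sketch (illustrative; `handleGeneratePrompt` and the prompts below are assumptions, not
// part of the original file). Because llm() throws on any failure, callers wrap it in try/catch:
async function handleGeneratePrompt(reqId: string, log: LogFn): Promise<string | null> {
  try {
    return await llm(
      "You write concise, well-structured system prompts.",
      "Draft a system prompt for a customer-support triage assistant.",
      reqId,
      log,
    );
  } catch (err: any) {
    log("ERROR", "Handler", "Prompt generation failed", { error: err.message }, reqId);
    return null;
  }
}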
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod";

const llm = async (sysPrompt: string, userPrompt: string, mid: string, tid: string, log: LogFn): Promise<string | null> => {
  log("DEBUG", "LLM", `Call OpenAI task ${tid}`, { sysL: sysPrompt.length, usrL: userPrompt.length }, mid, tid);
  // Append a standing safety reminder to every system prompt before calling the model.
  const finalSys = sysPrompt + `\n\nREMINDER: Info only, not medical advice. Verify & use clinical judgment.`;
  try {
    const oa = new OpenAI(); // Assumes API key is set in environment
    const completion = await oa.chat.completions.create({
      model: "gpt-4o-mini", // Use an appropriate model
      messages: [{ role: "system", content: finalSys }, { role: "user", content: userPrompt }],
    });
    const txt = completion.choices[0]?.message?.content;
    const usage = completion.usage;
    if (!txt) { log("WARN", "LLM", `OpenAI task ${tid} no content.`, { usage }, mid, tid); return null; }
    log("DEBUG", "LLM", `OpenAI task ${tid} success`, { resL: txt.length, usage }, mid, tid);
    return txt.trim();
  } catch (err: any) {
    const errD = { msg: err.message, name: err.name, status: err.status, type: err.type, code: err.code };
    log("ERROR", "LLM", `OpenAI API task ${tid} failed`, { error: errD }, mid, tid);
    throw new Error(`OpenAI API error: ${err.message}${err.code ? ` (Code: ${err.code})` : ""}`);
  }
};
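
// Usage sketch (illustrative; the agent name, prompts, and payload shape below are assumptions,
// not part of the original file). This variant returns null on empty content rather than throwing,
// so callers must check for it explicitly:
async function summarizeIntake(note: string, mid: string, tid: string, log: LogFn) {
  const sys = "You summarize patient intake notes for a clinician. Be concise and factual.";
  const result = await llm(sys, note, mid, tid, log);
  if (!result) {
    log("WARN", "IntakeAgent", `LLM returned no content for task ${tid}`, undefined, mid, tid);
    return { mid, cid: tid, p: { summary: "" }, e: "AI failed to summarize intake note." };
  }
  return { mid, cid: tid, p: { summary: result } };
}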