1// agent_ecosystem.ts
2import { XMLParser } from "https://esm.sh/fast-xml-parser@4.2.5"; // For parsing RSS/Atom feeds
3// import { combinerAgent, fetchAgent, summarizerAgent } from "https://esm.town/v/salon/mandate/agents.ts";
4import { AgentContext, AgentInput, AgentOutput } from "https://esm.town/v/salon/mandate/interfaces.ts";
5import { fetch } from "https://esm.town/v/std/fetch";
6import { OpenAI } from "https://esm.town/v/std/openai";
7// If Deno.env is used, ensure it's for Val Town secrets if possible or clearly noted.
56}
57
58// Your provided fetchAgent: fetches a URL (RSS/Atom feed by default) and returns parsed
// headlines plus the raw response text for downstream agents.
// NOTE(review): "Workspaceing"/"Workspace" in the log and error STRING LITERALS below is a
// bad find-and-replace of "Fetching"/"Fetch"; the literals themselves still need fixing
// (not changed here — comments only).
59export async function fetchAgent(
60 input: AgentInput<{ url_from_input?: string; maxHeadlines?: number }>,
61 context: AgentContext,
76
 // Feed URL precedence: explicit payload value, then agent config, then the NPR default.
77 const DEFAULT_RSS_URL = "https://feeds.npr.org/1001/rss.xml";
78 const urlToFetch = payload?.url_from_input ?? config?.rssFeedUrl ?? DEFAULT_RSS_URL;
 // Headline cap falls back payload -> config -> 5; Number() coerces string config values.
79 const maxHeadlines = Number(payload?.maxHeadlines ?? config?.maxHeadlines ?? 5);
80
81 log("INFO", "FetchAgent", `Workspaceing headlines from ${urlToFetch}, max: ${maxHeadlines}`);
82
83 try {
 // Identify ourselves to feed servers; some reject requests without a User-Agent.
84 const resp = await fetch(urlToFetch, { headers: { "User-Agent": "NewsAnalysisBot/1.0 (ValTown)" } }); // Added User-Agent
85 const fetchedContentType = resp.headers.get("content-type")?.toLowerCase() || "";
 // Body is read once as text so it can be both parsed and returned raw.
86 const rawResponseText = await resp.text();
87
 // Non-OK response handling (the guarding condition line is outside this excerpt).
 // Truncate error bodies to 500 chars so the thrown message stays readable.
89 let errorBody = rawResponseText;
90 if (errorBody.length > 500) errorBody = errorBody.substring(0, 500) + "...";
91 throw new Error(`Workspace failed: ${resp.status} ${resp.statusText}. URL: ${urlToFetch}. Body: ${errorBody}`);
92 }
93
98
 // Attempt RSS/Atom parsing only when the content type or URL suggests a feed;
 // the default NPR URL is always treated as a feed regardless of content type.
99 if (
100 fetchedContentType.includes("xml") || fetchedContentType.includes("rss") || urlToFetch.endsWith(".xml")
101 || urlToFetch.endsWith(".rss") || urlToFetch === DEFAULT_RSS_URL
102 ) {
103 try {
153 }
154 if (parsedSuccessfully)
155 log("INFO", "FetchAgent", `Successfully parsed ${headlines.length} headlines from "${feedTitle}".`);
156 else {
 // Parse ran but yielded no recognizable items; surface a warning, not an error.
157 parsingMessage = "RSS/Atom structure not as expected or no items found.";
158 log("WARN", "FetchAgent", parsingMessage);
159 }
160 } catch (parseError: any) {
 // XML parse failures are non-fatal: the raw text is still returned to the caller.
161 parsingMessage = `Failed to parse XML/RSS content. Error: ${parseError.message}`;
162 log("WARN", "FetchAgent", `${parsingMessage} from URL: ${urlToFetch}`);
163 }
164 } else {
165 parsingMessage =
166 `Content type "${fetchedContentType}" is not XML/RSS. Not attempting RSS parse. Raw text will be available.`;
167 log("INFO", "FetchAgent", parsingMessage);
168 }
 // Only attach `message` when parsing did not succeed, so happy-path output stays clean.
169 const outputData = {
170 headlines,
171 feedTitle,
172 sourceUrl: urlToFetch,
173 ...(parsingMessage && !parsedSuccessfully ? { message: parsingMessage } : {}),
174 };
 // Success output: parsed data plus the raw text/content type for downstream consumers.
176 mandateId,
177 correlationId: taskId,
178 payload: { data: outputData, rawText: rawResponseText, contentType: fetchedContentType },
179 };
180 } catch (e: any) {
181 log("ERROR", "FetchAgent", `Workspace or processing failed: ${e.message}`, e);
 // Error path still returns a well-formed payload (empty headlines + message) rather than rethrowing.
182 return {
183 mandateId,
187 headlines: [],
188 feedTitle: "Error",
189 sourceUrl: urlToFetch,
190 message: `Workspace or processing failed: ${e.message}`,
191 },
219 try {
220 const summaryText = payload.summary ?? "N/A";
221 let externalDataDescription = "External Data: Not Fetched/Available or not in expected format.";
222 // ... (rest of your description logic)
223 if (
292}
293
294interface FetchedArticle {
295 id: string;
296 title: string;
300 sourceFeedName: string;
301 sourceFeedUrl: string;
302 rawContent?: string; // Full article text, if fetched
303 cleanedSummary?: string; // HTML-stripped summary for processing
304 isRelevant?: boolean;
314 config?: AnalysisConfig;
315 discoveredFeeds?: { name: string; url: string }[]; // Feeds to be used
316 fetchedArticles?: FetchedArticle[]; // Raw from fetch (multiple feeds)
317 processedArticles?: FetchedArticle[]; // Cleaned articles
318 relevantArticles?: FetchedArticle[]; // Filtered by relevance
319 articlesWithSentiment?: FetchedArticle[];
320 articlesWithThemes?: FetchedArticle[];
321 trendReport?: any;
322 anomalyReport?: any;
406}
407
408// 3. ParallelFetchAgent (This agent will call the user-provided fetchAgent for each feed)
// NOTE(review): despite the name, fetches run SEQUENTIALLY — each fetchAgent call is
// awaited inside the for-loop. Promise.all over feedsToFetch would make this parallel.
409export async function parallelFetchAgent(
410 input: AgentInput<{ feedsToFetch: { name: string; url: string }[]; maxHeadlinesPerFeed: number }>,
411 context: AgentContext, // This context will be passed to the individual fetchAgent calls
412): Promise<AgentOutput<{ fetchedArticles: FetchedArticle[] }>> {
413 const { log } = context; // Main orchestrator log
414 const { feedsToFetch, maxHeadlinesPerFeed } = input.payload;
 // Accumulates the normalized articles from every feed, in feed order.
415 let allArticles: FetchedArticle[] = [];
416
417 log("INFO", "ParallelFetchAgent", `Starting parallel fetch from ${feedsToFetch.length} feeds.`);
418
419 for (const feed of feedsToFetch) {
420 log("INFO", "ParallelFetchAgent", `Dispatching fetch for ${feed.name} (${feed.url})`);
421 // Call the user-provided fetchAgent
422 const singleFetchOutput = await fetchAgent(
423 {
424 mandateId: input.mandateId,
433 );
434
 // A fetch counts as successful only if it produced a headlines array.
435 if (singleFetchOutput.payload?.data?.headlines) {
 // Normalize each headline into a FetchedArticle shape for downstream agents.
436 const transformedArticles = singleFetchOutput.payload.data.headlines.map(h => ({
 // Prefer the article link as a stable ID; the Date.now() fallback is NOT stable
 // across runs, so dedup/caching on `id` will miss for link-less items.
437 id: h.link || `${feed.url}-${h.title}-${Date.now()}`, // Ensure an ID
438 title: h.title,
444 }));
445 allArticles.push(...transformedArticles);
 // NOTE(review): "Workspaceed" below is a find-and-replace typo for "Fetched" in the log string.
446 log("INFO", "ParallelFetchAgent", `Workspaceed ${transformedArticles.length} articles from ${feed.name}.`);
447 } else {
 // Per-feed failures are logged and skipped; one bad feed does not abort the batch.
448 log(
449 "WARN",
450 "ParallelFetchAgent",
451 `No headlines fetched or error from ${feed.name}. Error: ${
452 singleFetchOutput.error || singleFetchOutput.payload?.data?.message
453 }`,
454 );
455 }
456 }
457 log("SUCCESS", "ParallelFetchAgent", `Total articles fetched from all feeds: ${allArticles.length}.`);
458 return { mandateId: input.mandateId, correlationId: input.taskId, payload: { fetchedArticles: allArticles } };
459}
460
461// 4. ArticleCleaningAgent
/**
 * Produces `processedArticles` from the raw fetched articles.
 * NOTE(review): presumably strips HTML into `cleanedSummary` — the transforming body
 * is below the visible lines; confirm against the full implementation.
 */
462export async function articleCleaningAgent(
463 input: AgentInput<{ articles: FetchedArticle[] }>,
464 context: AgentContext,
465): Promise<AgentOutput<{ processedArticles: FetchedArticle[] }>> {
466 const { log } = context;
467 log("INFO", "ArticleCleaningAgent", `Cleaning ${input.payload.articles.length} articles.`);
478// 5. RelevanceAssessmentAgent
/**
 * Filters `articles` down to `relevantArticles` for the given `topic`/`keywords`.
 * Instantiates an OpenAI client, so the relevance judgment is presumably an LLM call
 * per article (call site is below the visible lines — confirm).
 */
479export async function relevanceAssessmentAgent(
480 input: AgentInput<{ articles: FetchedArticle[]; topic: string; keywords: string[] }>,
481 context: AgentContext,
482): Promise<AgentOutput<{ relevantArticles: FetchedArticle[] }>> {
483 const { log } = context;
484 const { articles, topic, keywords } = input.payload;
485 const openai = new OpenAI();
 // Accumulator for articles judged relevant.
486 let relevantArticles: FetchedArticle[] = [];
487 log("INFO", "RelevanceAssessmentAgent", `Assessing relevance for ${articles.length} articles on topic: ${topic}`);
488
536// 6. ContentExtractionAgent (Simplified: mainly uses cleaned summary, conceptual for full text)
/**
 * Returns `articlesWithContent`. Per the comments below, full-text retrieval is only
 * simulated: `rawContent` is populated from `cleanedSummary`, and `analysisDepth`
 * does not change behavior in the visible code.
 */
537export async function contentExtractionAgent(
538 input: AgentInput<{ articles: FetchedArticle[]; analysisDepth: "cursory" | "standard" | "deep" }>,
539 context: AgentContext,
540): Promise<AgentOutput<{ articlesWithContent: FetchedArticle[] }>> {
541 const { log } = context;
542 const { articles, analysisDepth } = input.payload;
543 log("INFO", "ContentExtractionAgent", `Content extraction. Depth: ${analysisDepth}. Articles: ${articles.length}`);
544 // Simulating full content: For "deep" analysis, one might try to fetch article.link.
545 // Here, we'll just ensure `rawContent` is populated from `cleanedSummary` for LLM processing consistency.
546 const updatedArticles = articles.map(article => ({
555// 7. SentimentAnalysisAgent
/**
 * Annotates each article with a sentiment label ("positive" | "negative" | "neutral" |
 * "mixed") and an optional score in [-1, 1], requested as strict JSON from an OpenAI
 * chat completion per article.
 */
556export async function sentimentAnalysisAgent(
557 input: AgentInput<{ articles: FetchedArticle[] }>,
558 context: AgentContext,
559): Promise<AgentOutput<{ articlesWithSentiment: FetchedArticle[] }>> {
560 const { log } = context;
561 const openai = new OpenAI();
562 let articlesWithSentiment: FetchedArticle[] = [];
 // Guard: a missing articles array yields an empty result instead of throwing.
563 if (!input.payload.articles)
564 return { mandateId: input.mandateId, correlationId: input.taskId, payload: { articlesWithSentiment } };
 // Per-article prompt asking the model for a strict JSON sentiment object.
569 const prompt =
570 `Analyze the sentiment of the following text. Respond with a JSON object: {"sentiment": "positive" | "negative" | "neutral" | "mixed", "score": float_between_-1_and_1 (optional, e.g. 0.7 for positive)}.\n\nText:\n${textToAnalyze}`;
 // Neutral/0.0 defaults — presumably kept when the model call below fails; confirm in full body.
571 let sentimentResult: FetchedArticle["sentiment"] = "neutral";
572 let sentimentScore: FetchedArticle["sentimentScore"] = 0.0;
573 try {
574 const completion = await openai.chat.completions.create({
603// 8. KeyThemeExtractionAgent
/**
 * Annotates articles with 2-4 key themes and named entities (per the prompt below),
 * obtained as strict JSON from an OpenAI chat completion per article; returns them
 * as `articlesWithThemes`.
 */
604export async function keyThemeExtractionAgent(
605 input: AgentInput<{ articles: FetchedArticle[]; topic: string }>,
606 context: AgentContext,
607): Promise<AgentOutput<{ articlesWithThemes: FetchedArticle[] }>> {
608 const { log } = context;
609 const openai = new OpenAI();
610 let articlesWithThemes: FetchedArticle[] = [];
611 log(
612 "INFO",
 // Prompt requests strict JSON: {"themes": [...], "entities": [{"text", "type"}]}.
619 const prompt =
620 `For the text (topic: "${input.payload.topic}"), identify 2-4 key themes and prominent named entities (people, orgs). JSON: {"themes": ["theme1", ...], "entities": [{"text": "EntityName", "type": "PERSON"|"ORG"|"LOC"|"MISC"}]}.\nText:\n${textToAnalyze}`;
 // Fallback when extraction fails or parsing yields nothing: empty theme/entity lists.
621 let themeResult: { themes: string[]; entities: FetchedArticle["entities"] } = { themes: [], entities: [] };
622 try {
623 const completion = await openai.chat.completions.create({
648// 9. TrendAndAnomalyDetectionAgent
/**
 * Produces a trend report and an anomaly report from the themed articles;
 * `historicalContextSummary` is optional extra context. Both report shapes are
 * untyped (`any`) in the signature — consider giving them interfaces.
 */
649export async function trendAndAnomalyDetectionAgent(
650 input: AgentInput<{ articlesWithThemes: FetchedArticle[]; topic: string; historicalContextSummary?: string }>, // historicalContext is optional
651 context: AgentContext,
652): Promise<AgentOutput<{ trendReport: any; anomalyReport: any }>> {
980 if (sourcesOut.error) throw new Error(`SourceValidationAgent Error: ${sourcesOut.error}`);
981
982 const pFetchOut = await parallelFetchAgent({
983 mandateId,
984 taskId: "pfetch-3",
985 payload: {
986 feedsToFetch: workflowData.discoveredFeeds!,
987 maxHeadlinesPerFeed: workflowData.config!.maxHeadlinesPerFeed,
988 },
989 }, context);
990 workflowData.fetchedArticles = pFetchOut.payload.fetchedArticles;
991 if (pFetchOut.error) throw new Error(`ParallelFetchAgent Error: ${pFetchOut.error}`);
992
993 const cleanOut = await articleCleaningAgent({
994 mandateId,
995 taskId: "clean-4",
996 payload: { articles: workflowData.fetchedArticles! },
997 }, context);
998 workflowData.processedArticles = cleanOut.payload.processedArticles;