Townie-09/system_prompt.txt (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
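A minimal sketch of that rename-and-recreate pattern, assuming the std/sqlite helper; the table and column names here are placeholders, not taken from any of these projects:

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Hypothetical example: the schema of `messages` changed, so create `messages_2`
// instead of trying to ALTER the old table in place.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS messages_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    author TEXT NOT NULL,
    body TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);
```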
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
Townie-09/.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
autonomous-val/README.md (1 match)
Configure the following variables in your environment:
- `AGENT_API_KEY` (a secret token of your choosing, used to secure the agent.tsx POST endpoint)
- `OPENAI_API_KEY` (an OpenAI API key)
- `EXA_API_KEY` (optional, but required if you use the web search tool)
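As a rough illustration, here is a hedged sketch of how an `AGENT_API_KEY` check on a POST endpoint like agent.tsx might look; the header name, error response, and handler body are assumptions, not the project's actual code:

```ts
// Hypothetical guard for an HTTP val that should only accept authorized POSTs.
export default async function (req: Request): Promise<Response> {
  const expected = Deno.env.get("AGENT_API_KEY");
  const provided = req.headers.get("authorization")?.replace(/^Bearer\s+/i, "");

  if (!expected || provided !== expected) {
    return new Response("Unauthorized", { status: 401 });
  }

  // ...handle the agent request here...
  return new Response(JSON.stringify({ ok: true }), {
    headers: { "content-type": "application/json" },
  });
}
```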
autonomous-val/agent.tsx (2 matches)
```ts
import { anthropic } from "npm:@ai-sdk/anthropic";
import { openai } from "npm:@ai-sdk/openai";
import { generateText, streamText } from "npm:ai";
import { getSystemPrompt } from "./prompt.tsx";
// ...
const maxSteps = 10;

const model = Deno.env.get("ANTHROPIC_API_KEY") ? anthropic("claude-3-7-sonnet-latest") : openai("gpt-4.1");

const options = {
  // ...
```
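For context, a hedged sketch of how a model chosen this way is commonly passed to the AI SDK's `generateText`; the system prompt, user prompt, and logging are placeholders rather than the val's real options object:

```ts
import { anthropic } from "npm:@ai-sdk/anthropic";
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

// Prefer Anthropic when its key is configured, otherwise fall back to OpenAI.
const model = Deno.env.get("ANTHROPIC_API_KEY")
  ? anthropic("claude-3-7-sonnet-latest")
  : openai("gpt-4.1");

// maxSteps lets the SDK run several tool-call rounds before returning a final answer.
const { text } = await generateText({
  model,
  system: "You are a helpful autonomous agent.", // placeholder system prompt
  prompt: "Summarize today's tasks.",            // placeholder user input
  maxSteps: 10,
});

console.log(text);
```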
markdown-embed/.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
stevensDemo/.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
thirdTimer/val-town.mdc (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
```ts
// --- Main Request Handler (Server Code - MODIFIED) ---
export default async function(req: Request) {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { fetch } = await import("https://esm.town/v/std/fetch");
  // PDFExtract is kept if you want to add document features later, but not primary for this use case.
  // ...
  const action = url.searchParams.get("action"); // New: "loanAssumptionInfo"
  const sourceUrl = import.meta.url.replace("esm.town", "val.town");
  const openai = new OpenAI();
  const MAX_TEXT_LENGTH_ANALYSIS = 10000; // Reduced as input is smaller now
  // ...
}

// callOpenAI function (same as original, but uses gpt-4o by default)
async function callOpenAI(
  openaiInstance: OpenAI,
  systemPrompt: string,
  userMessage: string, // For this app, userMessage to AI might be empty if all info is in systemPrompt
  // ...
): Promise<object | string> {
  // ... (implementation from original)
  log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
  try {
    const response = await openaiInstance.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
// ...
      .replace("%%USER_NAME%%", analysisInput.userName);
    // The %%INPUT_SOURCE_DESCRIPTION%% and %%LEGAL_TASK_QUERY%% are not in the new prompt in this direct way.
    // The userMessage to openAI can be kept minimal or empty as the system prompt is rich.
    const userMessageForAI = ""; // Or analysisInput.documentText if you want to provide more context there.

    const analysisAgentName = "HomeAdvantage AI";
    const aiResponse = await callOpenAI(
      openai,
      finalSystemPrompt,
      userMessageForAI,
```
```ts
}
export default async function(req: Request) {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { fetch } = await import("https://esm.town/v/std/fetch");
  const { PDFExtract, PDFExtractOptions } = await import("npm:pdf.js-extract");
  // ...
  const action = url.searchParams.get("action");
  const sourceUrl = import.meta.url.replace("esm.town", "val.town");
  const openai = new OpenAI();
  const MAX_TEXT_LENGTH_SUGGESTION = 20000;
  const MAX_TEXT_LENGTH_ANALYSIS = 30000;
  // ...
  }
}
async function callOpenAI(
  openaiInstance: OpenAI,
  systemPrompt: string,
  userMessage: string,
  // ...
  agentName: string,
): Promise<object | string> {
  log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
  try {
    const response = await openaiInstance.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
// ...
    );
    const suggestionAgentName = "Task Suggestion AI";
    const suggestionsResponse = await callOpenAI(
      openai,
      suggestionPrompt,
      "",
// ...
    );
    const analysisAgentName = "Legal Analysis AI";
    const aiResponse = await callOpenAI(
      openai,
      finalSystemPrompt,
      documentTextToAnalyze,
```
```ts
/**
 * Legal AI Document Analysis (Single Val Version with PDF Upload & Dashboard Style)
 * Ingests documents (URL, Text, PDF Upload), takes a user-defined legal task query,
 * and uses a Legal AI Agent (via OpenAI) to analyze the content.
 * The Legal AI Agent outputs a standardized JSON structure.
 * Uses 'npm:pdf.js-extract' for direct PDF text extraction within the Val.
 */
```
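Since the header mentions `npm:pdf.js-extract` for PDF text extraction, here is a hedged sketch of how that library is typically used to pull text out of an uploaded PDF; the helper name, buffer source, and joining logic are illustrative assumptions, not the val's actual code:

```ts
import { Buffer } from "node:buffer";
import { PDFExtract } from "npm:pdf.js-extract";

// Hypothetical helper: turn an uploaded PDF (as an ArrayBuffer) into plain text.
async function extractPdfText(pdfBytes: ArrayBuffer): Promise<string> {
  const pdfExtract = new PDFExtract();
  const data = await pdfExtract.extractBuffer(Buffer.from(pdfBytes), {});
  // Each page exposes content items with a `str` field; join them per page.
  return data.pages
    .map((page) => page.content.map((item) => item.str).join(" "))
    .join("\n\n");
}
```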
```ts
export default async function(req: Request) {
  // --- Dynamic Imports (Inside Handler) ---
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { z } = await import("npm:zod"); // For potential future robust input validation on server
  const { fetch } = await import("https://esm.town/v/std/fetch");
  // ...
}

// --- Helper Function: Call OpenAI API ---
async function callOpenAI(
  openai: OpenAI,
  systemPrompt: string,
  userMessage: string, // This will be the document text
  // ...
): Promise<object | string> { // Return type can be object if JSON, or string if error
  try {
    const response = await openai.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
// ...
      return JSON.parse(content);
    } catch (parseError) {
      console.error("OpenAI JSON Parse Error:", parseError, "Raw Content:", content);
      // Return the raw content if parsing fails, for debugging, wrapped as an error object
      return {
// ...
    }
  } catch (error) {
    console.error(`OpenAI API call failed. Error:`, error);
    let errorMessage = "Error communicating with AI model.";
    if (error.message) { errorMessage += ` Details: ${error.message}`; }
```
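The helper above parses the model's reply with `JSON.parse(content)`. The original code may rely on the system prompt alone to get JSON back; as a hedged alternative sketch, the Chat Completions API can also be asked for a JSON object explicitly via `response_format`, which makes the parse step less likely to fail:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const response = await openai.chat.completions.create({
  model: "gpt-4o",
  // JSON mode: the model is constrained to emit a single valid JSON object.
  response_format: { type: "json_object" },
  messages: [
    { role: "system", content: "Reply with a JSON object containing a `summary` field." },
    { role: "user", content: "The quick brown fox jumps over the lazy dog." },
  ],
});

const parsed = JSON.parse(response.choices[0].message.content ?? "{}");
console.log(parsed);
```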
```ts
  // ...
  log: LogEntry[],
): Promise<{ finalResult?: LegalAIResponse | object; log: LogEntry[] }> {
  const openai = new OpenAI(); // Assumes OPENAI_API_KEY is in environment

  log.push({ agent: "System", type: "step", message: "Starting Legal AI analysis workflow." });
  // ...
  finalSystemPrompt = finalSystemPrompt.replace("%%INPUT_SOURCE_DESCRIPTION%%", input.inputSourceDescription);

  const aiResponse = await callOpenAI(openai, finalSystemPrompt, truncatedText, "gpt-4o", true); // Expect JSON

  if (typeof aiResponse === "object" && (aiResponse as any).error) {
    // Error from callOpenAI helper (API or parsing)
    log.push({
      agent: "Legal AI Agent",
// ...
    log.push({ agent: "System", type: "final", message: "Workflow finished successfully." });
    return { finalResult: aiResponse as LegalAIResponse, log };
  } else { // Should not happen if expectJson is true and no error object from callOpenAI
    log.push({
      agent: "Legal AI Agent",
```