markdown-embed.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
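The schema note above amounts to versioning tables by name instead of migrating them in place. A minimal sketch of that pattern with the std `sqlite` client follows; the table and column names are invented for illustration.

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Hypothetical example: the table was renamed messages_2 after its schema changed,
// so old rows in "messages" are simply left behind rather than migrated.
const TABLE_NAME = "messages_2";

await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    content TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);
```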
openai-client/main.tsx (9 matches)
import { OpenAI } from "https://esm.sh/openai@4.85.1";
import { sqlite } from "https://esm.town/v/std/sqlite";
// ...
};

interface ChatOpenAI {
  invoke(messages: Message[]): Promise<string>;
}

export function ChatOpenAI(model: string): ChatOpenAI {
  const openai = new OpenAI();

  return {
    invoke: async (messages: Message[]): Promise<string> => {
      const completion = await openai.chat.completions.create({
        messages: messages.map(message => ({
          role: message.role as "user" | "assistant" | "system",
          // ...
}

// Decorator for ChatOpenAI that will eventually add rate limiting
export function GlobalRateLimitedChatOpenAI(model: string, requestsPerSecond: number): ChatOpenAI {
  const openAi = ChatOpenAI(model);

  const rateLimiter = new GlobalRateLimiter(requestsPerSecond);
  // ...
      await rateLimiter.check();

      return openAi.invoke(messages);
    },
  };
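`GlobalRateLimiter` is referenced but not shown in this excerpt, and the comment says the rate limiting itself is still to come. A minimal in-memory sketch of the interface it would need is below; the real version presumably persists counts somewhere (the `sqlite` import hints at that), so treat this as an assumption rather than the val's actual implementation.

```ts
// Sliding-window limiter sketch; only limits within a single isolate.
class GlobalRateLimiter {
  private timestamps: number[] = [];

  constructor(private requestsPerSecond: number) {}

  // Throws when the one-second budget is exhausted, otherwise records the call.
  async check(): Promise<void> {
    const now = Date.now();
    this.timestamps = this.timestamps.filter(t => now - t < 1000);
    if (this.timestamps.length >= this.requestsPerSecond) {
      throw new Error("Global rate limit exceeded");
    }
    this.timestamps.push(now);
  }
}
```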
stevensDemo.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
thirdTimer/val-town.mdc (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
// --- Main Request Handler (Server Code - MODIFIED) ---
export default async function(req: Request) {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { fetch } = await import("https://esm.town/v/std/fetch");
  // PDFExtract is kept if you want to add document features later, but not primary for this use case.
  // ...
  const action = url.searchParams.get("action"); // New: "loanAssumptionInfo"
  const sourceUrl = import.meta.url.replace("esm.town", "val.town");
  const openai = new OpenAI();
  const MAX_TEXT_LENGTH_ANALYSIS = 10000; // Reduced as input is smaller now
  // ...
}
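The handler branches on the `action` query parameter (the comment flags a new `loanAssumptionInfo` action), but the branch itself is elided here. A sketch of how such a dispatch commonly looks is below; the request body fields and the responses are assumptions, not the val's actual code.

```ts
// Hypothetical sketch of dispatching on the "action" query parameter.
export default async function (req: Request): Promise<Response> {
  const url = new URL(req.url);
  const action = url.searchParams.get("action");

  if (req.method === "POST" && action === "loanAssumptionInfo") {
    // Body shape is assumed for illustration only.
    const { userName } = await req.json();
    // ...build the system prompt and call the AI agent here...
    return Response.json({ ok: true, userName });
  }

  // Everything else gets the client-side HTML page.
  return new Response("<h1>HomeAdvantage AI</h1>", {
    headers: { "content-type": "text/html; charset=utf-8" },
  });
}
```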
// callOpenAI function (same as original, but uses gpt-4o by default)
async function callOpenAI(
  openaiInstance: OpenAI,
  systemPrompt: string,
  userMessage: string, // For this app, userMessage to AI might be empty if all info is in systemPrompt
  // ...
): Promise<object | string> {
  // ... (implementation from original)
  log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
  try {
    const response = await openaiInstance.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
// ...
  .replace("%%USER_NAME%%", analysisInput.userName);
// The %%INPUT_SOURCE_DESCRIPTION%% and %%LEGAL_TASK_QUERY%% are not in the new prompt in this direct way.
// The userMessage to openAI can be kept minimal or empty as the system prompt is rich.
const userMessageForAI = ""; // Or analysisInput.documentText if you want to provide more context there.

const analysisAgentName = "HomeAdvantage AI";
const aiResponse = await callOpenAI(
  openai,
  finalSystemPrompt,
  userMessageForAI,
  // ...
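`finalSystemPrompt` is produced by substituting `%%...%%` placeholders into a prompt template before the `callOpenAI` call above. A small sketch of that pattern follows; the template text and the `AnalysisInput` shape are assumptions, and only the `%%USER_NAME%%` placeholder comes from the excerpt.

```ts
interface AnalysisInput {
  userName: string;
  documentText?: string;
}

// Hypothetical template; the real one is much richer.
const SYSTEM_PROMPT_TEMPLATE =
  "You are HomeAdvantage AI. Prepare loan assumption guidance for %%USER_NAME%%. Respond in JSON only.";

function buildSystemPrompt(template: string, input: AnalysisInput): string {
  return template.replace("%%USER_NAME%%", input.userName);
}

const finalSystemPrompt = buildSystemPrompt(SYSTEM_PROMPT_TEMPLATE, { userName: "Ada" });
console.log(finalSystemPrompt);
```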
// ...
}

export default async function(req: Request) {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { fetch } = await import("https://esm.town/v/std/fetch");
  const { PDFExtract, PDFExtractOptions } = await import("npm:pdf.js-extract");
  // ...
  const action = url.searchParams.get("action");
  const sourceUrl = import.meta.url.replace("esm.town", "val.town");
  const openai = new OpenAI();
  const MAX_TEXT_LENGTH_SUGGESTION = 20000;
  const MAX_TEXT_LENGTH_ANALYSIS = 30000;
  // ...
  }
}
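This version imports `pdf.js-extract`, so PDF uploads are turned into plain text before they reach the model. The elided helper probably looks something like the sketch below; the function name and the upload handling are assumptions, only the library comes from the excerpt.

```ts
import { Buffer } from "node:buffer";
import { PDFExtract } from "npm:pdf.js-extract";

// Hypothetical helper: extract the text of every page of an uploaded PDF.
async function extractPdfText(file: File): Promise<string> {
  const pdfExtract = new PDFExtract();
  const buffer = Buffer.from(await file.arrayBuffer());
  const data = await pdfExtract.extractBuffer(buffer, {});
  return data.pages
    .map(page => page.content.map(item => item.str).join(" "))
    .join("\n\n");
}
```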
async function callOpenAI(
  openaiInstance: OpenAI,
  systemPrompt: string,
  userMessage: string,
  // ...
  agentName: string,
): Promise<object | string> {
  log.push({ agent: agentName, type: "step", message: `Calling OpenAI model ${model}...` });
  try {
    const response = await openaiInstance.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
// ...
);
const suggestionAgentName = "Task Suggestion AI";
const suggestionsResponse = await callOpenAI(
  openai,
  suggestionPrompt,
  "",
// ...
);
const analysisAgentName = "Legal Analysis AI";
const aiResponse = await callOpenAI(
  openai,
  finalSystemPrompt,
  documentTextToAnalyze,
  // ...
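Both agents receive document text that is capped by the `MAX_TEXT_LENGTH_*` constants shown earlier. A trivial sketch of that capping step is below; the helper name and the truncation marker are assumptions.

```ts
// Hypothetical helper: cap the text sent to the model.
function truncateForModel(text: string, maxLength: number): string {
  return text.length <= maxLength
    ? text
    : text.slice(0, maxLength) + "\n\n[...truncated...]";
}

// e.g. suggestions get up to 20,000 characters, analysis up to 30,000:
// const suggestionText = truncateForModel(documentText, MAX_TEXT_LENGTH_SUGGESTION);
// const documentTextToAnalyze = truncateForModel(documentText, MAX_TEXT_LENGTH_ANALYSIS);
```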
/**
 * Legal AI Document Analysis (Single Val Version with PDF Upload & Dashboard Style)
 * Ingests documents (URL, Text, PDF Upload), takes a user-defined legal task query,
 * and uses a Legal AI Agent (via OpenAI) to analyze the content.
 * The Legal AI Agent outputs a standardized JSON structure.
 * Uses 'npm:pdf.js-extract' for direct PDF text extraction within the Val.
 */
// ...
export default async function(req: Request) {
  // --- Dynamic Imports (Inside Handler) ---
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { z } = await import("npm:zod"); // For potential future robust input validation on server
  const { fetch } = await import("https://esm.town/v/std/fetch");
  // ...
}
// --- Helper Function: Call OpenAI API ---
async function callOpenAI(
  openai: OpenAI,
  systemPrompt: string,
  userMessage: string, // This will be the document text
  // ...
): Promise<object | string> { // Return type can be object if JSON, or string if error
  try {
    const response = await openai.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
      // ...
      return JSON.parse(content);
    } catch (parseError) {
      console.error("OpenAI JSON Parse Error:", parseError, "Raw Content:", content);
      // Return the raw content if parsing fails, for debugging, wrapped as an error object
      return {
        // ...
      }
  } catch (error) {
    console.error(`OpenAI API call failed. Error:`, error);
    let errorMessage = "Error communicating with AI model.";
    if (error.message) { errorMessage += ` Details: ${error.message}`; }
    // ...
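The helper parses the model's reply with `JSON.parse` and falls back to an error object when that fails. One way to make the parse more dependable, not shown in the excerpt and therefore an assumption, is to request JSON output explicitly via `response_format`:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Sketch: ask for a JSON object and keep the same parse-or-error-object fallback.
async function callModelForJson(systemPrompt: string, userMessage: string): Promise<object> {
  const openai = new OpenAI();
  const response = await openai.chat.completions.create({
    model: "gpt-4o",
    response_format: { type: "json_object" }, // the prompt must mention JSON for this to be accepted
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: userMessage },
    ],
  });
  const content = response.choices[0]?.message?.content ?? "";
  try {
    return JSON.parse(content);
  } catch (parseError) {
    return { error: "Failed to parse AI response as JSON", rawContent: content };
  }
}
```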
// ...
  log: LogEntry[],
): Promise<{ finalResult?: LegalAIResponse | object; log: LogEntry[] }> {
  const openai = new OpenAI(); // Assumes OPENAI_API_KEY is in environment

  log.push({ agent: "System", type: "step", message: "Starting Legal AI analysis workflow." });
  // ...
  finalSystemPrompt = finalSystemPrompt.replace("%%INPUT_SOURCE_DESCRIPTION%%", input.inputSourceDescription);

  const aiResponse = await callOpenAI(openai, finalSystemPrompt, truncatedText, "gpt-4o", true); // Expect JSON

  if (typeof aiResponse === "object" && (aiResponse as any).error) {
    // Error from callOpenAI helper (API or parsing)
    log.push({
      agent: "Legal AI Agent",
      // ...
    log.push({ agent: "System", type: "final", message: "Workflow finished successfully." });
    return { finalResult: aiResponse as LegalAIResponse, log };
  } else { // Should not happen if expectJson is true and no error object from callOpenAI
    log.push({
      agent: "Legal AI Agent",
      // ...
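Every step of the workflow appends to a shared `log` array. The `LogEntry` type itself is not in this excerpt; judging from the `log.push` calls, it is roughly the following (the exact union of `type` values is an assumption):

```ts
// Sketch of the log entry shape implied by the log.push calls above.
interface LogEntry {
  agent: string;                    // e.g. "System", "Legal AI Agent"
  type: "step" | "error" | "final"; // "step" and "final" appear above; "error" is assumed
  message: string;
}

const log: LogEntry[] = [];
log.push({ agent: "System", type: "step", message: "Starting Legal AI analysis workflow." });
```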
Townie/system_prompt.txt (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
Townie.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
/**
 * Legal AI Document Analysis (Single Val Version with PDF Upload & Dashboard Style)
 * Ingests documents (URL, Text, PDF Upload), takes a user-defined legal task query,
 * and uses a Legal AI Agent (via OpenAI) to analyze the content.
 * The Legal AI Agent outputs a standardized JSON structure.
 * Uses 'npm:pdf.js-extract' for direct PDF text extraction within the Val.
 */
// ...
export default async function(req: Request) {
  // --- Dynamic Imports (Inside Handler) ---
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { z } = await import("npm:zod"); // For potential future robust input validation on server
  const { fetch } = await import("https://esm.town/v/std/fetch");
  // ...
}
// --- Helper Function: Call OpenAI API ---
async function callOpenAI(
  openai: OpenAI,
  systemPrompt: string,
  userMessage: string, // This will be the document text
  // ...
): Promise<object | string> { // Return type can be object if JSON, or string if error
  try {
    const response = await openai.chat.completions.create({
      model: model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
      // ...
      return JSON.parse(content);
    } catch (parseError) {
      console.error("OpenAI JSON Parse Error:", parseError, "Raw Content:", content);
      // Return the raw content if parsing fails, for debugging, wrapped as an error object
      return {
        // ...
      }
  } catch (error) {
    console.error(`OpenAI API call failed. Error:`, error);
    let errorMessage = "Error communicating with AI model.";
    if (error.message) { errorMessage += ` Details: ${error.message}`; }
    // ...
// ...
  log: LogEntry[],
): Promise<{ finalResult?: LegalAIResponse | object; log: LogEntry[] }> {
  const openai = new OpenAI(); // Assumes OPENAI_API_KEY is in environment

  log.push({ agent: "System", type: "step", message: "Starting Legal AI analysis workflow." });
  // ...
  finalSystemPrompt = finalSystemPrompt.replace("%%INPUT_SOURCE_DESCRIPTION%%", input.inputSourceDescription);

  const aiResponse = await callOpenAI(openai, finalSystemPrompt, truncatedText, "gpt-4o", true); // Expect JSON

  if (typeof aiResponse === "object" && (aiResponse as any).error) {
    // Error from callOpenAI helper (API or parsing)
    log.push({
      agent: "Legal AI Agent",
      // ...
    log.push({ agent: "System", type: "final", message: "Workflow finished successfully." });
    return { finalResult: aiResponse as LegalAIResponse, log };
  } else { // Should not happen if expectJson is true and no error object from callOpenAI
    log.push({
      agent: "Legal AI Agent",
      // ...
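Because `callOpenAI` returns either the parsed JSON result or an object carrying an `error` field, callers have to narrow that union before treating it as a `LegalAIResponse`, which is what the `typeof aiResponse === "object" && (aiResponse as any).error` check above does. A small type-guard sketch of the same idea follows; the guard and interface names are assumptions.

```ts
// Hypothetical sketch: narrow callOpenAI's result-or-error union before use.
interface AIErrorResult {
  error: string;
  rawContent?: string;
}

function isAIError(result: unknown): result is AIErrorResult {
  return typeof result === "object" && result !== null && "error" in result;
}

// Usage, mirroring the branch above:
// if (isAIError(aiResponse)) { /* log the failure and return the error */ }
// else { return { finalResult: aiResponse as LegalAIResponse, log }; }
```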