  }

  // Prepare the data for OpenAI
  const reviewData = reviews.map(review => ({
    reviewer: review.name,
    // ...
  }));

  // Format the prompt for OpenAI
  const prompt = `
You are judging a cake competition. Based on the following reviews of two cakes,
...`;

  try {
    // Make the API call to OpenAI
    const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");

    const response = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${OPENAI_API_KEY}`,
      },
      body: JSON.stringify({
        // ...
      }),
    });

    if (!response.ok) {
      const errorData = await response.json();
      console.error("OpenAI API error:", errorData);
      throw new Error(`OpenAI API error: ${errorData.error?.message || "Unknown error"}`);
    }

    const data = await response.json();
    return data.choices[0].message.content.trim();
  } catch (error) {
    console.error("Error analyzing reviews with OpenAI:", error);
  }
}
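
For reference, a minimal self-contained sketch of the same fetch-based pattern with the elided request body spelled out. The helper name, model choice, and temperature below are illustrative assumptions, not the original val's code:

// Minimal sketch of a direct Chat Completions call over fetch.
// The request-body fields are an assumed shape for the elided section.
async function askOpenAI(prompt: string): Promise<string> {
  const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${OPENAI_API_KEY}`,
    },
    body: JSON.stringify({
      model: "gpt-4o-mini", // assumed model; the original's choice is elided
      messages: [{ role: "user", content: prompt }],
      temperature: 0.7, // assumed; tune for more or less variable output
    }),
  });

  if (!response.ok) {
    const errorData = await response.json();
    throw new Error(`OpenAI API error: ${errorData.error?.message || "Unknown error"}`);
  }

  const data = await response.json();
  return data.choices[0].message.content.trim();
}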

export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  if (request.method === "POST") {
    try {
      const { message } = await request.json();
      const completion = await openai.chat.completions.create({
        messages: [{ role: "user", content: message }],
        model: "gpt-4o-mini",
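
The handler above is cut off right after the model option. A minimal sketch of how such a handler is commonly finished, returning the assistant's reply as JSON; the response shape and error handling here are assumptions, not the original code:

export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  if (request.method === "POST") {
    try {
      const { message } = await request.json();
      const completion = await openai.chat.completions.create({
        messages: [{ role: "user", content: message }],
        model: "gpt-4o-mini",
      });
      // Return the assistant's reply as JSON (assumed response shape)
      return new Response(
        JSON.stringify({ reply: completion.choices[0].message.content }),
        { headers: { "Content-Type": "application/json" } },
      );
    } catch (error) {
      return new Response(JSON.stringify({ error: String(error) }), {
        status: 500,
        headers: { "Content-Type": "application/json" },
      });
    }
  }
  return new Response("Method not allowed", { status: 405 });
}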

export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const body = await request.json();
    const messages = body.messages || [];

    const stream = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: messages,
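
This variant streams the completion back to the client. A sketch of the usual way to finish it, assuming the Val Town std/openai client mirrors the official SDK's async-iterable streaming interface (`stream: true` plus a chunk loop); the ReadableStream plumbing below is an assumption, since the original val's response handling is elided:

export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const body = await request.json();
    const messages = body.messages || [];

    // Request a streamed completion; each chunk carries a small delta of text.
    const stream = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: messages,
      stream: true,
    });

    // Forward the deltas to the client as a plain text stream.
    const readable = new ReadableStream({
      async start(controller) {
        const encoder = new TextEncoder();
        for await (const chunk of stream) {
          const delta = chunk.choices[0]?.delta?.content || "";
          if (delta) controller.enqueue(encoder.encode(delta));
        }
        controller.close();
      },
    });

    return new Response(readable, {
      headers: { "Content-Type": "text/plain; charset=utf-8" },
    });
  }
  return new Response("Not found", { status: 404 });
}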

export default async function server(request: Request): Promise<Response> {
  let OpenAI, sqlite, blob;
  try {
    // Dynamic imports with error handling
    const openAIModule = await import("https://esm.town/v/std/openai");
    const sqliteModule = await import("https://esm.town/v/stevekrouse/sqlite");
    const blobModule = await import("https://esm.town/v/std/blob");

    OpenAI = openAIModule.OpenAI;
    sqlite = sqliteModule.sqlite;
    blob = blobModule.blob;
    // ...

    const { messages } = await request.json();
    try {
      if (!OpenAI) {
        throw new Error("OpenAI module not initialized");
      }

      const openai = new OpenAI();
      const chatCompletion = await openai.chat.completions.create({
        model: "gpt-4o-mini",
        messages: messages.map(m => ({
          // ...

    try {
      if (!OpenAI) {
        throw new Error("OpenAI module not initialized");
      }

      const openai = new OpenAI();
      let mediaUrl;

      switch (mode) {
        case "image-gen":
          const imageResponse = await openai.images.generate({
            model: "dall-e-3",
            prompt: input,
            // ...
          : "Transform a static photo into a dynamic, moving scene with subtle camera movements";

          const photoToVideoResponse = await openai.images.generate({
            model: "dall-e-3",
            prompt: videoGenerationPrompt,
            // ...

        case "text-to-video":
          const textToVideoResponse = await openai.images.generate({
            model: "dall-e-3",
            prompt: `Create a cinematic video scene representing: ${input}. Make it look like a movie trailer.`,
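
All three images.generate calls above are truncated before their results are read. With DALL·E 3, each call returns a single generated image whose hosted URL lives at data[0].url, which is presumably what feeds mediaUrl. A minimal standalone sketch; the size option and example prompt are assumptions:

// Minimal sketch: generate an image with DALL·E 3 and read back its hosted URL.
const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();

const imageResponse = await openai.images.generate({
  model: "dall-e-3",
  prompt: "A watercolor painting of a lighthouse at dusk", // example prompt
  n: 1, // DALL·E 3 returns exactly one image per request
  size: "1024x1024", // assumed size
});

// Each generated image comes back as a URL (or base64 when requested).
const mediaUrl = imageResponse.data[0].url;
console.log(mediaUrl);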

export default async function server(request: Request): Promise<Response> {
  if (request.method === 'POST' && new URL(request.url).pathname === '/chat') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const body = await request.json();
    const completion = await openai.chat.completions.create({
      messages: body.messages,
      model: "gpt-4o-mini",

export default async function server(request: Request): Promise<Response> {
  if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { text } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: [
        {
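
The messages array is cut off just as it opens. A common shape for this kind of /analyze endpoint is a fixed system prompt plus the user-supplied text. The sketch below continues from the openai client and the text binding in the snippet above; the system prompt wording and the returned analysis field are assumptions:

    const completion = await openai.chat.completions.create({
      messages: [
        {
          role: "system",
          // Assumed instruction; the original system prompt is not shown.
          content: "You are a careful text analyst. Summarize the passage and assess its sentiment.",
        },
        { role: "user", content: text },
      ],
      model: "gpt-4o-mini",
    });

    return new Response(
      JSON.stringify({ analysis: completion.choices[0].message.content }),
      { headers: { "Content-Type": "application/json" } },
    );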

// Configure LLM
await retryOperation(() => catClientRef.current.api.llm.upsertLlmSetting('LLMOpenAIConfig', {
  openai_api_key: config.openaiApiKey
}));

// ...
  secure: Deno.env.get("CCAT_CORE_USE_SECURE_PROTOCOLS") === "true",
  credential: Deno.env.get("CCAT_API_KEY"),
  openaiApiKey: Deno.env.get("OPENAI_API_KEY"),
};

export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/analyze") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    try {
      // ...
      const limitedData = jsonData.slice(0, 50);

      // Use OpenAI for intelligent analysis with more specific instructions
      const analysisPrompt = `Provide a concise data analysis and forecasting insights:
      - Total rows: ${jsonData.length}
      ...
      Dataset preview: ${JSON.stringify(limitedData.slice(0, 5))}`;

      const completion = await openai.chat.completions.create({
        messages: [{ role: "user", content: analysisPrompt }],
        model: "gpt-4o-mini",

export default async function server(request: Request) {
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const KEY = "groundedWhiteFowl";
  const SCHEMA_VERSION = 7; // Incrementing schema version for tracking
  // ...
  }

  const openai = new OpenAI();

  // Ensure messages are in the correct format for OpenAI
  const formattedMessages = [
    {
      // ...
    },
  ];

  const completion = await openai.chat.completions.create({
    messages: formattedMessages,
    model: "gpt-4o-mini",