import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
import OpenAI from "openai";
import { ITERATIONS_TABLE, KEY, PROJECTS_TABLE, SCHEMA_VERSION } from "./migrations";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

export async function createProject(prompt: string) {
  // ...
}

// Example of using OpenAI (you'll need to adapt this to your specific use case)
export async function generateText(prompt: string) {
  try {
    const completion = await openai.chat.completions.create({
      messages: [{ role: "user", content: prompt }],
      model: "gpt-4-turbo-preview",
      // ...
    });
    return completion.choices[0].message?.content || "No response";
  } catch (error) {
    console.error("OpenAI Error:", error);
    return "Error generating text";
  }
}
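// Usage sketch (an assumption, not part of the val above): once deployed, the exported
// generateText helper can be imported and awaited from another val. The import URL below
// is hypothetical.
import { generateText } from "https://esm.town/v/yourname/yourval";

const reply = await generateText("Summarize the current project status in one sentence.");
console.log(reply);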
import OpenAI from "openai";
import STARTER_PROMPTS from "../public/starter-prompts.js";

  // ...
    };
  } else {
    const openai = new OpenAI({
      // Read the API key from the environment instead of hard-coding it in source.
      apiKey: Deno.env.get("OPENAI_API_KEY"),
    });
    const completion = await openai.chat.completions.create({
      messages: [
        {
          // ...
        },
      ].filter(Boolean),
      model: "gpt-4-turbo-preview", // Or another suitable OpenAI model
      max_tokens: 2000, // Adjust as needed
    });

    return {
      code: extractCodeFromFence(completion.choices[0].message.content),
      time: 0, // OpenAI doesn't directly provide completion time
      totalTokens: completion.usage?.total_tokens || 1,
    };
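// extractCodeFromFence is referenced above but not shown in this excerpt. A minimal sketch
// of what such a helper might look like, assuming the model returns its code inside a
// markdown code fence (this is an assumption, not the original helper):
function extractCodeFromFence(text: string): string {
  // Capture everything between the first ``` fence (optionally tagged with a language) and the closing fence.
  const match = text.match(/```(?:\w+)?\n([\s\S]*?)```/);
  return match ? match[1].trim() : text.trim();
}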
export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { messages, rawInput } = await request.json();

    const constrainedMessages = [
      // ...
    ];

    const stream = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: constrainedMessages,
      // ...
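// Sketch of how the stream above might be piped back to the client (an assumption about the
// surrounding val, not its actual code). Assuming `stream: true` is set in the elided options,
// the client from https://esm.town/v/std/openai behaves like the official openai SDK and
// yields chunks whose text lives in choices[0]?.delta?.content.
const body = new ReadableStream({
  async start(controller) {
    const encoder = new TextEncoder();
    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta?.content ?? "";
      if (delta) controller.enqueue(encoder.encode(delta));
    }
    controller.close();
  },
});
return new Response(body, {
  headers: { "Content-Type": "text/plain; charset=utf-8" },
});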
  setIsLoading(true);
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const analysisPrompt = `Comprehensive Medical Report Analysis:
...
Respond with a structured, compassionate, and informative analysis that prioritizes patient understanding and next steps.`;

    const analysis = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: [
export default async function server(request: Request): Promise<Response> {
  if (request.method === 'POST' && new URL(request.url).pathname === '/chat') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { messages } = await request.json();

    const stream = await openai.chat.completions.create({
      model: "gpt-4o-mini",
      messages: messages,
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

  // ...
  try {
    const { text: compliments } = await generateText({
      model: openai("gpt-4o-mini"),
      system: "You are a creative compliment generator.",
      prompt: "Generate 5 unique, thoughtful, and specific compliments about a person named Traves.",
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

  // ...

  const { text: generatedCompliment } = await generateText({
    model: openai("gpt-4o-mini"),
    system: "You are a creative compliment generator.",
    prompt: `Generate a compliment for: ${compliment}`,
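// The fragments above use the Vercel AI SDK (npm:ai) with the @ai-sdk/openai provider, which
// reads OPENAI_API_KEY from the environment. A self-contained sketch of that pattern as a
// Val Town HTTP val; the request/response shape here is an assumption for illustration.
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

export default async function (req: Request): Promise<Response> {
  const { prompt } = await req.json();
  // generateText resolves once the full completion is available (no streaming).
  const { text } = await generateText({
    model: openai("gpt-4o-mini"),
    system: "You are a concise assistant.",
    prompt,
  });
  return Response.json({ text });
}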

// Read the OpenAI API key from an environment variable instead of hard-coding it in source.
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");

  // ...
      : "You are an expert guide helping users accelerate their learning and mastery of skills. Provide concise, actionable insights that help people learn faster and more effectively.");

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Authorization": `Bearer ${OPENAI_API_KEY}`,
      "Content-Type": "application/json",
    },
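// Sketch of how a fetch call like the one above is typically completed: a JSON body with
// model and messages, then the reply read from choices[0].message.content. The model name,
// prompts, and error handling below are assumptions, not the original val's code.
const systemPrompt = "You are an expert guide helping users learn faster."; // assumed
const userMessage = "How can I practice more deliberately?"; // assumed example input

const res = await fetch("https://api.openai.com/v1/chat/completions", {
  method: "POST",
  headers: {
    "Authorization": `Bearer ${OPENAI_API_KEY}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: userMessage },
    ],
  }),
});
if (!res.ok) throw new Error(`OpenAI API error: ${res.status}`);
const data = await res.json();
const replyText = data.choices[0]?.message?.content ?? "";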
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

  // ...
  }
  const { text: processedOutput } = await generateText({
    model: openai("gpt-4o-mini"),
    system: "You are a helpful assistant.",
    prompt: "Process the following input based on the requirements: " + body.input,

export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  // Enhanced server-side logging
  // ...
  `;

  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },
    // ...
  `;

  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },
    // ...
  `;

  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: prompt }],
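// Two of the completions above request response_format: { type: "json_object" }, which asks
// the model to return a single JSON object. A minimal sketch of that pattern with the
// Val Town std/openai client; the prompt and field names are assumptions for illustration.
const { OpenAI } = await import("https://esm.town/v/std/openai");
const client = new OpenAI();
const jsonCompletion = await client.chat.completions.create({
  model: "gpt-4o-mini",
  response_format: { type: "json_object" },
  messages: [
    // JSON mode requires the word "JSON" to appear somewhere in the messages.
    { role: "user", content: "Return a JSON object with keys `title` and `summary` for an article about spaced repetition." },
  ],
});
// The content is a JSON string; parse it before use.
const parsed = JSON.parse(jsonCompletion.choices[0].message.content ?? "{}");
console.log(parsed.title, parsed.summary);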