1import { OpenAI } from "https://esm.town/v/std/openai";
2
3interface SchemaResponse {
39 }
40
41 const openai = new OpenAI();
42 let completion;
43 try {
44 completion = await openai.chat.completions.create({
45 messages: [
46 { role: "system", content: "You are a backend schema generator. Generate a backend schema based on the given project description." },
51 });
52 } catch (error) {
53 console.error("Error calling OpenAI API:", error);
54 return new Response("Error generating schema. Please try again later.", { status: 500, headers });
55 }
59 schemaResponse = JSON.parse(completion.choices[0].message.content);
60 } catch (error) {
61 console.error("Error parsing OpenAI response:", error);
62 return new Response("Error processing the generated schema. Please try again.", { status: 500, headers });
63 }
480export default async function server(request: Request): Promise<Response> {
481 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
482 const { OpenAI } = await import("https://esm.town/v/std/openai");
483 const openai = new OpenAI();
484
485 try {
514
515 try {
516 const completion = await openai.chat.completions.create({
517 model: "gpt-4o-mini",
518 messages: [
578 }
579
580 console.error('OpenAI processing error:', processingError);
581 return new Response(
582 language === 'en'
206export default async function server(request: Request): Promise<Response> {
207 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
208 const { OpenAI } = await import("https://esm.town/v/std/openai");
209 const openai = new OpenAI();
210
211 try {
222 );
223
224 const completion = await openai.chat.completions.create({
225 model: "gpt-4o-mini",
226 messages: [
100export default async function server(request: Request): Promise<Response> {
101 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
102 const { OpenAI } = await import("https://esm.town/v/std/openai");
103 const openai = new OpenAI();
104
105 try {
116 );
117
118 const completion = await openai.chat.completions.create({
119 model: "gpt-4o-mini",
120 messages: [
181 const { messages, location } = await request.json();
182
183 const { OpenAI } = await import("https://esm.sh/openai");
184 const client = new OpenAI({
185 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
186 baseURL: "https://api.cerebras.ai/v1",
309
310 // Cerebras LLM for response generation
311 const { OpenAI } = await import("https://esm.sh/openai");
312 const llmClient = new OpenAI({
313 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
314 baseURL: "https://api.cerebras.ai/v1",
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"
15import { generateOpenGraphTags, OpenGraphData } from "https://esm.town/v/dthyresson/generateOpenGraphTags"
16import { ValTownLink } from "https://esm.town/v/dthyresson/viewOnValTownComponent"
17import { chat } from "https://esm.town/v/stevekrouse/openai"
18import * as fal from "npm:@fal-ai/serverless-client"
19
13* and activity (befriends aliens, goes to the doctor, rides a rollercoaster, bakes a cake for friends)
14
15It uses OpenAI to write a children's bedtime story
16
17* title
21for a "fantastical story about a green whale who rides the bus" or the "spooky story about the tomato fox who explores a cave".
22
23Then using the summary, OpenAI generates another prompt to describe the instructions to generate a children's story book image.
24
25That's sent to Fal to generate an image.
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"