1import OpenAI from "https://esm.sh/openai";
2
3function parseValResponse(response: string) {
28 const system = await (await fetch(`${import.meta.url.split("/").slice(0, -1).join("/")}/system_prompt.txt`)).text();
29
30 const openai = new OpenAI({
31 baseURL: "https://openrouter.ai/api/v1",
32 apiKey: Deno.env.get("OPEN_ROUTER_KEY"),
33 });
34 console.log(messages);
35 const completion = await openai.chat.completions.create({
36 model: "deepseek/deepseek-r1",
37 messages: [
83
84 <library>
85 ## OpenAI
86
87 Val Town includes a free, proxied OpenAI:
88
89 ```ts
90 import { OpenAI } from "https://esm.town/v/std/openai";
91 const openai = new OpenAI();
92 const completion = await openai.chat.completions.create({
93 messages: [
94 { role: "user", content: "Say hello in a creative way" },
99 ```
100
101 OpenAI only works on the server. If the val includes client-side code, use dynamic imports to import this module in the server function, e.g.:
102 `const { OpenAI } = await import("https://esm.town/v/std/openai");`
103 </library>
104
106 if (request.method === 'POST' && new URL(request.url).pathname === '/identify') {
107 try {
108 const { OpenAI } = await import("https://esm.town/v/std/openai");
109 const openai = new OpenAI();
110
111 const formData = await request.formData();
124 );
125
126 const response = await openai.chat.completions.create({
127 model: "gpt-4o",
128 messages: [
1import { OpenAI } from "https://esm.town/v/std/openai";
2
3interface SchemaResponse {
39 }
40
41 const openai = new OpenAI();
42 let completion;
43 try {
44 completion = await openai.chat.completions.create({
45 messages: [
46 { role: "system", content: "You are a backend schema generator. Generate a backend schema based on the given project description." },
51 });
52 } catch (error) {
53 console.error("Error calling OpenAI API:", error);
54 return new Response("Error generating schema. Please try again later.", { status: 500, headers });
55 }
59 schemaResponse = JSON.parse(completion.choices[0].message.content);
60 } catch (error) {
61 console.error("Error parsing OpenAI response:", error);
62 return new Response("Error processing the generated schema. Please try again.", { status: 500, headers });
63 }
480export default async function server(request: Request): Promise<Response> {
481 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
482 const { OpenAI } = await import("https://esm.town/v/std/openai");
483 const openai = new OpenAI();
484
485 try {
514
515 try {
516 const completion = await openai.chat.completions.create({
517 model: "gpt-4o-mini",
518 messages: [
578 }
579
580 console.error('OpenAI processing error:', processingError);
581 return new Response(
582 language === 'en'
206export default async function server(request: Request): Promise<Response> {
207 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
208 const { OpenAI } = await import("https://esm.town/v/std/openai");
209 const openai = new OpenAI();
210
211 try {
222 );
223
224 const completion = await openai.chat.completions.create({
225 model: "gpt-4o-mini",
226 messages: [
100export default async function server(request: Request): Promise<Response> {
101 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
102 const { OpenAI } = await import("https://esm.town/v/std/openai");
103 const openai = new OpenAI();
104
105 try {
116 );
117
118 const completion = await openai.chat.completions.create({
119 model: "gpt-4o-mini",
120 messages: [
181 const { messages, location } = await request.json();
182
183 const { OpenAI } = await import("https://esm.sh/openai");
184 const client = new OpenAI({
185 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
186 baseURL: "https://api.cerebras.ai/v1",
309
310 // Cerebras LLM for response generation
311 const { OpenAI } = await import("https://esm.sh/openai");
312 const llmClient = new OpenAI({
313 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
314 baseURL: "https://api.cerebras.ai/v1",
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"