import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  // ...
    });
  }
  const openai = new OpenAI();

  try {
    // ...
    }

    const stream = await openai.chat.completions.create(body);

    if (!body.stream) {
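// A minimal sketch (not the elided code) of how an HTTP val like the excerpt
// above typically finishes both branches: when body.stream is false the
// completion is returned as JSON, otherwise the chunks are re-emitted as
// server-sent events. The request-body shape and headers are assumptions.
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  const body = await req.json();
  const openai = new OpenAI();
  const result = await openai.chat.completions.create(body);

  if (!body.stream) {
    // Non-streaming: result is a plain ChatCompletion object.
    return Response.json(result);
  }

  // Streaming: result is an async iterable of ChatCompletionChunk objects.
  const encoder = new TextEncoder();
  const readable = new ReadableStream({
    async start(controller) {
      for await (const chunk of result as any) {
        controller.enqueue(encoder.encode(`data: ${JSON.stringify(chunk)}\n\n`));
      }
      controller.enqueue(encoder.encode("data: [DONE]\n\n"));
      controller.close();
    },
  });
  return new Response(readable, {
    headers: { "Content-Type": "text/event-stream" },
  });
}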
const finalBudget = budget.length > 0 ? budget : [budgetOptions[Math.floor(Math.random() * budgetOptions.length)]];

const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [{
    role: "system",
    // ...

try {
  const content = completion.choices[0].message.content || "{}";
  console.log("Raw OpenAI response:", content);
  const parsedContent = JSON.parse(content);

  // ...
  }));
} catch (error) {
  console.error("Failed to parse OpenAI response:", error);
  return new Response(JSON.stringify({ error: "Failed to generate suggestions" }), {
    status: 500,
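// The excerpt above JSON.parses the model's reply and falls back to "{}". A
// minimal sketch, assuming you want the reply to be reliably parseable and
// assuming the Val Town proxy forwards response_format like other chat
// options (OpenAI's "JSON mode"). The prompt and field names are invented.
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  response_format: { type: "json_object" },
  messages: [
    // JSON mode requires the word "JSON" to appear somewhere in the prompt.
    { role: "system", content: 'Reply with a JSON object: {"suggestions": string[]}.' },
    { role: "user", content: "Suggest three weekend activities on a small budget." },
  ],
});

const parsed = JSON.parse(completion.choices[0].message.content || "{}");
console.log(parsed.suggestions);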
import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const CalendarEvent = z.object({
  // ...
});

const completion = await openai.beta.chat.completions.parse({
  model: "gpt-4o-mini",
  messages: [
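// A sketch of how the truncated call above is typically completed:
// zodResponseFormat(schema, name) is passed as response_format, and the parsed
// object comes back on message.parsed. The CalendarEvent fields and the
// prompts here are assumptions (the original schema body is elided).
import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
import { z } from "https://esm.sh/zod";
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const CalendarEvent = z.object({
  name: z.string(),
  date: z.string(),
  participants: z.array(z.string()),
});

const completion = await openai.beta.chat.completions.parse({
  model: "gpt-4o-mini",
  messages: [
    { role: "system", content: "Extract the event information." },
    { role: "user", content: "Alice and Bob are going to a science fair on Friday." },
  ],
  response_format: zodResponseFormat(CalendarEvent, "event"),
});

// Typed result (or null if the model refused).
const event = completion.choices[0].message.parsed;
console.log(event);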
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

export default async function(req: Request): Promise<Response> {
  // ...

    const messages = prepareMessages(selection, followUp, context);
    log.info("Prepared messages for OpenAI:", JSON.stringify(messages));

    const openai = new OpenAI();
    const stream = await openai.chat.completions.create({
      messages,
      model: config.MODEL_NAME,
      // ...
      stream: true,
    });
    log.info("OpenAI stream created successfully");

    return streamResponse(stream);
  } catch (error) {
    log.error("Error in OpenAI request:", error);
    return handleError(error);
  }
  // ...
      const content = chunk.choices[0]?.delta?.content || "";
      if (content) {
        // log.debug("Received chunk from OpenAI:", content);
        const encodedChunk = encoder.encode(JSON.stringify({ chunk: content }) + "\n");
        controller.enqueue(encodedChunk);
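// A sketch of what the streamResponse helper referenced above might look like,
// built around the chunk-encoding loop shown in the excerpt (newline-delimited
// JSON objects of the form { chunk: string }). The helper name comes from the
// excerpt; the headers and ReadableStream scaffolding are assumptions.
function streamResponse(stream: AsyncIterable<any>): Response {
  const encoder = new TextEncoder();
  const body = new ReadableStream({
    async start(controller) {
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content || "";
        if (content) {
          controller.enqueue(encoder.encode(JSON.stringify({ chunk: content }) + "\n"));
        }
      }
      controller.close();
    },
  });
  return new Response(body, {
    headers: { "Content-Type": "application/x-ndjson" },
  });
}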
const url = new URL(request.url);
const { blob } = await import("https://esm.town/v/std/blob");
const { OpenAI } = await import("https://esm.town/v/std/openai");

const openai = new OpenAI();

// Simple rate limiting
// ...

try {
  const completion = await openai.chat.completions.create({
    messages: [
      {
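// The "Simple rate limiting" section above is elided. One common Val Town
// pattern is a counter kept in std/blob, sketched here; the key name, hourly
// window, and limit of 50 are assumptions, not the original code (and
// blob.getJSON returning undefined for a missing key is assumed).
import { blob } from "https://esm.town/v/std/blob";

async function checkRateLimit(limit = 50): Promise<Response | null> {
  // One counter bucket per hour, e.g. "rate-limit-2024-05-01T12".
  const key = `rate-limit-${new Date().toISOString().slice(0, 13)}`;
  const count = ((await blob.getJSON(key)) as number | undefined) ?? 0;
  if (count >= limit) {
    return new Response("Rate limit exceeded, try again later", { status: 429 });
  }
  await blob.setJSON(key, count + 1);
  return null;
}

// In the handler: const limited = await checkRateLimit(); if (limited) return limited;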
import { fetch } from "https://esm.town/v/std/fetch";
import { OpenAI } from "https://esm.town/v/std/openai";
import process from "node:process";

const openai = new OpenAI();

// Depending on the day do one of these:
// ...

console.log(prompt);

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: prompt },
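// The "Depending on the day" logic above is elided. A minimal illustration of
// one way a scheduled val can pick its prompt by weekday; the prompt texts are
// invented, and the model/token settings are just an example.
import { OpenAI } from "https://esm.town/v/std/openai";

const weekdayPrompts = [
  "Write a two-line poem about Sundays.",
  "Share one surprising fact about the ocean.",
  "Suggest a five-minute desk stretch routine.",
];
const prompt = weekdayPrompts[new Date().getDay() % weekdayPrompts.length];

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [{ role: "user", content: prompt }],
  model: "gpt-4o-mini",
  max_tokens: 200,
});
console.log(completion.choices[0].message.content);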
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * ...
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  get beta() {
    return {
      chat: this.rawOpenAIClient.beta.chat,
    };
  }
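// Minimal usage of the wrapper above: the caller configures no API key,
// because requests are routed through Val Town's proxy with the account's own
// credentials. The model and prompt are just an example.
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Say hello in one short sentence." }],
});
console.log(completion.choices[0].message.content);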

export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const url = new URL(request.url);
  // ...

  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [
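// A sketch of how a handler like the excerpt above is commonly finished: the
// truncated create() call gets its messages, and the reply is returned as JSON
// with a catch mirroring the error handling in the earlier excerpts. The
// message content and response shape are assumptions.
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [{ role: "user", content: "Hello!" }],
    });
    return Response.json({ reply: completion.choices[0].message.content });
  } catch (error) {
    return Response.json({ error: String(error) }, { status: 500 });
  }
}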