179export default async function server(request: Request): Promise<Response> {
180 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
181 const { OpenAI } = await import("https://esm.town/v/std/openai");
182
183 const KEY = "resourcefulPurpleBobolink";
205 if (request.method === "POST") {
206 const { query } = await request.json();
207 const openai = new OpenAI();
208
209 // Retrieve previous conversation context
220 ).join("\n");
221
222 const completion = await openai.chat.completions.create({
223 messages: [
224 {
277export default async function server(request: Request): Promise<Response> {
278 if (request.method === "POST") {
279 const { OpenAI } = await import("https://esm.town/v/std/openai");
280 const openai = new OpenAI();
281
282 const body = await request.json();
290 : "You are Thunder, an intelligent and helpful AI assistant created by Team HTKhan. You are skilled in conversing about various topics, answering questions, and providing assistance. and HTKhan refers to Huzaifa Taif Khan,Provide friendly, clear, and helpful responses.";
291
292 const completion = await openai.chat.completions.create({
293 messages: [
294 { role: "system", content: systemPrompt },
155export default async function server(request: Request): Promise<Response> {
156 if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
157 const { OpenAI } = await import("https://esm.town/v/std/openai");
158 const openai = new OpenAI();
159
160 try {
205 ];
206
207 const completion = await openai.chat.completions.create({
208 messages: completionMessages,
209 model: "gpt-4o-mini",
1import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
2
3/**
4 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
5 */
6export class OpenAI {
7 private rawOpenAIClient: RawOpenAI;
8
9 /**
10 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11 *
12 * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19 */
20 constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21 this.rawOpenAIClient = new RawOpenAI({
22 ...options,
23 baseURL: "https://std-openaiproxy.web.val.run/v1",
24 apiKey: Deno.env.get("OPENAI_API_KEY"),
25 organization: null,
26 });
28
29 get chat() {
30 return this.rawOpenAIClient.chat;
31 }
32
33 get beta() {
34 return {
35 chat: this.rawOpenAIClient.beta.chat,
36 };
37 }
# OpenAI - [Docs ↗](https://docs.val.town/std/openai)

Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.

For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).

## Basic Usage

```ts title="Example" val
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
});
```
## Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

* **Usage Quota**: We limit each user to 10 requests per minute.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

1. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
2. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
3. Use the `OpenAI` client from `npm:openai`:

```ts title="Example" val
import { OpenAI } from "npm:openai";

const openai = new OpenAI();
```


[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
147export default async function server(request: Request): Promise<Response> {
148 if (request.method === 'POST' && new URL(request.url).pathname === '/translate') {
149 const { OpenAI } = await import("https://esm.town/v/std/openai");
150 const openai = new OpenAI();
151
152 const { text, sourceLanguage, targetLanguage } = await request.json();
153
154 const completion = await openai.chat.completions.create({
155 messages: [
156 {
2import process from "node:process";
3import { marked } from "npm:marked";
4import { OpenAI } from "npm:openai";
5
6function pm(...lines: string[]): string {
16 );
17
18 const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
19 const response = await client.chat.completions.create({
20 model: "sonar",
35 }
36
37 // Prepare the data for OpenAI
38 const reviewData = reviews.map(review => ({
39 reviewer: review.name,
42 }));
43
44 // Format the prompt for OpenAI
45 const prompt = `
46 You are judging a cake competition. Based on the following reviews of two cakes,
57
58 try {
59 // Make the API call to OpenAI
60 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
61
62 const response = await fetch("https://api.openai.com/v1/chat/completions", {
63 method: "POST",
64 headers: {
65 "Content-Type": "application/json",
66 "Authorization": `Bearer ${OPENAI_API_KEY}`,
67 },
68 body: JSON.stringify({
85 if (!response.ok) {
86 const errorData = await response.json();
87 console.error("OpenAI API error:", errorData);
88 throw new Error(`OpenAI API error: ${errorData.error?.message || "Unknown error"}`);
89 }
90
92 return data.choices[0].message.content.trim();
93 } catch (error) {
94 console.error("Error analyzing reviews with OpenAI:", error);
95 }
96}
296
297export default async function server(request: Request): Promise<Response> {
298 const { OpenAI } = await import("https://esm.town/v/std/openai");
299 const openai = new OpenAI();
300
301 if (request.method === "POST") {
302 try {
303 const { message } = await request.json();
304 const completion = await openai.chat.completions.create({
305 messages: [{ role: "user", content: message }],
306 model: "gpt-4o-mini",
296
297export default async function server(request: Request): Promise<Response> {
298 const { OpenAI } = await import("https://esm.town/v/std/openai");
299 const openai = new OpenAI();
300
301 if (request.method === "POST") {
302 try {
303 const { message } = await request.json();
304 const completion = await openai.chat.completions.create({
305 messages: [{ role: "user", content: message }],
306 model: "gpt-4o-mini",