235 const { intervieweeResponse, interviewPosition } = await request.json();
236
237 const { OpenAI } = await import("https://esm.town/v/std/openai");
238 const openai = new OpenAI();
239
240 const completion = await openai.chat.completions.create({
241 messages: [
242 {
1# OpenAI - [Docs ↗](https://docs.val.town/std/openai)
2
3Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
4
5For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).
6
7## Basic Usage
8
9```ts title="Example" val
10import { OpenAI } from "https://esm.town/v/std/openai";
11
12const openai = new OpenAI();
13
14const completion = await openai.chat.completions.create({
15 messages: [
16 { role: "user", content: "Say hello in a creative way" },
58## Limits
59
60While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
61
62* **Usage Quota**: We limit each user to 10 requests per minute.
If these limits are too low, let us know! You can also get around these limits by using your own API keys:
66
671. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
682. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
693. Use the `OpenAI` client from `npm:openai`:
70
71```ts title="Example" val
72import { OpenAI } from "npm:openai";
73
74const openai = new OpenAI();
75```
76
77
78[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
79
80Migrated from folder: Examples/OpenAI
1import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
2
3/**
4 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
5 */
6export class OpenAI {
7 private rawOpenAIClient: RawOpenAI;
8
9 /**
10 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11 *
12 * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19 */
20 constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21 this.rawOpenAIClient = new RawOpenAI({
22 ...options,
23 baseURL: "https://std-openaiproxy.web.val.run/v1",
24 apiKey: Deno.env.get("valtown"),
25 organization: null,
28
29 get chat() {
30 return this.rawOpenAIClient.chat;
31 }
32
33 get beta() {
34 return {
35 chat: this.rawOpenAIClient.beta.chat,
36 };
37 }
1import readline from "node:readline";
2import { createOpenAI } from "npm:@ai-sdk/openai";
3import { type CoreMessage, streamText } from "npm:ai";
4import { configDotenv } from "npm:dotenv";
9const toolKit = new VercelAIToolkit();
10const messages: CoreMessage[] = [];
11const openai = createOpenAI({
12 apiKey: Deno.env.get("OPENAI_API_KEY") || "",
13});
14
24
25 const result = streamText({
26 model: openai("gpt-4o"),
27 tools,
28 maxSteps: 10,
71 setIsAnalyzing(true);
72 try {
73 // Dynamically import OpenAI to ensure server-side compatibility
74 const { OpenAI } = await import("https://esm.town/v/std/openai");
75 const openai = new OpenAI();
76
77 // Detailed AI prompt for comprehensive analysis
78 const response = await openai.chat.completions.create({
79 model: "gpt-4o",
80 messages: [
152
153 // Generate narrative description
154 const narrativeResponse = await openai.chat.completions.create({
155 model: "gpt-4o",
156 messages: [
127export default async function server(request: Request): Promise<Response> {
128 if (request.method === "POST") {
129 const { OpenAI } = await import("https://esm.town/v/std/openai");
130 const openai = new OpenAI();
131
132 const { prompt, language } = await request.json();
140 };
141
142 const completion = await openai.chat.completions.create({
143 messages: [
144 {
34
35 try {
36 const { OpenAI } = await import("https://esm.town/v/std/openai");
37 const openai = new OpenAI();
38
39 // Generate app concept
40 conceptCompletion = await openai.chat.completions.create({
41 model: "gpt-4o-mini",
42 messages: [
54
55 // Generate basic code structure
56 codeCompletion = await openai.chat.completions.create({
57 model: "gpt-4o-mini",
58 messages: [
68 max_tokens: 500
69 });
70 } catch (openaiError) {
71 console.error('OpenAI generation error:', openaiError);
72
73 // Fallback generation
129export default async function server(request: Request): Promise<Response> {
130 if (request.method === 'POST') {
131 const { OpenAI } = await import("https://esm.town/v/std/openai");
132 const openai = new OpenAI();
133
134 try {
135 const { text } = await request.json();
136
137 const completion = await openai.chat.completions.create({
138 messages: [
139 {
452
453export default async function server(request: Request): Promise<Response> {
454 const { OpenAI } = await import("npm:openai");
455 const { zodResponseFormat } = await import("npm:openai/helpers/zod");
456 const { z } = await import("npm:zod");
457 const openai = new OpenAI();
458
459 const QuestionSchema = z.object({
501 currentQuestion,
502 } = await request.json();
503 console.log("OpenAI API Request:", { prompt, topic });
504
505 try {
506 let completion;
507 if (request.url.includes("generate-question")) {
508 completion = await openai.beta.chat.completions.parse({
509 model: "gpt-4o",
510 messages: [
515 });
516 } else if (request.url.includes("generate-exam-questions")) {
517 completion = await openai.beta.chat.completions.parse({
518 model: "gpt-4o",
519 messages: [
524 });
525 } else if (request.url.includes("get-feedback")) {
526 completion = await openai.beta.chat.completions.parse({
527 model: "gpt-4o",
528 messages: [
537 });
538 } else if (request.url.includes("get-exam-feedback")) {
539 completion = await openai.beta.chat.completions.parse({
540 model: "gpt-4o",
541 messages: [
554
555 const response = completion.choices[0].message.parsed;
556 console.log("OpenAI API Response:", response);
557
558 return new Response(JSON.stringify(response), {
560 });
561 } catch (error) {
562 console.error("Error in OpenAI API call:", error);
563 return new Response(JSON.stringify({ error: error.message }), {
564 status: 500,
48 const generateAISuggestion = async () => {
49 try {
50 const { OpenAI } = await import("https://esm.town/v/std/openai");
51 const openai = new OpenAI();
52
53 if (!gameMechanic) {
63 - Potential monetization or player engagement strategy`;
64
65 const response = await openai.chat.completions.create({
66 model: "gpt-4o-mini",
67 messages: [{ role: "user", content: prompt }],
85
86 try {
87 const { OpenAI } = await import("https://esm.town/v/std/openai");
88 const openai = new OpenAI();
89
90 const response = await openai.chat.completions.create({
91 model: "gpt-4o-mini",
92 messages: [