import { email } from "https://esm.town/v/std/email?v=11";
import { OpenAI } from "npm:openai";

let location = "brooklyn ny";
// ...
).then(r => r.json());

const openai = new OpenAI();
let chatCompletion = await openai.chat.completions.create({
  messages: [{
    role: "user",
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  // ...
  }

  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
It contains a simple HTTP endpoint which expects a POST request with a JSON
structure containing the properties "SystemMessage" and "UserMessage". These
messages are then used to run an OpenAI chat completion and produce an
"assistant message", which is sent back to the client as plain text.

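For orientation, here is a minimal sketch of such an endpoint, assuming the request body is parsed as JSON and the two properties are passed straight through as the system and user messages; the actual val's validation and error handling may differ.

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  if (req.method !== "POST") {
    return new Response("Expected a POST request", { status: 405 });
  }

  // Assumed request shape: { "SystemMessage": "...", "UserMessage": "..." }
  const { SystemMessage, UserMessage } = await req.json();

  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: SystemMessage },
      { role: "user", content: UserMessage },
    ],
  });

  // Send the assistant message back as plain text
  return new Response(completion.choices[0].message.content ?? "", {
    headers: { "Content-Type": "text/plain" },
  });
}
```
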
It contains a simple web page which allows users to enter some German (or other
non-English) text and send it to a preconfigured server. That server translates
the text with the help of OpenAI and sends the result back to this app, where it
is finally presented to the user.

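As an illustration only, the in-page script of such an app could look roughly like this; the element selectors and the relative endpoint URL are placeholders, not the app's actual names:

```ts
// Hypothetical client-side script: send the entered text to the server,
// then show the translated result returned as plain text.
const form = document.querySelector("form")!;
const input = document.querySelector("textarea")!;
const output = document.querySelector("#result")!;

form.addEventListener("submit", async (event) => {
  event.preventDefault();
  const response = await fetch("/translate", {
    method: "POST",
    body: input.value, // German (or other non-English) text
  });
  output.textContent = await response.text(); // English translation
});
```
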
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  // ...
  }

  // Initialize OpenAI
  const openai = new OpenAI();

  // Translate the text using OpenAI
  const completion = await openai.chat.completions.create({
    messages: [
      { role: "system", content: "You are a German to English translator. Translate the following text to English:" },

It contains a simple HTTP endpoint which expects a POST request with a text
body. That text is translated to English with the help of OpenAI and sent back
to the client.

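Calling such an endpoint is a single POST with the raw text as the request body, for example (the URL below is a placeholder for the deployed val, not its real address):

```ts
// Hypothetical client call to the translation endpoint.
const response = await fetch("https://example-translator.web.val.run/", {
  method: "POST",
  body: "Guten Morgen, wie geht es dir?",
});

console.log(await response.text()); // e.g. "Good morning, how are you?"
```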

export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const openai = new OpenAI();
  const SCHEMA_VERSION = 2;
  const KEY = "multiUserChatwithLLM";
  // ...
  // Prefix each user message with the sender's name so the model can tell speakers apart
  messages.push({ role: "user", content: `${username}: ${message}` });

  const completion = await openai.chat.completions.create({
    messages,
    model: "gpt-4o-mini",
# OpenAI - [Docs ↗](https://docs.val.town/std/openai)

Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.

For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).

## Usage

```ts title="Example" val
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
});

console.log(completion.choices[0].message.content);
```

## Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

* **Usage Quota**: We limit each user to 10 requests per minute.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

1. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
2. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
3. Use the `OpenAI` client from `npm:openai`:

```ts title="Example" val
import { OpenAI } from "npm:openai";

const openai = new OpenAI();
```
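
The `npm:openai` client reads the `OPENAI_API_KEY` environment variable by default, so the snippet above is enough once the variable from step 2 exists. You can also pass the key explicitly, as in this sketch:

```ts
import { OpenAI } from "npm:openai";

// Explicitly supply the key from the environment variable created in step 2.
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Say hello in a creative way" }],
});

console.log(completion.choices[0].message.content);
```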

[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)

import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * ...
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    // Route requests through Val Town's OpenAI proxy, authenticated with the
    // user's Val Town token rather than an OpenAI API key.
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  readonly beta = {
    get chat(): RawOpenAI["beta"]["chat"] {
      return this.rawOpenAIClient.beta.chat;
    },
  };
}