1# OpenAI - [Docs ↗](https://docs.val.town/std/openai)
2
3Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
4
5For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).
6
7## Basic Usage
8
9```ts title="Example" val
10import { OpenAI } from "https://esm.town/v/std/openai";
11
12const openai = new OpenAI();
13
14const completion = await openai.chat.completions.create({
15 messages: [
16 { role: "user", content: "Say hello in a creative way" },
58## Limits
59
60While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
61
62* **Usage Quota**: We limit each user to 10 requests per minute.
65If these limits are too low, let us know! You can also get around the limitation by using your own keys:
66
671. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
682. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
693. Use the `OpenAI` client from `npm:openai`:
70
71```ts title="Example" val
72import { OpenAI } from "npm:openai";
73
74const openai = new OpenAI();
75```
76
77
78[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
1import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
2
3/**
4 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
5 */
6export class OpenAI {
7 private rawOpenAIClient: RawOpenAI;
8
9 /**
10 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11 *
12 * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19 */
20 constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21 this.rawOpenAIClient = new RawOpenAI({
22 ...options,
23 baseURL: "https://std-openaiproxy.web.val.run/v1",
24 apiKey: Deno.env.get("valtown"),
25 organization: null,
28
  /**
   * Pass-through accessor for the wrapped client's `chat` namespace
   * (chat completions), so callers can use `openai.chat.completions.create(...)`
   * exactly as with the raw `npm:openai` client.
   */
  get chat() {
    return this.rawOpenAIClient.chat;
  }
32
  /**
   * Restricted view of the wrapped client's `beta` API surface: only
   * `beta.chat` is exposed; other beta namespaces are not passed through.
   */
  get beta() {
    return {
      chat: this.rawOpenAIClient.beta.chat,
    };
  }
434export default async function server(request: Request): Promise<Response> {
435 if (request.method === 'POST' && request.url.includes('/recommendations')) {
436 const { OpenAI } = await import("https://esm.town/v/std/openai");
437 const openai = new OpenAI();
438
439 const { latitude, longitude } = await request.json();
459 `;
460
461 const completion = await openai.chat.completions.create({
462 messages: [{ role: "user", content: prompt }],
463 model: "gpt-4o-mini",
35Use Cerebras for AI on the backend like so:
36
37const { OpenAI } = await import("https://esm.sh/openai");
38const client = new OpenAI({
39 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
40 baseURL: "https://api.cerebras.ai/v1"
83 const { messages } = await request.json();
84
85 const { OpenAI } = await import("https://esm.sh/openai");
86 // Cerebras Inference
87 // Very important: do not change this
88 // Keep these comments so we remember not to change this
89 const client = new OpenAI({
90 apiKey: Deno.env.get("CEREBRAS_API_KEY"),
91 baseURL: "https://api.cerebras.ai/v1",
18brian@airbnb.com,Brian Chesky
19drew@dropbox.com,Drew Houston
20sam@openai.com,Sam Altman
21tim@apple.com,Tim Cook
22jeff@amazon.com,Jeff Bezos
1 import { InvocationTracker } from 'https://esm.town/v/rozek/InvocationTracker'
2 import { floatingQuotaTracker } from 'https://esm.town/v/rozek/floatingQuotaTracker'
3 import { OpenAI } from 'https://esm.town/v/std/openai'
4
5// don't forget to define Env.Var "OpenAIChatCompletion" for authorization!
6
7 interface Indexable { [Key:string]:any }
8
9/**** rate-limited access to the OpenAI API ****/
10
11 export default async function (Request:Request):Promise<Response> {
28 /**** handle authorization - but only if Env.Var. is set ****/
29
30 const AccessToken = process.env.OpenAIChatCompletion
31 if (AccessToken != null) {
32 const AuthHeader = Request.headers.get('Authorization')
43 /**** count this invocation ****/
44
45 const TrackingTable = 'OpenAIChatCompletion_Usage'
 46 const Granularity_ = 15*60*1000 // how precisely should usage be logged?
47
 51 /**** limit the rate at which this resource is accessed ****/
52
53 const ResourceTable = 'OpenAIChatCompletion'
54 const ResourceLimit = 10 // max. number of allowed requests per period
55 const ResourcePeriod = 60*1000 // the period, given in ms
84 try {
85 if (stream == true) {
86 const CompletionStream = await new OpenAI().chat.completions.create({
87 ...ParameterSet, stream:true
88 })
109 })
110 } else {
111 const Completion = await new OpenAI().chat.completions.create({
112 ...ParameterSet, stream:false
113 })
118 }
119
120 return await OpenAIChatCompletion(Request)
121 }
122
5 options = {},
6) => {
7 // Initialize OpenAI API stub
8 const { Configuration, OpenAIApi } = await import(
9 "https://esm.sh/openai@3.3.0"
10 );
11 const configuration = new Configuration({
12 apiKey: process.env.OPENAI,
13 });
14 const openai = new OpenAIApi(configuration);
15 // Request chat completion
16 const messages = typeof prompt === "string"
17 ? [{ role: "user", content: prompt }]
18 : prompt;
19 const { data } = await openai.createChatCompletion({
20 model: "gpt-3.5-turbo-0613",
21 messages,
1import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
2import { OpenAI } from "https://esm.town/v/std/openai";
3
4const KEY = "memorySampleSummary";
6const SCHEMA_VERSION = 1;
7
8const openai = new OpenAI();
9
10// Initialize the database
22// Generate embedding for a given text
23async function generateEmbedding(text: string): Promise<number[]> {
24 const response = await openai.embeddings.create({
25 model: "text-embedding-ada-002",
26 input: text,
2import { fileToDataURL } from "https://esm.town/v/stevekrouse/fileToDataURL";
3import { modifyImage } from "https://esm.town/v/stevekrouse/modifyImage";
4import { chat } from "https://esm.town/v/stevekrouse/openai";
5import { Hono } from "npm:hono@3";
6