128
129export default async function server(request: Request): Promise<Response> {
130 const { OpenAI } = await import("https://esm.town/v/std/openai");
131 const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
132 const openai = new OpenAI();
133 const SCHEMA_VERSION = 2;
134 const KEY = "multiUserChatwithLLM";
178 messages.push({ role: "user", content: `${username}: ${message}` });
179
180 const completion = await openai.chat.completions.create({
181 messages,
182 model: "gpt-4o-mini",
1import { email } from "https://esm.town/v/std/email?v=11";
2import { OpenAI } from "npm:openai";
3
4let location = "brooklyn ny";
8).then(r => r.json());
9
10const openai = new OpenAI();
11let chatCompletion = await openai.chat.completions.create({
12 messages: [{
13 role: "user",
1# OpenAI - [Docs ↗](https://docs.val.town/std/openai)
2
3Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
4
5For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).
6
7## Usage
8
9```ts title="Example" val
10import { OpenAI } from "https://esm.town/v/std/openai";
11
12const openai = new OpenAI();
13
14const completion = await openai.chat.completions.create({
15 messages: [
16 { role: "user", content: "Say hello in a creative way" },
25## Limits
26
27While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
28
29* **Usage Quota**: We limit each user to 10 requests per minute.
32If these limits are too low, let us know! You can also bypass these limits by using your own API keys:
33
341. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
352. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
363. Use the `OpenAI` client from `npm:openai`:
37
38```ts title="Example" val
39import { OpenAI } from "npm:openai";
40
41const openai = new OpenAI();
42```
43
44
45[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
1import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
2
3/**
4 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
5 */
6export class OpenAI {
7 private rawOpenAIClient: RawOpenAI;
8
9 /**
10 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11 *
12 * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19 */
20 constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21 this.rawOpenAIClient = new RawOpenAI({
22 ...options,
23 baseURL: "https://std-openaiproxy.web.val.run/v1",
24 apiKey: Deno.env.get("valtown"),
25 organization: null,
28
29 get chat() {
30 return this.rawOpenAIClient.chat;
31 }
32
33 readonly beta = {
34 get chat(): RawOpenAI["beta"]["chat"] {
35 return this.rawOpenAIClient.beta.chat;
36 }
37 }
1# OpenAI Proxy
2
3This OpenAI API proxy injects Val Town's API keys. For usage documentation, check out https://www.val.town/v/std/openai
1import { parseBearerString } from "https://esm.town/v/andreterron/parseBearerString";
2import { API_URL } from "https://esm.town/v/std/API_URL?v=5";
3import { OpenAIUsage } from "https://esm.town/v/std/OpenAIUsage";
4import { RateLimit } from "npm:@rlimit/http";
5const client = new OpenAIUsage();
6
7
42
43 // Proxy the request
44 const url = new URL("." + pathname, "https://api.openai.com");
45 url.search = search;
46
47 const headers = new Headers(req.headers);
48 headers.set("Host", url.hostname);
49 headers.set("Authorization", `Bearer ${Deno.env.get("OPENAI_API_KEY")}`);
50 headers.set("OpenAI-Organization", Deno.env.get("OPENAI_API_ORG"));
51
52 const modifiedBody = await limitFreeModel(req, user);
63 });
64
65 const openAIRes = await fetch(url, {
66 method: req.method,
67 headers,
71
72 // Remove internal header
73 const res = new Response(openAIRes.body, openAIRes);
74 res.headers.delete("openai-organization");
75 return res;
76}
231 <button class="collapsible-button">Overview</button>
232 <div class="collapsible-content">
233 <p>This schema defines several components that can be used to integrate OpenAI's services into a logging and configuration system. Here's how each component relates to potential OpenAI use cases:</p>
234 <ul>
235 <li><strong>App</strong>: Represents an OpenAI-powered application with a name and URL.</li>
236 <li><strong>Config</strong>: Defines configuration options for OpenAI API integration and application settings.</li>
237 <li><strong>ConsoleLog</strong>: Captures console output from OpenAI model interactions and application processes.</li>
238 <li><strong>CronLog</strong>: Logs scheduled tasks related to OpenAI operations, such as model fine-tuning or dataset updates.</li>
239 <li><strong>HttpLog</strong>: Records HTTP requests made to and from the OpenAI API.</li>
240 </ul>
241 </div>
243
244 <div class="collapsible">
245 <button class="collapsible-button">Key Components and OpenAI Use Cases</button>
246 <div class="collapsible-content">
247 <dl>
255 <dt>Config</dt>
256 <dd>
257 Use Case: Store OpenAI API keys, model preferences, and application settings.
258 <br>
259 Example: Configure the GPT model to use, set token limits, and specify custom domains for AI services.
278 Use Case: Monitor and analyze API usage and performance.
279 <br>
280 Example: Track rate limits, response times, and payload sizes for OpenAI API calls.
281 </dd>
282 </dl>
294 <li><strong>Usage Analytics</strong>: Analyze HttpLog data to gain insights into API usage patterns, popular features, and potential areas for optimization or scaling.</li>
295 </ul>
296 <p>By implementing this schema, developers can create robust, scalable applications that effectively integrate and manage OpenAI's powerful AI capabilities while maintaining comprehensive logging and configuration control.</p>
297 </div>
298 </div>
312 <meta charset="UTF-8">
313 <meta name="viewport" content="width=device-width, initial-scale=1.0">
314 <title>OpenAI Integration Schema Guide</title>
315 <style>
316 body {
371 </head>
372 <body>
373 <h1>OpenAI Integration Schema Guide</h1>
374 ${guide}
375
5 options = {},
6) => {
7 // Initialize OpenAI API stub
8 const { Configuration, OpenAIApi } = await import(
9 "https://esm.sh/openai@3.3.0"
10 );
11 const configuration = new Configuration({
12 apiKey: process.env.OPENAI,
13 });
14 const openai = new OpenAIApi(configuration);
15 // Request chat completion
16 const messages = typeof prompt === "string"
17 ? [{ role: "user", content: prompt }]
18 : prompt;
19 const { data } = await openai.createChatCompletion({
20 model: "gpt-3.5-turbo-0613",
21 messages,
3import { passwordAuth } from "https://esm.town/v/pomdtr/password_auth?v=84";
4import { verifyToken } from "https://esm.town/v/pomdtr/verifyToken?v=1";
5import { openai } from "npm:@ai-sdk/openai";
6import ValTown from "npm:@valtown/sdk";
7import { streamText } from "npm:ai";
36
37 const stream = await streamText({
38 model: openai("gpt-4o", {
39 baseURL: "https://std-openaiproxy.web.val.run/v1",
40 apiKey: Deno.env.get("valtown"),
41 } as any),
3export async function getModelBuilder(spec: {
4 type?: "llm" | "chat" | "embedding";
5 provider?: "openai" | "huggingface";
6} = { type: "llm", provider: "openai" }, options?: any) {
7 const { extend, cond, matches, invoke } = await import("npm:lodash-es");
8 // Set up LangSmith tracer
17  // Set up the API key for each provider
18 const args = extend({ callbacks }, options);
19 if (spec?.provider === "openai")
20 args.openAIApiKey = process.env.OPENAI;
21 else if (spec?.provider === "huggingface")
22 args.apiKey = process.env.HUGGINGFACE;
24 const setup = cond([
25 [
26 matches({ type: "llm", provider: "openai" }),
27 async () => {
28 const { OpenAI } = await import("npm:langchain/llms/openai");
29 return new OpenAI(args);
30 },
31 ],
32 [
33 matches({ type: "chat", provider: "openai" }),
34 async () => {
35 const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
36 return new ChatOpenAI(args);
37 },
38 ],
39 [
40 matches({ type: "embedding", provider: "openai" }),
41 async () => {
42 const { OpenAIEmbeddings } = await import(
43 "npm:langchain/embeddings/openai"
44 );
45 return new OpenAIEmbeddings(args);
46 },
47 ],