1import puppeteer from "https://deno.land/x/puppeteer@16.2.0/mod.ts";
2import { OpenAI } from "https://esm.town/v/std/openai?v=4";
3import { Browserbase } from "npm:@browserbasehq/sdk";
4
31
32// Ask ChatGPT for a list of concert dates
33const openai = new OpenAI();
34
35const completion = await openai.chat.completions.create({
36 messages: [
37 { role: "system", content: "Return concert dates as JSON array. No code fences." },
89 console.log(`Created ${fileResponses.length} ${gzip ? "gzip" : ""} files`);
90
91 return new Response(JSON.stringify({ openaiFileResponse: fileResponses }), {
92 status: 200,
93 headers: { "Content-Type": "application/json" },
1# OpenAI - [Docs ↗](https://docs.val.town/std/openai)
2
3Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
4
5For free Val Town users, [all calls are sent to `gpt-3.5-turbo`](https://www.val.town/v/std/openaiproxy?v=5#L69).
6
7Streaming is not yet supported. Upvote the [HTTP response streaming feature request](https://github.com/val-town/val-town-product/discussions/14) if you need it!
10
11```ts title="Example" val
12import { OpenAI } from "https://esm.town/v/std/openai";
13
14const openai = new OpenAI();
15
16const completion = await openai.chat.completions.create({
17 messages: [
18 { role: "user", content: "Say hello in a creative way" },
27## Limits
28
29While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
30
31* **Usage Quota**: We limit each user to 10 requests per minute.
34If these limits are too low, let us know! You can also get around the limitation by using your own keys:
35
361. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
372. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
383. Use the `OpenAI` client from `npm:openai`:
39
40```ts title="Example" val
41import { OpenAI } from "npm:openai";
42
43const openai = new OpenAI();
44```
45
46
47[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
1# ChatGPT Implemented in Val Town
2
3Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
4
5<p align="center">
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
4
5// This uses my personal API key; you'll need to provide your own if
6// you fork this. We'll be adding support to the std/openai lib soon!
7const openai = new OpenAI();
8import { Hono } from "npm:hono@3";
9
38 });
39
40 // Setup the SSE connection and stream back the response. OpenAI handles determining
41 // which message is the correct response based on what was last read from the
42 // thread. This is likely vulnerable to race conditions.
58const app = new Hono();
59app.get("/", async (c) => {
60 const thread = await openai.beta.threads.create();
61 const assistant = await openai.beta.assistants.create({
62 name: "",
63 instructions:
114app.post("/post-message", async (c) => {
115 let message = await c.req.text();
116 await openai.beta.threads.messages.create(
117 c.req.query("threadId"),
118 { role: "user", content: message },
132 ));
133 };
134 const run = openai.beta.threads.runs.stream(threadId, {
135 assistant_id: assistantId,
136 // Make sure we only display messages we haven't seen yet.
4import Instructor from "npm:@instructor-ai/instructor";
5import Jimp from "npm:jimp";
6import OpenAI from "npm:openai";
7import { z } from "npm:zod";
8
17const TODOIST_API_KEY = process.env.TODOIST_API_KEY;
18const HABITIFY_API_KEY = process.env.HABITIFY_API_KEY;
19const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
20const DEF_TIMEZONE = "America/Los_Angeles"; // Get your timezone from here: https://stackoverflow.com/a/54500197
21
49const todoistapi = new TodoistApi(TODOIST_API_KEY);
50
51const oai = new OpenAI({
52 apiKey: OPENAI_API_KEY ?? undefined,
53});
54
1import { loadPageContent } from "https://esm.town/v/charlypoly/browserbaseUtils";
2import { email } from "https://esm.town/v/std/email?v=12";
3import { OpenAI } from "https://esm.town/v/std/openai?v=4";
4import { z } from "npm:zod";
5import { zodToJsonSchema } from "npm:zod-to-json-schema";
26 });
27
28 // We create an OpenAI tool that takes our schema as an argument
29 const extractContentTool: any = {
30 type: "function",
37 };
38
39 const openai = new OpenAI();
40
41 // We ask OpenAI to extract the content from the given web page.
42 // The model will reach out to our `extract_content` tool and
43 // by doing so, the model will extract the required data to satisfy
44 // the requirement of `extract_content`'s argument.
45 const completion = await openai.chat.completions.create({
46 model: "gpt-4-turbo",
47 messages: [
56 });
57
58 // we retrieve the serialized arguments generated by OpenAI
59 const result = completion.choices[0].message.tool_calls![0].function.arguments;
60
64 const parsed = schema.parse(JSON.parse(result));
65
66 const completion2 = await openai.chat.completions.create({
67 model: "gpt-4-turbo",
68 messages: [
7import cosSimilarity from "npm:cos-similarity";
8import _ from "npm:lodash";
9import OpenAI from "npm:openai";
10
11export default async function blogPostEmbeddingsDimensionalityReduction() {
23 ];
24
25 const openai = new OpenAI();
26 async function getEmbedding(str) {
27 return (await openai.embeddings.create({
28 model: "text-embedding-3-large",
29 input: str,
6import cosSimilarity from "npm:cos-similarity";
7import _ from "npm:lodash";
8import OpenAI from "npm:openai";
9
10const comparisons = [
20];
21
22const openai = new OpenAI();
23const cache = {};
24async function getEmbedding(str) {
25 cache[str] = cache[str] || (await openai.embeddings.create({
26 model: "text-embedding-3-large",
27 input: str,
4import { db as allValsDb } from "https://esm.town/v/sqlite/db?v=9";
5import { blob } from "https://esm.town/v/std/blob";
6import OpenAI from "npm:openai";
7import { truncateMessage } from "npm:openai-tokens";
8
9const dimensions = 1536;
39);
40
41const openai = new OpenAI();
42for (const newValsBatch of newValsBatches) {
43 const batchDataIndex = nextDataIndex;
47 const code = getValCode(val);
48
49 const embedding = await openai.embeddings.create({
50 model: "text-embedding-3-small",
51 input: truncateMessage(code, "text-embedding-3-small"),