handsomeMagentaStoat/README.md — 13 matches
# OpenAI - [Docs ↗](https://docs.val.town/std/openai)
Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).
## Basic Usage
89```ts title="Example" val
10import { OpenAI } from "https://esm.town/v/std/openai";
1112const openai = new OpenAI();
1314const completion = await openai.chat.completions.create({
15messages: [
16{ role: "user", content: "Say hello in a creative way" },
## Limits
While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
* **Usage Quota**: We limit each user to 10 requests per minute.
If these limits are too low, let us know! You can also get around the limitation by using your own keys:
1. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
2. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
3. Use the `OpenAI` client from `npm:openai`:
7071```ts title="Example" val
72import { OpenAI } from "npm:openai";
7374const openai = new OpenAI();
75```
[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
handsomeMagentaStoat/main.tsx — 10 matches
1import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
23/**
4* API Client for interfacing with the OpenAI API. Uses Val Town credentials.
5*/
6export class OpenAI {
7private rawOpenAIClient: RawOpenAI;
89/**
10* API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11*
12* @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19*/
20constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21this.rawOpenAIClient = new RawOpenAI({
22...options,
23baseURL: "https://std-openaiproxy.web.val.run/v1",
24apiKey: Deno.env.get("valtown"),
25organization: null,
2829get chat() {
30return this.rawOpenAIClient.chat;
31}
3233readonly beta = {
34get chat(): RawOpenAI["beta"]["chat"] {
35return this.rawOpenAIClient.beta.chat;
36}
37}
2import { Hono } from "npm:hono@3";
3import { cors } from "npm:hono/cors";
4import { createOpenAI } from "npm:@ai-sdk/openai";
5import { createAnthropic } from "npm:@ai-sdk/anthropic@0.0.48";
6import { google, createGoogleGenerativeAI } from 'npm:@ai-sdk/google';
30});
3132const openai = createOpenAI({
33// apiKey = Deno.env.get("OPENAI_API_KEY");
34apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET")
35});
363738const groq = createOpenAI({
39baseURL: 'https://api.groq.com/openai/v1',
40apiKey: Deno.env.get("GROQ_API_KEY"),
41});
4243const perplexity = createOpenAI({
44apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? '',
45baseURL: 'https://api.perplexity.ai/',
57this.memories = options.memories || [];
58this.messages = options.messages || [];
59this.defaultProvider = options.provider || 'openai';
60this.defaultModel = options.model;
61this.defaultMaxTokens = options.maxTokens;
122let result;
123switch (provider) {
124case 'openai':
125result = await this.generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, ...additionalSettings });
126break;
127case 'anthropic':
171}
172173async generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, embed, value, dimensions, user, ...additionalSettings }) {
174const modelId = model || 'gpt-3.5-turbo';
175176if (embed) {
177let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
178// console.log('embed!', result)
179return result
181182const options = {
183model: openai(modelId),
184system,
185temperature,
235}
236237async generateOpenAIEmbedding({ model, value, dimensions, user }) {
238const modelId = model || 'text-embedding-3-large';
239const options = {
240model: openai.embedding(modelId, {
241dimensions,
242user,
491492app.get('/generate', async (c) => {
493const provider = c.req.query('provider') || 'openai';
494const model = c.req.query('model');
495const prompt = c.req.query('prompt');
523console.log("post/generate", { mode: 'post/generate', prompt, provider, model });
524const response = await modelProvider.gen({
525provider: provider || 'openai',
526model,
527prompt,
* Fork this val to your own profile.
* Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in `tempValsParentFolderId`.
* If you want to use OpenAI models you need to set the `OPENAI_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* If you want to use Anthropic models you need to set the `ANTHROPIC_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* Create a [Val Town API token](https://www.val.town/settings/api), open the browser preview of this val, and use the API token as the password to log in.
GDI_AITranslator/README.md — 1 match
It contains a simple web page which allows users to enter some German (or other
non-English) text and send it to a preconfigured server. That server translates
the text with the help of OpenAI and sends the result back to this app where it
is finally presented to the user.
12
8081const API_CONFIG = {
82url: "https://willthereader-openaidefiner.web.val.run",
83method: "POST",
84mode: "cors",
healthtech4africa/main.tsx — 3 matches
159160export default async function server(request: Request): Promise<Response> {
161const { OpenAI } = await import("https://esm.town/v/std/openai");
162const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
163const openai = new OpenAI();
164165const SCHEMA_VERSION = 2;
230}
231232const completion = await openai.chat.completions.create({
233messages: [
234{ role: "system", content: systemMessage },
email_channel/README.md — 1 match
This val creates an email address that posts forwarded emails to a [Campsite](https://campsite.com) channel.
It uses GPT-4 to extract a readable version of the forwarded email from the raw body. If you don't want to use GPT-4, omit the `OPENAI_API_KEY` and the raw body will be included in the post. Other providers are available via [Vercel's AI SDK](https://sdk.vercel.ai/docs/introduction#model-providers).
For help with creating integrations, check out the [Campsite API docs](https://app.campsite.com/campsite/p/notes/campsite-api-docs-l07d7gm5n5rm). You'll need to create an integration and get an API key.
weatherGPT/main.tsx — 3 matches
1import { email } from "https://esm.town/v/std/email?v=11";
2import { OpenAI } from "npm:openai";
34let location = "brooklyn ny";
8).then(r => r.json());
910const openai = new OpenAI();
11let chatCompletion = await openai.chat.completions.create({
12messages: [{
13role: "user",
GDI_AIChatCompletionService/main.tsx — 3 matches
1import { OpenAI } from "https://esm.town/v/std/openai";
23export default async function (req: Request): Promise<Response> {
19}
2021const openai = new OpenAI();
22const completion = await openai.chat.completions.create({
23model: "gpt-4o-mini",
24messages: [