email_channel/main.tsx (5 matches)
```ts
import { email } from "https://esm.town/v/std/email";
import { createOpenAI } from "npm:@ai-sdk/openai";
import { generateObject } from "npm:ai";
import Campsite from "npm:campsite-client";

// ...

// Other providers are available via Vercel's AI SDK:
// https://sdk.vercel.ai/docs/introduction#model-providers
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");

const campsite = new Campsite({ apiKey: CAMPSITE_API_KEY });

// ...

let subject = payload.subject || "Forwarded email (no subject)";

if (OPENAI_API_KEY) {
  const openai = createOpenAI({ apiKey: OPENAI_API_KEY });

  // Extract name and email from the forwarded message
  const { object: { name, email, subject, message } } = await generateObject({
    model: openai("gpt-4-turbo"),
    schema: z.object({
      name: z.string(),
      // ...
```
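`generateObject` pairs a model with a Zod schema and returns the parsed object directly. Here is a self-contained sketch of the extraction step above; only the schema fields come from the excerpt, while the prompt text and the `rawEmailText` input are assumptions:

```ts
import { createOpenAI } from "npm:@ai-sdk/openai";
import { generateObject } from "npm:ai";
import { z } from "npm:zod";

const openai = createOpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });
const rawEmailText = "..."; // hypothetical: the forwarded email's raw text

// The model must return JSON matching the schema; the SDK validates it.
const { object } = await generateObject({
  model: openai("gpt-4-turbo"),
  schema: z.object({
    name: z.string(),
    email: z.string(),
    subject: z.string(),
    message: z.string(),
  }),
  prompt: `Extract the original sender's name, email, subject, and message:\n\n${rawEmailText}`,
});

console.log(object.name, object.email);
```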
flutteringVioletBird/main.tsx (1 match)
```ts
setIsLoading(true);
try {
  const response = await fetch("https://esm.town/v/stevekrouse/openai", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // ...
```
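The request body is cut off in the excerpt; here is a hedged completion, assuming the endpoint accepts a chat-style JSON body and returns JSON (the actual contract of stevekrouse/openai isn't shown here):

```ts
// Hypothetical completion of the fetch above (inside a React-style handler).
const response = await fetch("https://esm.town/v/stevekrouse/openai", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: userInput }], // userInput is hypothetical state
  }),
});
const data = await response.json();
setIsLoading(false);
```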
* Hono JSX
* HTMX (probably overcomplicates things; should remove)
* @stevekrouse/openai, which is a light wrapper around @std/openai

I'm finding HTMX a bit overpowered for this, so I have two experimental forks without it:
randomTextGenerator/main.tsx (3 matches)
```ts
if (url.searchParams.has("paragraphs")) {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const completion = await openai.chat.completions.create({
    messages: [
      { role: "system", content: "You are a random text generator. Generate coherent, interesting text without any specific theme." },
      // ...
```
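The excerpt stops mid-call; here is a self-contained sketch of how such a handler could finish. Only the system prompt comes from the val; the user turn, model choice, and response handling are assumed:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  const url = new URL(req.url);
  if (url.searchParams.has("paragraphs")) {
    const count = Number(url.searchParams.get("paragraphs")) || 1;
    const openai = new OpenAI();

    const completion = await openai.chat.completions.create({
      messages: [
        { role: "system", content: "You are a random text generator. Generate coherent, interesting text without any specific theme." },
        // Assumed user turn; the val's actual prompt isn't in the excerpt.
        { role: "user", content: `Generate ${count} paragraphs.` },
      ],
      model: "gpt-4o-mini",
    });

    return new Response(completion.choices[0].message.content ?? "", {
      headers: { "Content-Type": "text/plain" },
    });
  }
  return new Response("Add ?paragraphs=N", { status: 400 });
}
```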
superiorHarlequinUrial/main.tsx (3 matches)
```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  // ...
    });
  }
  const openai = new OpenAI();

  try {
    // ...
  }

  const stream = await openai.chat.completions.create(body);

  if (!body.stream) {
    // ...
```
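When `body.stream` is true, the SDK call above returns an async iterable of chunks. Here is a sketch of the streaming branch, continuing from the excerpt's `stream` and `body` variables and assuming the val relays chunks as server-sent events:

```ts
// Continuation sketch, using `stream` and `body` from the excerpt above.
if (body.stream) {
  const encoder = new TextEncoder();
  const readable = new ReadableStream({
    async start(controller) {
      // Forward each OpenAI chunk as an SSE data line.
      for await (const chunk of stream) {
        controller.enqueue(encoder.encode(`data: ${JSON.stringify(chunk)}\n\n`));
      }
      controller.enqueue(encoder.encode("data: [DONE]\n\n"));
      controller.close();
    },
  });
  return new Response(readable, {
    headers: { "Content-Type": "text/event-stream" },
  });
}
```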
```ts
import { telegramSendMessage } from "https://esm.town/v/vtdocs/telegramSendMessage?v=5";
import { OpenAI } from "https://esm.town/v/std/openai";
import { blob } from "https://esm.town/v/std/blob";

const openai = new OpenAI();

// Task structure
// ...
`;

const completion = await openai.chat.completions.create({
  messages: [{ role: "user", content: prompt }],
  model: "gpt-4o-mini",
  // ...
```
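A plausible continuation of the excerpt: read the reply, persist it with `std/blob`, and notify Telegram. The blob key, env-var names, and the `telegramSendMessage(botToken, { chat_id, text })` signature are assumptions:

```ts
// Continuation sketch: assumes `completion` from the call above.
const reply = completion.choices[0]?.message?.content ?? "";

// Persist the latest task list (key name is hypothetical).
await blob.setJSON("tasks:latest", { updatedAt: Date.now(), raw: reply });

// Signature assumed for vtdocs/telegramSendMessage: (botToken, { chat_id, text }).
await telegramSendMessage(Deno.env.get("TELEGRAM_BOT_TOKEN")!, {
  chat_id: Deno.env.get("TELEGRAM_CHAT_ID")!,
  text: reply,
});
```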
* Fork this val to your own profile.
* Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in `tempValsParentFolderId`.
* If you want to use OpenAI models, set the `OPENAI_API_KEY` [env var](https://www.val.town/settings/environment-variables) (see the sketch after this list).
* If you want to use Anthropic models, set the `ANTHROPIC_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* Create a [Val Town API token](https://www.val.town/settings/api), open the browser preview of this val, and use the API token as the password to log in.
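As an illustration of the env-var convention in the list above, a minimal provider-selection sketch; the `pickModel` helper is hypothetical, not part of this val:

```ts
import { createOpenAI } from "npm:@ai-sdk/openai";
import { createAnthropic } from "npm:@ai-sdk/anthropic";

// Hypothetical helper: prefer OpenAI when its key is set, else fall back to Anthropic.
function pickModel() {
  const openaiKey = Deno.env.get("OPENAI_API_KEY");
  if (openaiKey) return createOpenAI({ apiKey: openaiKey })("gpt-4o-mini");

  const anthropicKey = Deno.env.get("ANTHROPIC_API_KEY");
  if (anthropicKey) return createAnthropic({ apiKey: anthropicKey })("claude-3-5-sonnet-20240620");

  throw new Error("Set OPENAI_API_KEY or ANTHROPIC_API_KEY");
}
```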
handsomeMagentaStoat/README.md (13 matches)
# OpenAI - [Docs ↗](https://docs.val.town/std/openai)

Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.

For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).

## Basic Usage

```ts title="Example" val
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  // ...
```
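The search excerpt truncates the example; here is a completed version, following the pattern in the std/openai docs, with the trailing parameters assumed:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4", // free users are routed to gpt-4o-mini regardless
  max_tokens: 30, // assumed value
});

console.log(completion.choices[0].message.content);
```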
## Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

* **Usage Quota**: We limit each user to 10 requests per minute.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

1. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
2. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
3. Use the `OpenAI` client from `npm:openai`:
```ts title="Example" val
import { OpenAI } from "npm:openai";

const openai = new OpenAI();
```
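The `npm:openai` client reads `OPENAI_API_KEY` from the environment by default; passing it explicitly is equivalent:

```ts
import { OpenAI } from "npm:openai";

// Explicit form of the default env-var lookup.
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });
```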
[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
handsomeMagentaStoat/main.tsx (10 matches)
```ts
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * ...
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
      // ...
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  readonly beta = {
    get chat(): RawOpenAI["beta"]["chat"] {
      return this.rawOpenAIClient.beta.chat;
    },
  };
}
```
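Because the constructor type omits only `baseURL`, `apiKey`, and `organization`, every other `ClientOptions` field passes through to the underlying client. For example, overriding the 10-minute default timeout mentioned in the JSDoc:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Wait at most 60 seconds instead of the 10-minute default.
const openai = new OpenAI({ timeout: 60_000 });
```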
```ts
import { Hono } from "npm:hono@3";
import { cors } from "npm:hono/cors";
import { createOpenAI } from "npm:@ai-sdk/openai";
import { createAnthropic } from "npm:@ai-sdk/anthropic@0.0.48";
import { google, createGoogleGenerativeAI } from 'npm:@ai-sdk/google';

// ...
});

const openai = createOpenAI({
  // apiKey = Deno.env.get("OPENAI_API_KEY");
  apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET"),
});

const groq = createOpenAI({
  baseURL: 'https://api.groq.com/openai/v1',
  apiKey: Deno.env.get("GROQ_API_KEY"),
});

const perplexity = createOpenAI({
  apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? '',
  baseURL: 'https://api.perplexity.ai/',
  // ...
```
```ts
// ...
    this.memories = options.memories || [];
    this.messages = options.messages || [];
    this.defaultProvider = options.provider || 'openai';
    this.defaultModel = options.model;
    this.defaultMaxTokens = options.maxTokens;

// ...
    let result;
    switch (provider) {
      case 'openai':
        result = await this.generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, ...additionalSettings });
        break;
      case 'anthropic':
// ...
  }

  async generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, embed, value, dimensions, user, ...additionalSettings }) {
    const modelId = model || 'gpt-3.5-turbo';

    if (embed) {
      let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
      // console.log('embed!', result)
      return result;
    }

    const options = {
      model: openai(modelId),
      system,
      temperature,
      // ...
    };
// ...
  }
```
```ts
  async generateOpenAIEmbedding({ model, value, dimensions, user }) {
    const modelId = model || 'text-embedding-3-large';
    const options = {
      model: openai.embedding(modelId, {
        dimensions,
        user,
      }),
      // ...
```
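For reference, AI SDK embedding models like `openai.embedding(...)` plug into the SDK's `embed` helper; a minimal standalone sketch, with the model and input values assumed:

```ts
import { embed } from "npm:ai";
import { createOpenAI } from "npm:@ai-sdk/openai";

const openai = createOpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

// 256-dimension embedding of a single string.
const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-large", { dimensions: 256 }),
  value: "sunny day at the beach",
});
console.log(embedding.length); // 256
```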
```ts
app.get('/generate', async (c) => {
  const provider = c.req.query('provider') || 'openai';
  const model = c.req.query('model');
  const prompt = c.req.query('prompt');
// ...

  console.log("post/generate", { mode: 'post/generate', prompt, provider, model });
  const response = await modelProvider.gen({
    provider: provider || 'openai',
    model,
    prompt,
    // ...
```
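To exercise the `/generate` route above, a usage sketch; the deployment hostname is a placeholder and the response shape depends on `modelProvider.gen`, which the excerpt doesn't show:

```ts
// Hypothetical deployment URL for this Hono app.
const base = "https://my-ai-router.web.val.run";

const res = await fetch(
  `${base}/generate?provider=openai&model=gpt-4o-mini&prompt=` +
    encodeURIComponent("Write a haiku about rain"),
);
console.log(await res.text());
```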