Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=100&format=json
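A minimal sketch of calling this from TypeScript (Deno), assuming the query-string form shown above; the response schema isn't documented on this page, so the result is just parsed and logged:

```ts
// Sketch: fetch a page of search results as JSON.
// The response shape is undocumented here, so we parse and log it as-is.
const res = await fetch("https://codesearch.val.run/?q=openai&page=1&format=json");
if (!res.ok) throw new Error(`search request failed: ${res.status}`);
const results = await res.json();
console.log(results);
```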

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns a JSON array of strings in the format "username" or "username/projectName".
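A minimal sketch of consuming the typeahead endpoint, assuming only what's stated above (a JSON array of strings):

```ts
// Sketch: fetch typeahead suggestions for a query prefix.
// Each entry is "username" or "username/projectName".
const res = await fetch("https://codesearch.val.run/typeahead?q=openai");
const suggestions: string[] = await res.json();
console.log(suggestions);
```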

Found 1603 results for "openai" (1490ms)

email_channel/main.tsx (5 matches)

@campsite · Updated 6 months ago
 1: import { email } from "https://esm.town/v/std/email";
 2: import { createOpenAI } from "npm:@ai-sdk/openai";
 3: import { generateObject } from "npm:ai";
 4: import Campsite from "npm:campsite-client";
15: // Other providers are available via Vercel's AI SDK:
16: // https://sdk.vercel.ai/docs/introduction#model-providers
17: const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
18:
19: const campsite = new Campsite({ apiKey: CAMPSITE_API_KEY });
29:   let subject = payload.subject || "Forwarded email (no subject)";
30:
31:   if (OPENAI_API_KEY) {
32:     const openai = createOpenAI({ apiKey: OPENAI_API_KEY });
33:
34:     // Extract name and email from the forwarded message
35:     const { object: { name, email, subject, message } } = await generateObject({
36:       model: openai("gpt-4-turbo"),
37:       schema: z.object({
38:         name: z.string(),

flutteringVioletBird/main.tsx (1 match)

@stevekrouse · Updated 6 months ago
12:   setIsLoading(true);
13:   try {
14:     const response = await fetch("https://esm.town/v/stevekrouse/openai", {
15:       method: "POST",
16:       headers: { "Content-Type": "application/json" },

flutteringVioletBird/README.md (1 match)

@stevekrouse · Updated 6 months ago
 8: * Hono JSX
 9: * HTMX (probably overcomplicates things; should remove)
10: * @stevekrouse/openai, which is a light wrapper around @std/openai
11:
12: I'm finding HTMX a bit overpowered for this, so I have two experimental forks without it:

randomTextGenerator/main.tsx (3 matches)

@aleaf · Updated 6 months ago
74:
75:   if (url.searchParams.has("paragraphs")) {
76:     const { OpenAI } = await import("https://esm.town/v/std/openai");
77:     const openai = new OpenAI();
78:
79:     const completion = await openai.chat.completions.create({
80:       messages: [
81:         { role: "system", content: "You are a random text generator. Generate coherent, interesting text without any specific theme." },

superiorHarlequinUrial/main.tsx (3 matches)

@junkerman2004 · Updated 6 months ago
 1: import { OpenAI } from "https://esm.town/v/std/openai";
 2:
 3: export default async function(req: Request): Promise<Response> {
11:     });
12:   }
13:   const openai = new OpenAI();
14:
15:   try {
28:   }
29:
30:   const stream = await openai.chat.completions.create(body);
31:
32:   if (!body.stream) {

telegramWebhookEchoMessageOLD/main.tsx (3 matches)

@dcm31 · Updated 6 months ago
  2:
  3: import { telegramSendMessage } from "https://esm.town/v/vtdocs/telegramSendMessage?v=5";
  4: import { OpenAI } from "https://esm.town/v/std/openai";
  5: import { blob } from "https://esm.town/v/std/blob";
  6:
  7: const openai = new OpenAI();
  8:
  9: // Task structure
 98: `;
 99:
100: const completion = await openai.chat.completions.create({
101:   messages: [{ role: "user", content: prompt }],
102:   model: "gpt-4o-mini",

VALLE/README.md (1 match)

@alanzou · Updated 6 months ago
 6: * Fork this val to your own profile.
 7: * Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in `tempValsParentFolderId`.
 8: * If you want to use OpenAI models you need to set the `OPENAI_API_KEY` [env var](https://www.val.town/settings/environment-variables).
 9: * If you want to use Anthropic models you need to set the `ANTHROPIC_API_KEY` [env var](https://www.val.town/settings/environment-variables).
10: * Create a [Val Town API token](https://www.val.town/settings/api), open the browser preview of this val, and use the API token as the password to log in.

handsomeMagentaStoat/README.md (13 matches)

@wangqiao1234 · Updated 6 months ago
 1: # OpenAI - [Docs ↗](https://docs.val.town/std/openai)
 2:
 3: Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
 4:
 5: For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).
 6:
 7: ## Basic Usage
 8:
 9: ```ts title="Example" val
10: import { OpenAI } from "https://esm.town/v/std/openai";
11:
12: const openai = new OpenAI();
13:
14: const completion = await openai.chat.completions.create({
15:   messages: [
16:     { role: "user", content: "Say hello in a creative way" },
58: ## Limits
59:
60: While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
61:
62: * **Usage Quota**: We limit each user to 10 requests per minute.
65: If these limits are too low, let us know! You can also get around the limitation by using your own keys:
66:
67: 1. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
68: 2. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
69: 3. Use the `OpenAI` client from `npm:openai`:
70:
71: ```ts title="Example" val
72: import { OpenAI } from "npm:openai";
73:
74: const openai = new OpenAI();
75: ```
76:
77:
78: [📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)

handsomeMagentaStoat/main.tsx (10 matches)

@wangqiao1234 · Updated 6 months ago
 1: import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
 2:
 3: /**
 4:  * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 5:  */
 6: export class OpenAI {
 7:   private rawOpenAIClient: RawOpenAI;
 8:
 9:   /**
10:    * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11:    *
12:    * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19:    */
20:   constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21:     this.rawOpenAIClient = new RawOpenAI({
22:       ...options,
23:       baseURL: "https://std-openaiproxy.web.val.run/v1",
24:       apiKey: Deno.env.get("valtown"),
25:       organization: null,
28:
29:   get chat() {
30:     return this.rawOpenAIClient.chat;
31:   }
32:
33:   readonly beta = {
34:     get chat(): RawOpenAI["beta"]["chat"] {
35:       return this.rawOpenAIClient.beta.chat;
36:     }
37:   }

ai/main.tsx (17 matches)

@yawnxyz · Updated 6 months ago
  2: import { Hono } from "npm:hono@3";
  3: import { cors } from "npm:hono/cors";
  4: import { createOpenAI } from "npm:@ai-sdk/openai";
  5: import { createAnthropic } from "npm:@ai-sdk/anthropic@0.0.48";
  6: import { google, createGoogleGenerativeAI } from 'npm:@ai-sdk/google';
 30: });
 31:
 32: const openai = createOpenAI({
 33:   // apiKey = Deno.env.get("OPENAI_API_KEY");
 34:   apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET")
 35: });
 36:
 37:
 38: const groq = createOpenAI({
 39:   baseURL: 'https://api.groq.com/openai/v1',
 40:   apiKey: Deno.env.get("GROQ_API_KEY"),
 41: });
 42:
 43: const perplexity = createOpenAI({
 44:   apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? '',
 45:   baseURL: 'https://api.perplexity.ai/',
 57:     this.memories = options.memories || [];
 58:     this.messages = options.messages || [];
 59:     this.defaultProvider = options.provider || 'openai';
 60:     this.defaultModel = options.model;
 61:     this.defaultMaxTokens = options.maxTokens;
122:     let result;
123:     switch (provider) {
124:       case 'openai':
125:         result = await this.generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, ...additionalSettings });
126:         break;
127:       case 'anthropic':
171:   }
172:
173:   async generateOpenAIResponse({ model, prompt, maxTokens, temperature, streaming, schema, system, messages, tools, embed, value, dimensions, user, ...additionalSettings }) {
174:     const modelId = model || 'gpt-3.5-turbo';
175:
176:     if (embed) {
177:       let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
178:       // console.log('embed!', result)
179:       return result
181:
182:     const options = {
183:       model: openai(modelId),
184:       system,
185:       temperature,
235:   }
236:
237:   async generateOpenAIEmbedding({ model, value, dimensions, user }) {
238:     const modelId = model || 'text-embedding-3-large';
239:     const options = {
240:       model: openai.embedding(modelId, {
241:         dimensions,
242:         user,
491:
492: app.get('/generate', async (c) => {
493:   const provider = c.req.query('provider') || 'openai';
494:   const model = c.req.query('model');
495:   const prompt = c.req.query('prompt');
523:   console.log("post/generate", { mode: 'post/generate', prompt, provider, model });
524:   const response = await modelProvider.gen({
525:     provider: provider || 'openai',
526:     model,
527:     prompt,

translateToEnglishWithOpenAI (1 file match)

@shlmt · Updated 1 day ago

testOpenAI (1 file match)

@stevekrouse · Updated 3 days ago
lost1991

import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",