Val Town Code Search — Return to Val Town

API Access

You can access search results via the JSON API by adding `format=json` to your query:

https://codesearch.val.run/?q=openai&page=1&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 1614 results for "openai" (547ms)

compassionateBlackCatfish — main.tsx — 3 matches

@gigmx — Updated 4 months ago
1import OpenAI from "https://esm.sh/openai@4.28.4";
2
3export default async function(req: Request): Promise<Response> {
309 try {
310 const LEPTON_API_TOKEN = Deno.env.get('LEPTON_API_TOKEN') || '';
311 const openai = new OpenAI({
312 apiKey: LEPTON_API_TOKEN,
313 baseURL: 'https://llama3-3-70b.lepton.run/api/v1/',
329 };
330
331 const response = await openai.chat.completions.create(apiConfig);
332
333 return new Response(JSON.stringify(response), {

openAIStreamingExample — main.tsx — 3 matches

@peterqliu — Updated 4 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2export default async function(req: Request): Promise<Response> {
3 const openai = new OpenAI();
4 const stream = await openai.chat.completions.create({
5 stream: true,
6 messages: [{ role: "user", content: "Write a poem in the style of beowulf about the DMV" }],

generateFunction — main.tsx — 3 matches

@wolf — Updated 4 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2
3function extractCode(str: string): string {
19 }
20
21 const openai = new OpenAI();
22 const prompt =
23 `Generate a TypeScript function named "${functionName}" with the following parameters: ${parameters}. ONLY RETURN VALID JAVASCRIPT DO NOT INCLUDE ANY CONTEXT.`;
24 const completion = await openai.chat.completions.create({
25 messages: [
26 {

laudableCrimsonCoyote — main.tsx — 4 matches

@slinfo — Updated 4 months ago
168 );
169
170 const { OpenAI } = await import("https://esm.town/v/std/openai");
171 const openai = new OpenAI();
172
173 try {
174 const imageAnalysis = await withTimeout(openai.chat.completions.create({
175 model: "gpt-4o",
176 messages: [
199 : '';
200
201 const story = await withTimeout(openai.chat.completions.create({
202 model: "gpt-4o-mini",
203 messages: [

Storyweaver — main.tsx — 4 matches

@slinfo — Updated 4 months ago
168 );
169
170 const { OpenAI } = await import("https://esm.town/v/std/openai");
171 const openai = new OpenAI();
172
173 try {
174 const imageAnalysis = await withTimeout(openai.chat.completions.create({
175 model: "gpt-4o",
176 messages: [
199 : '';
200
201 const story = await withTimeout(openai.chat.completions.create({
202 model: "gpt-4o-mini",
203 messages: [

interview_practice — main.tsx — 3 matches

@slinfo — Updated 4 months ago
235 const { intervieweeResponse, interviewPosition } = await request.json();
236
237 const { OpenAI } = await import("https://esm.town/v/std/openai");
238 const openai = new OpenAI();
239
240 const completion = await openai.chat.completions.create({
241 messages: [
242 {

OpenAI — README.md — 14 matches

@ianmenethil — Updated 4 months ago
1# OpenAI - [Docs ↗](https://docs.val.town/std/openai)
2
3Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.
4
5For free Val Town users, [all calls are sent to `gpt-4o-mini`](https://www.val.town/v/std/openaiproxy?v=12#L85).
6
7## Basic Usage
8
9```ts title="Example" val
10import { OpenAI } from "https://esm.town/v/std/openai";
11
12const openai = new OpenAI();
13
14const completion = await openai.chat.completions.create({
15 messages: [
16 { role: "user", content: "Say hello in a creative way" },
58## Limits
59
60While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:
61
62* **Usage Quota**: We limit each user to 10 requests per minute.
65If these limits are too low, let us know! You can also get around the limitation by using your own keys:
66
671. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
682. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
693. Use the `OpenAI` client from `npm:openai`:
70
71```ts title="Example" val
72import { OpenAI } from "npm:openai";
73
74const openai = new OpenAI();
75```
76
77
78[📝 Edit docs](https://github.com/val-town/val-town-docs/edit/main/src/content/docs/std/openai.mdx)
79
80Migrated from folder: Examples/OpenAI

OpenAI — main.tsx — 9 matches

@ianmenethil — Updated 4 months ago
1import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";
2
3/**
4 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
5 */
6export class OpenAI {
7 private rawOpenAIClient: RawOpenAI;
8
9 /**
10 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
11 *
12 * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
19 */
20 constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
21 this.rawOpenAIClient = new RawOpenAI({
22 ...options,
23 baseURL: "https://std-openaiproxy.web.val.run/v1",
24 apiKey: Deno.env.get("valtown"),
25 organization: null,
28
29 get chat() {
30 return this.rawOpenAIClient.chat;
31 }
32
33 get beta() {
34 return {
35 chat: this.rawOpenAIClient.beta.chat,
36 };
37 }

ai_bounty_finder — main.tsx — 4 matches

@thatsmeadarsh — Updated 4 months ago
1import readline from "node:readline";
2import { createOpenAI } from "npm:@ai-sdk/openai";
3import { type CoreMessage, streamText } from "npm:ai";
4import { configDotenv } from "npm:dotenv";
9const toolKit = new VercelAIToolkit();
10const messages: CoreMessage[] = [];
11const openai = createOpenAI({
12 apiKey: Deno.env.get("OPENAI_API_KEY") || "",
13});
14
24
25 const result = streamText({
26 model: openai("gpt-4o"),
27 tools,
28 maxSteps: 10,

flowingBeigePigeon — main.tsx — 5 matches

@lilymachado — Updated 4 months ago
71 setIsAnalyzing(true);
72 try {
73 // Dynamically import OpenAI to ensure server-side compatibility
74 const { OpenAI } = await import("https://esm.town/v/std/openai");
75 const openai = new OpenAI();
76
77 // Detailed AI prompt for comprehensive analysis
78 const response = await openai.chat.completions.create({
79 model: "gpt-4o",
80 messages: [
152
153 // Generate narrative description
154 const narrativeResponse = await openai.chat.completions.create({
155 model: "gpt-4o",
156 messages: [

translateToEnglishWithOpenAI — 1 file match

@shlmt — Updated 3 days ago

testOpenAI — 1 file match

@stevekrouse — Updated 5 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",