Val Town Code SearchReturn to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=46&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 3217 results for "openai" (1520ms)

openaiproxyusage.ts6 matches

@a01•Updated 1 month ago
17 COUNT(*) AS count
18 FROM
19 openai_usage,
20 params
21 WHERE
38};
39
40export class OpenAIUsage {
41 constructor() {}
42 async migrate() {
43 await sqlite.batch([`CREATE TABLE IF NOT EXISTS openai_usage (
44 id INTEGER PRIMARY KEY,
45 user_id TEXT NOT NULL,
52 }
53 async drop() {
54 await sqlite.batch([`DROP TABLE IF EXISTS openai_usage`]);
55 }
56 async writeUsage(ur: UsageRow) {
57 sqlite.execute({
58 sql:
59 "INSERT INTO openai_usage (user_id, handle, tier, tokens, model) VALUES (?, ?, ?, ?, ?)",
60 args: [ur.userId, ur.handle, ur.tier, ur.tokens, ur.model],
61 });
79 let resp = await sqlite.execute({
80 sql: `SELECT count(*)
81FROM openai_usage
82WHERE (
83 (model LIKE 'gpt-4%' AND model NOT LIKE '%mini%')

openaiproxyshell.ts2 matches

@a01•Updated 1 month ago
1import { OpenAIUsage } from "https://esm.town/v/std/OpenAIUsage";
2
3const client = new OpenAIUsage();
4
5const t0 = performance.now();

openaiproxyREADME.md3 matches

@a01•Updated 1 month ago
1# OpenAI Proxy
2
3This OpenAI API proxy injects Val Town's API keys. For usage documentation,
4check out https://www.val.town/v/std/openai
5
6Adapted from https://blog.r0b.io/post/creating-a-proxy-with-deno/

openaiproxymain.ts8 matches

@a01•Updated 1 month ago
1import { parseBearerString } from "https://esm.town/v/andreterron/parseBearerString";
2import { API_URL } from "https://esm.town/v/std/API_URL?v=5";
3import { OpenAIUsage } from "./usage.ts";
4const client = new OpenAIUsage();
5
6const allowedPathnames = [
30
31 // Proxy the request
32 const url = new URL("." + pathname, "https://api.openai.com");
33 url.search = search;
34
35 const headers = new Headers(req.headers);
36 headers.set("Host", url.hostname);
37 headers.set("Authorization", `Bearer ${Deno.env.get("OPENAI_API_KEY")}`);
38 headers.set("OpenAI-Organization", Deno.env.get("OPENAI_API_ORG"));
39
40 const modifiedBody = await limitFreeModel(req, user);
51 });
52
53 const openAIRes = await fetch(url, {
54 method: req.method,
55 headers,
59
60 // Remove internal header
61 const res = new Response(openAIRes.body, openAIRes);
62 res.headers.delete("openai-organization");
63 return res;
64}

mike-webaimlapi.ts1 match

@devmanmikey•Updated 1 month ago
1// AIMLAPI client (OpenAI-compatible) for Bagoodex Web Search
2// Server-only module: do not import in client code
3

Moldmain.tsx3 matches

@join•Updated 1 month ago
2// This version asks the user for additional context to provide a more comprehensive report.
3
4import { OpenAI } from "https://esm.town/v/std/openai";
5import { Hono } from "npm:hono@4.4.12";
6import type { Context } from "npm:hono@4.4.12";
317 `.trim();
318
319 const openai = new OpenAI();
320 const response = await openai.chat.completions.create({
321 model: "gpt-4o",
322 messages: [

re-searchmain.js4 matches

@yawnxyz•Updated 1 month ago
12// Settings configuration
13const settings = {
14 model: 'openai/gpt-oss-120b',
15 stream: false,
16 reasoningEffort: 'low'
190 language,
191 offset = 0,
192 model = 'openai/gpt-oss-120b',
193 reasoning_effort = 'medium',
194 tools = [{ type: 'browser_search' }],
317 language,
318 offset = 0,
319 model = 'openai/gpt-oss-120b',
320 reasoning_effort = 'medium',
321 tools = [{ type: 'browser_search' }],
383 start: async (controller) => {
384 try {
385 const upstream = await fetch("https://api.groq.com/openai/v1/chat/completions", {
386 method: 'POST',
387 headers: {

re-searchchatStreaming.js2 matches

@yawnxyz•Updated 1 month ago
6import "jsr:@std/dotenv/load";
7
8const API_URL = "https://api.groq.com/openai/v1/chat/completions";
9
10function getApiKey() {
28 },
29 ],
30 model: "openai/gpt-oss-120b",
31 stream: true,
32 reasoning_effort: "medium",

re-searchchatCompletion.js2 matches

@yawnxyz•Updated 1 month ago
3export async function groqChatCompletion(apiKey, payload) {
4 console.log('>>> [groqChatCompletion] Payload:', payload);
5 const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
6 method: 'POST',
7 headers: {
53 try {
54 const res = await groqChatCompletion(apiKey, {
55 model: 'openai/gpt-oss-120b',
56 messages: [
57 { role: 'system', content: 'Classify the user request as either links or text. Respond with a single token: links or text. Use links if the user appears to want a search results list of sources; use text if the user is asking for an explanation/summary/definition.' },

re-searchjsonUtils.js10 matches

@yawnxyz•Updated 1 month ago
2// Converts arbitrary text into a strict JSON object and returns the `results` array
3export async function extractOrRepairJsonResults(rawText, apiKey, language = 'english') {
4 const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
5 method: 'POST',
6 headers: {
9 },
10 body: JSON.stringify({
11 model: 'openai/gpt-oss-120b',
12 messages: [
13 {
75// This helper instructs the model to ONLY use URLs that appear in the provided text and never invent new ones.
76export async function extractResultsFromToolOutputs(toolText, apiKey, language = 'english') {
77 const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
78 method: 'POST',
79 headers: {
82 },
83 body: JSON.stringify({
84 model: 'openai/gpt-oss-120b',
85 messages: [
86 {
162// Structured summary extraction to eliminate citation artifacts and enforce clean fields
163export async function extractStructuredSummary(rawText, apiKey) {
164 const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
165 method: 'POST',
166 headers: {
169 },
170 body: JSON.stringify({
171 model: 'openai/gpt-oss-120b',
172 messages: [
173 {
293 if (reasoningText && String(reasoningText).trim()) parts.push('Reasoning text:\n' + String(reasoningText));
294
295 const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
296 method: 'POST',
297 headers: {
300 },
301 body: JSON.stringify({
302 model: 'openai/gpt-oss-120b',
303 messages: [
304 {
380 if (reasoningText && String(reasoningText).trim()) parts.push('Reasoning text:\n' + String(reasoningText));
381
382 const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
383 method: 'POST',
384 headers: {
387 },
388 body: JSON.stringify({
389 model: 'openai/gpt-oss-120b',
390 messages: [
391 {

hello-realtime5 file matches

@jubertioai•Updated 2 days ago
Sample app for the OpenAI Realtime API

openai-gemini1 file match

@ledudu•Updated 1 week ago
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai"; import { sqlite } from "https://esm.town/v/stevekrouse/sqlite"; /** * Practical Implementation of Collective Content Intelligence * Bridging advanced AI with collaborative content creation */ exp
kwhinnery_openai