Val Town Code Search

API Access

You can access search results as JSON by adding format=json to your query string:

https://codesearch.val.run/...?q=openai&page=32&format=json
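
For example, a minimal sketch of calling the search API from TypeScript. The search route itself is elided ("...") above, so it is passed in as a parameter here, and the response shape is not documented on this page:

```ts
// Minimal sketch: query the code search JSON API.
// The search route is elided ("...") on this page, so the caller supplies it.
// The response schema is not documented here, so it is returned as unknown.
async function searchJson(searchUrl: string, query: string, page = 1): Promise<unknown> {
  const url = new URL(searchUrl);
  url.searchParams.set("q", query);
  url.searchParams.set("page", String(page));
  url.searchParams.set("format", "json");
  const res = await fetch(url);
  if (!res.ok) throw new Error(`Search request failed: ${res.status}`);
  return res.json();
}
```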

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
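
A similar sketch for the typeahead endpoint, using the URL and return format described above:

```ts
// Minimal sketch: fetch typeahead suggestions for a query prefix.
// Per the description above, the endpoint returns an array of strings
// such as "username" or "username/projectName".
async function typeahead(query: string): Promise<string[]> {
  const url = new URL("https://codesearch.val.run/typeahead");
  url.searchParams.set("q", query);
  const res = await fetch(url);
  if (!res.ok) throw new Error(`Typeahead request failed: ${res.status}`);
  return await res.json() as string[];
}

console.log(await typeahead("openai"));
```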

Found 2,265 results for "openai" (2261 ms)

aiTodoApp / ai.ts (6 matches)

@dukky • Updated 2 weeks ago
1import { Hono } from "https://esm.sh/hono@3.11.7";
2import { OpenAI } from "https://esm.town/v/std/openai";
3import type { AIPrioritizeAllResponse, AIPriorityResponse } from "../../shared/types.ts";
4import { getAllTodos, updateTodoAIPriority } from "../database/queries.ts";
5
6const ai = new Hono();
7const openai = new OpenAI();
8
9// Get AI priority suggestion for a single todo
35`;
36
37 const completion = await openai.chat.completions.create({
38 messages: [{ role: "user", content: prompt }],
39 model: "gpt-4o-mini",
44 const response = completion.choices[0]?.message?.content;
45 if (!response) {
46 throw new Error("No response from OpenAI");
47 }
48
100`;
101
102 const completion = await openai.chat.completions.create({
103 messages: [{ role: "user", content: prompt }],
104 model: "gpt-4o-mini",
109 const response = completion.choices[0]?.message?.content;
110 if (!response) {
111 throw new Error("No response from OpenAI");
112 }
113
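
The preview above shows only slices of the handler. A self-contained sketch of the same std/openai chat-completion pattern (the prompt wording is a placeholder; the client setup, model, and response check mirror the excerpt) might look like:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

// Placeholder prompt: the val's actual prompt is not shown in the preview.
async function suggestPriority(todoText: string): Promise<string> {
  const prompt = `Suggest a priority (high, medium, or low) for this todo: ${todoText}`;
  const completion = await openai.chat.completions.create({
    messages: [{ role: "user", content: prompt }],
    model: "gpt-4o-mini",
  });
  const response = completion.choices[0]?.message?.content;
  if (!response) {
    throw new Error("No response from OpenAI");
  }
  return response;
}
```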

pinecone / new-file-6541.tsx (41 matches)

@mees • Updated 2 weeks ago
1// MEES Assistant API Endpoint with v2 API
2// Remember to add your environment variables in Val settings:
3// - OPENAI_API_KEY
4// - PINECONE_API_KEY
5// - ASSISTANT_ID
6// - PARSER_ASSISTANT_ID (optional)
7
8import { OpenAI } from "https://deno.land/x/openai@v4.20.1/mod.ts";
9import { Pinecone } from "https://esm.sh/@pinecone-database/pinecone@2.0.0";
10
11// Use OpenAI client only for embeddings
12const openai = new OpenAI({
13 apiKey: Deno.env.get("OPENAI_API_KEY"),
14});
15
42async function parseQueryIntent(query: string) {
43 try {
44 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
45
46 // Create a quick thread for parsing
47 const threadResponse = await fetch("https://api.openai.com/v1/threads", {
48 method: "POST",
49 headers: {
50 "Authorization": `Bearer ${OPENAI_API_KEY}`,
51 "Content-Type": "application/json",
52 "OpenAI-Beta": "assistants=v2",
53 },
54 body: JSON.stringify({}),
58
59 // Add the query
60 await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
61 method: "POST",
62 headers: {
63 "Authorization": `Bearer ${OPENAI_API_KEY}`,
64 "Content-Type": "application/json",
65 "OpenAI-Beta": "assistants=v2",
66 },
67 body: JSON.stringify({
72
73 // Run the parser assistant
74 const runResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
75 method: "POST",
76 headers: {
77 "Authorization": `Bearer ${OPENAI_API_KEY}`,
78 "Content-Type": "application/json",
79 "OpenAI-Beta": "assistants=v2",
80 },
81 body: JSON.stringify({
100
101 const statusResponse = await fetch(
102 `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
103 {
104 headers: {
105 "Authorization": `Bearer ${OPENAI_API_KEY}`,
106 "OpenAI-Beta": "assistants=v2",
107 },
108 },
114 // Get the response
115 const messagesResponse = await fetch(
116 `https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=1`,
117 {
118 headers: {
119 "Authorization": `Bearer ${OPENAI_API_KEY}`,
120 "OpenAI-Beta": "assistants=v2",
121 },
122 },
156
157 // Get embedding for query
158 const embeddingResponse = await openai.embeddings.create({
159 model: "text-embedding-3-small",
160 input: query,
327 });
328
329 // Step 4: Format data for OpenAI
330 debugInfo += `\n\nšŸ“¦ STEP 4: DATA FORMATTING FOR OPENAI\n${"-".repeat(40)}\n`;
331
332 const formattedResults = searchResults.slice(0, 10).map((match) => {
336 const mediaInText = extractMediaFromText(meta.text || "");
337
338 // Truncate text for OpenAI
339 let textContent = meta.text || "";
340 const maxTextLength = 500;
368 });
369
370 debugInfo += `āœ… Formatted ${formattedResults.length} results for OpenAI\n`;
371 debugInfo += `šŸ“„ Sample formatted result:\n${
372 JSON.stringify(formattedResults[0], null, 2).substring(0, 800)
542 }
543
544 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
545 if (!OPENAI_API_KEY) {
546 throw new Error("OPENAI_API_KEY not found in environment variables");
547 }
548
551 }
552
553 // IMPORTANT: All OpenAI API calls use these headers with assistants=v2
554 const baseHeaders = {
555 "Authorization": `Bearer ${OPENAI_API_KEY}`,
556 "Content-Type": "application/json",
557 "OpenAI-Beta": "assistants=v2",
558 };
559
560 console.log("Assistant ID:", ASSISTANT_ID);
561 console.log("Using OpenAI-Beta: assistants=v2");
562
563 // Parse the query first (if parser assistant is configured)
594 let thread;
595 if (threadId) {
596 const threadResponse = await fetch(`https://api.openai.com/v1/threads/${threadId}`, {
597 headers: baseHeaders,
598 });
602 thread = await threadResponse.json();
603 } else {
604 const threadResponse = await fetch("https://api.openai.com/v1/threads", {
605 method: "POST",
606 headers: baseHeaders,
627
628 // Add message to thread with v2 headers
629 const messageResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
630 method: "POST",
631 headers: baseHeaders,
641
642 // Run the assistant with v2 headers
643 const runResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
644 method: "POST",
645 headers: baseHeaders,
693 const mediaInText = extractMediaFromText(meta.text || "");
694
695 // OPTIMIZATION: Only send text excerpts to OpenAI
696 let textContent = meta.text || "";
697 const maxTextLength = 500;
876
877 const statusResponse = await fetch(
878 `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
879 { headers: baseHeaders },
880 );
905 // Submit tool outputs with v2 headers
906 const toolOutputResponse = await fetch(
907 `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}/submit_tool_outputs`,
908 {
909 method: "POST",
929 // Get the assistant's response with v2 headers
930 const messagesResponse = await fetch(
931 `https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=20`,
932 { headers: baseHeaders },
933 );
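
This excerpt is heavily truncated. Below is a condensed sketch of the Assistants v2 REST flow it follows (create a thread, add the user message, start a run, poll until completion, read the latest message). The endpoints and the OpenAI-Beta: assistants=v2 header come from the excerpt; the polling interval and the minimal error handling are assumptions:

```ts
const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
const ASSISTANT_ID = Deno.env.get("ASSISTANT_ID");

// All Assistants API calls use the v2 beta header, as the excerpt notes.
const baseHeaders = {
  "Authorization": `Bearer ${OPENAI_API_KEY}`,
  "Content-Type": "application/json",
  "OpenAI-Beta": "assistants=v2",
};

async function askAssistant(query: string): Promise<unknown> {
  // 1. Create a thread.
  const thread = await (await fetch("https://api.openai.com/v1/threads", {
    method: "POST",
    headers: baseHeaders,
    body: JSON.stringify({}),
  })).json();

  // 2. Add the user's message to the thread.
  await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
    method: "POST",
    headers: baseHeaders,
    body: JSON.stringify({ role: "user", content: query }),
  });

  // 3. Start a run with the configured assistant.
  const run = await (await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
    method: "POST",
    headers: baseHeaders,
    body: JSON.stringify({ assistant_id: ASSISTANT_ID }),
  })).json();

  // 4. Poll the run until it finishes (1 s interval is an assumption).
  let status = run.status;
  while (status === "queued" || status === "in_progress") {
    await new Promise((r) => setTimeout(r, 1000));
    const check = await (await fetch(
      `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
      { headers: baseHeaders },
    )).json();
    status = check.status;
  }

  // 5. Read the most recent message (the assistant's reply).
  const messages = await (await fetch(
    `https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=1`,
    { headers: baseHeaders },
  )).json();
  return messages.data?.[0];
}
```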

claudeGeneratedTODO / main.tsx (3 matches)

@dukky • Updated 2 weeks ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2import React, { useEffect, useState } from "npm:react";
3
4const openai = new OpenAI();
5
6interface Todo {
85`;
86
87 const response = await openai.chat.completions.create({
88 model: "gpt-4",
89 messages: [{ role: "user", content: prompt }],

Contextual / fork-thought.ts (6 matches)

@c15r • Updated 2 weeks ago
1import type { MCPTool, ThoughtFork, ThoughtExploration } from "../../../shared/types.ts";
2import type { AuthContext } from "../auth.ts";
3import { OpenAI } from "https://esm.town/v/std/openai";
4import {
5 createThoughtFork,
12} from "../../database/queries.ts";
13
14const openai = new OpenAI();
15
16export const forkThoughtTools: MCPTool[] = [
152
153 try {
154 const completion = await openai.chat.completions.create({
155 messages: [
156 {
224
225 try {
226 const completion = await openai.chat.completions.create({
227 messages: [
228 {
275async function extractInsights(content: string): Promise<string[]> {
276 try {
277 const completion = await openai.chat.completions.create({
278 messages: [
279 {
360
361 try {
362 const completion = await openai.chat.completions.create({
363 messages: [
364 {

aiTodoApp / App.tsx (1 match)

@dukky • Updated 2 weeks ago
158
159 <footer className="mt-12 text-center text-gray-500 text-sm">
160 <p>Built with ā¤ļø on Val Town • Powered by OpenAI</p>
161 </footer>
162 </div>

aiTodoApp / README.md (1 match)

@dukky • Updated 2 weeks ago
6
7- āœ… Create, read, update, and delete TODO items
8- šŸ¤– AI-powered task prioritization using OpenAI
9- šŸ“± Responsive web interface
10- šŸ’¾ SQLite database storage

TrdingX / index.ts (3 matches)

@Kkkhan • Updated 2 weeks ago
446 if (request.method === 'POST' && new URL(request.url).pathname === '/ai-analysis') {
447 try {
448 const { OpenAI } = await import("https://esm.town/v/std/openai");
449
450 const body: AIAnalysisRequest = await request.json();
460 }
461
462 const openai = new OpenAI();
463
464 const prompt = `Tu ek expert crypto trader aur market analyst hai.
476 Creative aur informative hinglish mein jawab do. Agar user ne koi specific coin mention kiya hai to uske baare mein detailed analysis do.`;
477
478 const completion = await openai.chat.completions.create({
479 messages: [{ role: "user", content: prompt }],
480 model: "gpt-4o-mini",

Glancer / .cursorrules (4 matches)

@lightweight • Updated 2 weeks ago
94Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
95
96### OpenAI
97
98```ts
99import { OpenAI } from "https://esm.town/v/std/openai";
100const openai = new OpenAI();
101const completion = await openai.chat.completions.create({
102 messages: [
103 { role: "user", content: "Say hello in a creative way" },

Townie / system_prompt.txt (4 matches)

@chadparker • Updated 2 weeks ago
88Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90### OpenAI
91
92```ts
93import { OpenAI } from "https://esm.town/v/std/openai";
94const openai = new OpenAI();
95const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

Townie / .cursorrules (4 matches)

@chadparker • Updated 2 weeks ago
94Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
95
96### OpenAI
97
98```ts
99import { OpenAI } from "https://esm.town/v/std/openai";
100const openai = new OpenAI();
101const completion = await openai.chat.completions.create({
102 messages: [
103 { role: "user", content: "Say hello in a creative way" },

openai-client (4 file matches)

@cricks_unmixed4u • Updated 18 hours ago

openai_enrichment (6 file matches)

@stevekrouse • Updated 1 week ago
kwhinnery_openai
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
/**
 * Practical Implementation of Collective Content Intelligence
 * Bridging advanced AI with collaborative content creation
 */
exp