Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=125&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 3227 results for "openai" (3396ms)

Egyptmees MediaFetcher.tsx — 4 matches

@mees•Updated 3 months ago
2 const PINECONE_API_KEY = Deno.env.get("PINECONE_API_KEY");
3 const PINECONE_HOST = Deno.env.get("PINECONE_HOST");
4 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
5
6 console.log("PINECONE_HOST:", PINECONE_HOST);
7 console.log("Has API keys:", !!PINECONE_API_KEY, !!OPENAI_API_KEY);
8
9 try {
10 // Get embedding
11 const embResponse = await fetch("https://api.openai.com/v1/embeddings", {
12 method: "POST",
13 headers: {
14 "Authorization": `Bearer ${OPENAI_API_KEY}`,
15 "Content-Type": "application/json",
16 },

Egyptmees SearchEngine.tsx — 4 matches

@mees•Updated 3 months ago
2export const searchEngine = {
3 async search(parsedQuery: any, originalQuery: string) {
4 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
5 const PINECONE_API_KEY = Deno.env.get("PINECONE_API_KEY");
6 const PINECONE_HOST = Deno.env.get("PINECONE_HOST");
18 console.log("Wants latest?", wantsLatest);
19
20 const embedding = await searchEngine.getEmbedding(originalQuery, OPENAI_API_KEY);
21
22 const filter = {};
94
95 async getEmbedding(text: string, apiKey: string) {
96 const response = await fetch("https://api.openai.com/v1/embeddings", {
97 method: "POST",
98 headers: {
108 if (!response.ok) {
109 const error = await response.text();
110 console.error("OpenAI embedding error:", error);
111 throw new Error(`Embedding generation failed: ${response.status}`);
112 }

Egyptmees AnswerBot.tsx — 5 matches

@mees•Updated 3 months ago
4export const answerBot = {
5 async generateAnswer(query: string, searchResults: any, threadId?: string) {
6 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
7
8 // Get or create thread
80 ];
81
82 const response = await fetch("https://api.openai.com/v1/chat/completions", {
83 method: "POST",
84 headers: {
85 "Authorization": `Bearer ${OPENAI_API_KEY}`,
86 "Content-Type": "application/json",
87 },
98 if (!response.ok) {
99 const error = await response.text();
100 console.error("OpenAI API error:", error);
101 throw new Error(`OpenAI API failed: ${response.status}`);
102 }
103

Egyptmees QueryParser.tsx — 3 matches

@mees•Updated 3 months ago
1export const queryParser = {
2 async parse(query: string) {
3 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
4
5 const response = await fetch("https://api.openai.com/v1/chat/completions", {
6 method: "POST",
7 headers: {
8 "Authorization": `Bearer ${OPENAI_API_KEY}`,
9 "Content-Type": "application/json",
10 },

Egypt testPinecone.tsx — 4 matches

@mees•Updated 3 months ago
2 const PINECONE_API_KEY = Deno.env.get("PINECONE_API_KEY");
3 const PINECONE_HOST = Deno.env.get("PINECONE_HOST");
4 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
5
6 console.log("PINECONE_HOST:", PINECONE_HOST);
7 console.log("Has API keys:", !!PINECONE_API_KEY, !!OPENAI_API_KEY);
8
9 try {
10 // Get embedding
11 const embResponse = await fetch("https://api.openai.com/v1/embeddings", {
12 method: "POST",
13 headers: {
14 "Authorization": `Bearer ${OPENAI_API_KEY}`,
15 "Content-Type": "application/json",
16 },

untitled-7983 main.tsx — 3 matches

@dukky•Updated 3 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2
3const openai = new OpenAI();
4
5const completion = await openai.chat.completions.create({
6 messages: [
7 { role: "user", content: "What are some interesting walks in Hertfordshire, starting in Hemel" },

aiTodoApp ai.ts — 6 matches

@dukky•Updated 3 months ago
1import { Hono } from "https://esm.sh/hono@3.11.7";
2import { OpenAI } from "https://esm.town/v/std/openai";
3import type { AIPrioritizeAllResponse, AIPriorityResponse } from "../../shared/types.ts";
4import { getAllTodos, updateTodoAIPriority } from "../database/queries.ts";
5
6const ai = new Hono();
7const openai = new OpenAI();
8
9// Get AI priority suggestion for a single todo
35`;
36
37 const completion = await openai.chat.completions.create({
38 messages: [{ role: "user", content: prompt }],
39 model: "gpt-4o-mini",
44 const response = completion.choices[0]?.message?.content;
45 if (!response) {
46 throw new Error("No response from OpenAI");
47 }
48
100`;
101
102 const completion = await openai.chat.completions.create({
103 messages: [{ role: "user", content: prompt }],
104 model: "gpt-4o-mini",
109 const response = completion.choices[0]?.message?.content;
110 if (!response) {
111 throw new Error("No response from OpenAI");
112 }
113

pinecone new-file-6541.tsx — 41 matches

@mees•Updated 3 months ago
1// MEES Assistant API Endpoint with v2 API
2// Remember to add your environment variables in Val settings:
3// - OPENAI_API_KEY
4// - PINECONE_API_KEY
5// - ASSISTANT_ID
6// - PARSER_ASSISTANT_ID (optional)
7
8import { OpenAI } from "https://deno.land/x/openai@v4.20.1/mod.ts";
9import { Pinecone } from "https://esm.sh/@pinecone-database/pinecone@2.0.0";
10
11// Use OpenAI client only for embeddings
12const openai = new OpenAI({
13 apiKey: Deno.env.get("OPENAI_API_KEY"),
14});
15
42async function parseQueryIntent(query: string) {
43 try {
44 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
45
46 // Create a quick thread for parsing
47 const threadResponse = await fetch("https://api.openai.com/v1/threads", {
48 method: "POST",
49 headers: {
50 "Authorization": `Bearer ${OPENAI_API_KEY}`,
51 "Content-Type": "application/json",
52 "OpenAI-Beta": "assistants=v2",
53 },
54 body: JSON.stringify({}),
58
59 // Add the query
60 await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
61 method: "POST",
62 headers: {
63 "Authorization": `Bearer ${OPENAI_API_KEY}`,
64 "Content-Type": "application/json",
65 "OpenAI-Beta": "assistants=v2",
66 },
67 body: JSON.stringify({
72
73 // Run the parser assistant
74 const runResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
75 method: "POST",
76 headers: {
77 "Authorization": `Bearer ${OPENAI_API_KEY}`,
78 "Content-Type": "application/json",
79 "OpenAI-Beta": "assistants=v2",
80 },
81 body: JSON.stringify({
100
101 const statusResponse = await fetch(
102 `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
103 {
104 headers: {
105 "Authorization": `Bearer ${OPENAI_API_KEY}`,
106 "OpenAI-Beta": "assistants=v2",
107 },
108 },
114 // Get the response
115 const messagesResponse = await fetch(
116 `https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=1`,
117 {
118 headers: {
119 "Authorization": `Bearer ${OPENAI_API_KEY}`,
120 "OpenAI-Beta": "assistants=v2",
121 },
122 },
156
157 // Get embedding for query
158 const embeddingResponse = await openai.embeddings.create({
159 model: "text-embedding-3-small",
160 input: query,
327 });
328
329 // Step 4: Format data for OpenAI
330 debugInfo += `\n\nšŸ“¦ STEP 4: DATA FORMATTING FOR OPENAI\n${"-".repeat(40)}\n`;
331
332 const formattedResults = searchResults.slice(0, 10).map((match) => {
336 const mediaInText = extractMediaFromText(meta.text || "");
337
338 // Truncate text for OpenAI
339 let textContent = meta.text || "";
340 const maxTextLength = 500;
368 });
369
370 debugInfo += `āœ… Formatted ${formattedResults.length} results for OpenAI\n`;
371 debugInfo += `šŸ“„ Sample formatted result:\n${
372 JSON.stringify(formattedResults[0], null, 2).substring(0, 800)
542 }
543
544 const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
545 if (!OPENAI_API_KEY) {
546 throw new Error("OPENAI_API_KEY not found in environment variables");
547 }
548
551 }
552
553 // IMPORTANT: All OpenAI API calls use these headers with assistants=v2
554 const baseHeaders = {
555 "Authorization": `Bearer ${OPENAI_API_KEY}`,
556 "Content-Type": "application/json",
557 "OpenAI-Beta": "assistants=v2",
558 };
559
560 console.log("Assistant ID:", ASSISTANT_ID);
561 console.log("Using OpenAI-Beta: assistants=v2");
562
563 // Parse the query first (if parser assistant is configured)
594 let thread;
595 if (threadId) {
596 const threadResponse = await fetch(`https://api.openai.com/v1/threads/${threadId}`, {
597 headers: baseHeaders,
598 });
602 thread = await threadResponse.json();
603 } else {
604 const threadResponse = await fetch("https://api.openai.com/v1/threads", {
605 method: "POST",
606 headers: baseHeaders,
627
628 // Add message to thread with v2 headers
629 const messageResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
630 method: "POST",
631 headers: baseHeaders,
641
642 // Run the assistant with v2 headers
643 const runResponse = await fetch(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
644 method: "POST",
645 headers: baseHeaders,
693 const mediaInText = extractMediaFromText(meta.text || "");
694
695 // OPTIMIZATION: Only send text excerpts to OpenAI
696 let textContent = meta.text || "";
697 const maxTextLength = 500;
876
877 const statusResponse = await fetch(
878 `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
879 { headers: baseHeaders },
880 );
905 // Submit tool outputs with v2 headers
906 const toolOutputResponse = await fetch(
907 `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}/submit_tool_outputs`,
908 {
909 method: "POST",
929 // Get the assistant's response with v2 headers
930 const messagesResponse = await fetch(
931 `https://api.openai.com/v1/threads/${thread.id}/messages?order=desc&limit=20`,
932 { headers: baseHeaders },
933 );

claudeGeneratedTODO main.tsx — 3 matches

@dukky•Updated 3 months ago
1import { OpenAI } from "https://esm.town/v/std/openai";
2import React, { useEffect, useState } from "npm:react";
3
4const openai = new OpenAI();
5
6interface Todo {
85`;
86
87 const response = await openai.chat.completions.create({
88 model: "gpt-4",
89 messages: [{ role: "user", content: prompt }],

Contextual fork-thought.ts — 6 matches

@c15r•Updated 3 months ago
1import type { MCPTool, ThoughtFork, ThoughtExploration } from "../../../shared/types.ts";
2import type { AuthContext } from "../auth.ts";
3import { OpenAI } from "https://esm.town/v/std/openai";
4import {
5 createThoughtFork,
12} from "../../database/queries.ts";
13
14const openai = new OpenAI();
15
16export const forkThoughtTools: MCPTool[] = [
152
153 try {
154 const completion = await openai.chat.completions.create({
155 messages: [
156 {
224
225 try {
226 const completion = await openai.chat.completions.create({
227 messages: [
228 {
275async function extractInsights(content: string): Promise<string[]> {
276 try {
277 const completion = await openai.chat.completions.create({
278 messages: [
279 {
360
361 try {
362 const completion = await openai.chat.completions.create({
363 messages: [
364 {

openai-usage — 1 file match

@nbbaier•Updated 19 hours ago

hello-realtime — 5 file matches

@jubertioai•Updated 3 days ago
Sample app for the OpenAI Realtime API
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai"; import { sqlite } from "https://esm.town/v/stevekrouse/sqlite"; /** * Practical Implementation of Collective Content Intelligence * Bridging advanced AI with collaborative content creation */ exp
kwhinnery_openai