Val Town Code SearchReturn to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=65&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 3245 results for "openai" (1485ms)

Lou / ai_performance_logger.ts — 53 matches

@jeffvincentUpdated 1 month ago
5 timestamp: string;
6 sessionId: string;
7 type: 'yahoo_api' | 'mlb_data' | 'openai_request' | 'openai_response' | 'error' | 'performance';
8 userId?: string;
9 leagueId?: string;
31 summary?: {
32 yahooApiCalls: number;
33 openaiRequests: number;
34 totalTokensUsed: number;
35 errors: number;
123
124 /**
125 * Log OpenAI request with full prompt
126 */
127 logOpenAiRequest(model: string, prompt: string, requestConfig: any, retryAttempt: number = 0): void {
128 this.addLogEntry({
129 type: 'openai_request',
130 data: {
131 model,
144
145 /**
146 * Log OpenAI response with full content
147 */
148 logOpenAiResponse(model: string, response: string, executionTime: number, tokenCount?: number): void {
149 this.addLogEntry({
150 type: 'openai_response',
151 data: {
152 model,
240
241 const yahooEntries = session.entries.filter(e => e.type === 'yahoo_api');
242 const openaiRequests = session.entries.filter(e => e.type === 'openai_request');
243 const openaiResponses = session.entries.filter(e => e.type === 'openai_response');
244 const errors = session.entries.filter(e => e.type === 'error');
245
256
257## AI Processing
258- OpenAI Requests: ${openaiRequests.length}
259- OpenAI Responses: ${openaiResponses.length}
260- Total Tokens Used: ${openaiResponses.reduce((sum, e) => sum + (e.metadata?.tokenCount || 0), 0)}
261- Average AI Response Time: ${openaiResponses.length > 0 ? Math.round(openaiResponses.reduce((sum, e) => sum + (e.metadata?.executionTime || 0), 0) / openaiResponses.length) : 0}ms
262
263## Errors
312
313 /**
314 * Extract all OpenAI prompts and responses for analysis
315 */
316 extractOpenAiData(sessionId: string): any {
317 const session = this.sessions.get(sessionId);
318 if (!session) return null;
319
320 const openaiRequests = session.entries.filter(e => e.type === 'openai_request');
321 const openaiResponses = session.entries.filter(e => e.type === 'openai_response');
322
323 return {
324 sessionId,
325 timestamp: session.startTime,
326 requests: openaiRequests.map((entry, index) => ({
327 requestId: index,
328 timestamp: entry.timestamp,
333 config: entry.data.config
334 })),
335 responses: openaiResponses.map((entry, index) => ({
336 responseId: index,
337 timestamp: entry.timestamp,
355
356 const yahooData = this.extractYahooData(sessionId);
357 const openaiData = this.extractOpenAiData(sessionId);
358
359 return `
366- Total Data Collected: ${yahooData?.yahooApiCalls?.reduce((sum, call) => sum + (call.dataSize || 0), 0)} bytes
367
368## OpenAI Processing Quality
369- Requests Made: ${openaiData?.requests?.length || 0}
370- Responses Received: ${openaiData?.responses?.length || 0}
371- Average Prompt Length: ${openaiData?.requests?.reduce((sum, req) => sum + (req.promptLength || 0), 0) / (openaiData?.requests?.length || 1)} characters
372- Parse Success Rate: ${openaiData?.responses?.filter(r => r.parsedSuccessfully).length / (openaiData?.responses?.length || 1) * 100}%
373- Average Response Time: ${openaiData?.responses?.reduce((sum, res) => sum + (res.executionTime || 0), 0) / (openaiData?.responses?.length || 1)}ms
374
375## Recommendations for Prompt Optimization
376${this.generatePromptOptimizationRecommendations(openaiData)}
377`;
378 }
396 },
397 yahooData: this.extractYahooData(sessionId),
398 openaiData: this.extractOpenAiData(sessionId),
399 performance: session.entries.filter(e => e.type === 'performance'),
400 errors: session.entries.filter(e => e.type === 'error'),
433 private generateSessionSummary(session: LogSession): LogSession['summary'] {
434 const yahooApiCalls = session.entries.filter(e => e.type === 'yahoo_api').length;
435 const openaiRequests = session.entries.filter(e => e.type === 'openai_request').length;
436 const totalTokensUsed = session.entries
437 .filter(e => e.type === 'openai_response')
438 .reduce((sum, e) => sum + (e.metadata?.tokenCount || 0), 0);
439 const errors = session.entries.filter(e => e.type === 'error').length;
441 return {
442 yahooApiCalls,
443 openaiRequests,
444 totalTokensUsed,
445 errors,
450 private calculateDataQualityScore(session: LogSession): number {
451 // Simple scoring based on successful responses and data completeness
452 const totalRequests = session.entries.filter(e => e.type === 'yahoo_api' || e.type === 'openai_request').length;
453 const errors = session.entries.filter(e => e.type === 'error').length;
454 const successfulResponses = session.entries.filter(e => e.type === 'openai_response' && e.data.parsedSuccessfully).length;
455
456 if (totalRequests === 0) return 0;
457
458 const errorRate = errors / totalRequests;
459 const successRate = successfulResponses / session.entries.filter(e => e.type === 'openai_request').length;
460
461 return Math.round((1 - errorRate) * successRate * 100);
464 private analyzeDataQuality(session: LogSession): string {
465 const yahooEntries = session.entries.filter(e => e.type === 'yahoo_api');
466 const openaiResponses = session.entries.filter(e => e.type === 'openai_response');
467
468 const insights = [];
475 }
476
477 if (openaiResponses.length > 0) {
478 const successfulParses = openaiResponses.filter(e => e.data.parsedSuccessfully).length;
479 const parseSuccessRate = (successfulParses / openaiResponses.length) * 100;
480 insights.push(`- OpenAI JSON parse success rate: ${Math.round(parseSuccessRate)}%`);
481 }
482
488
489 const yahooEntries = session.entries.filter(e => e.type === 'yahoo_api');
490 const openaiRequests = session.entries.filter(e => e.type === 'openai_request');
491 const openaiResponses = session.entries.filter(e => e.type === 'openai_response');
492 const errors = session.entries.filter(e => e.type === 'error');
493
498
499 // Check for large prompts
500 const avgPromptSize = openaiRequests.reduce((sum, e) => sum + (e.data.promptLength || 0), 0) / openaiRequests.length;
501 if (avgPromptSize > 5000) {
502 recommendations.push('- Consider reducing prompt size for faster OpenAI responses');
503 }
504
505 // Check for parsing errors
506 const parseFailures = openaiResponses.filter(e => !e.data.parsedSuccessfully).length;
507 if (parseFailures > 0) {
508 recommendations.push('- Improve prompt clarity to reduce JSON parsing failures');
510
511 // Check for slow responses
512 const slowResponses = openaiResponses.filter(e => (e.metadata?.executionTime || 0) > 15000).length;
513 if (slowResponses > 0) {
514 recommendations.push('- Consider using a faster OpenAI model or reducing prompt complexity');
515 }
516
522 }
523
524 private generatePromptOptimizationRecommendations(openaiData: any): string {
525 if (!openaiData?.requests?.length) {
526 return '- No OpenAI requests to analyze';
527 }
528
529 const recommendations = [];
530 const avgPromptLength = openaiData.requests.reduce((sum: number, req: any) => sum + (req.promptLength || 0), 0) / openaiData.requests.length;
531 const avgResponseTime = openaiData.responses?.reduce((sum: number, res: any) => sum + (res.executionTime || 0), 0) / (openaiData.responses?.length || 1);
532 const parseSuccessRate = (openaiData.responses?.filter((r: any) => r.parsedSuccessfully).length || 0) / (openaiData.responses?.length || 1);
533
534 if (avgPromptLength > 8000) {

Lou / ai_performance_openai_client.ts — 53 matches

@jeffvincentUpdated 1 month ago
1// Lou Fantasy Baseball OpenAI AI Analysis Client
2// Robust OpenAI API integration for fantasy baseball performance analysis
3// Val.town Compatible TypeScript Implementation
4
7
8// ============================================================================
9// OPENAI CLIENT INTERFACES
10// ============================================================================
11
12/**
13 * AI Performance Analysis Response Interface
14 * Structured output from OpenAI analysis of fantasy baseball performance
15 */
16export interface AIPerformanceAnalysis {
90
91/**
92 * OpenAI API Configuration Interface
93 */
94export interface OpenAIConfig {
95 apiKey: string;
96 model: string;
102
103/**
104 * OpenAI API Error Interface
105 */
106export interface OpenAIError {
107 type: 'RATE_LIMIT' | 'API_ERROR' | 'TIMEOUT' | 'PARSE_ERROR' | 'AUTH_ERROR' | 'UNKNOWN';
108 message: string;
113
114// ============================================================================
115// OPENAI CLIENT IMPLEMENTATION
116// ============================================================================
117
118/**
119 * Lou OpenAI Client - Robust AI Analysis for Fantasy Baseball
120 *
121 * Features:
126 * - Val.town optimized implementation
127 */
128export class LouOpenAIClient {
129 private config: OpenAIConfig;
130 private requestQueue: Array<() => Promise<any>> = [];
131 private isProcessingQueue: boolean = false;
133 private readonly MIN_REQUEST_INTERVAL = 1000; // 1 second between requests
134
135 constructor(config?: Partial<OpenAIConfig>) {
136 this.config = {
137 apiKey: config?.apiKey || process.env.OPENAI_API_KEY || '',
138 model: config?.model || process.env.OPENAI_MODEL || 'o4-mini',
139 maxTokens: config?.maxTokens || 2000,
140 temperature: config?.temperature || 0.7,
144
145 if (!this.config.apiKey) {
146 console.error('❌ OpenAI API key not provided. Set OPENAI_API_KEY environment variable.');
147 }
148
149 console.log(`🤖 Lou OpenAI Client initialized with model: ${this.config.model}`);
150 }
151
180
181 // Log the request
182 aiLogger.logOpenAiRequest(this.config.model, prompt, this.config);
183
184 // Execute OpenAI request with retry logic
185 const response = await this.executeWithRetry(async () => {
186 return await this.makeOpenAIRequest(prompt, 'performance_analysis');
187 });
188
242
243 // Log the request
244 aiLogger.logOpenAiRequest(this.config.model, prompt, this.config);
245
246 // Execute OpenAI request
247 const response = await this.executeWithRetry(async () => {
248 return await this.makeOpenAIRequest(prompt, 'waiver_recommendations');
249 });
250
271
272 /**
273 * Execute OpenAI API request with comprehensive retry logic
274 */
275 private async executeWithRetry<T>(operation: () => Promise<T>): Promise<T | null> {
307
308 /**
309 * Make OpenAI API request with proper error handling
310 */
311 private async makeOpenAIRequest(prompt: string, requestType: string): Promise<string | null> {
312 const startTime = Date.now();
313
314 try {
315 console.log(`🤖 Making OpenAI request: ${requestType}`);
316
317 // Validate API key
318 if (!this.config.apiKey) {
319 throw new Error('OpenAI API key not configured');
320 }
321
342 const timeoutId = setTimeout(() => controller.abort(), this.config.requestTimeout);
343
344 const response = await fetch('https://api.openai.com/v1/chat/completions', {
345 method: 'POST',
346 headers: {
357 if (!response.ok) {
358 const errorText = await response.text();
359 throw this.createOpenAIError(response.status, errorText);
360 }
361
365
366 if (!content) {
367 throw new Error('Empty response from OpenAI API');
368 }
369
370 const duration = Date.now() - startTime;
371 console.log(`✅ OpenAI request completed in ${duration}ms`);
372
373 // Log the response
374 aiLogger.logOpenAiResponse(this.config.model, content, duration, data.usage?.total_tokens);
375
376 return content;
380
381 if (error.name === 'AbortError') {
382 console.error(`⏰ OpenAI request timeout after ${duration}ms`);
383 throw this.createOpenAIError(408, 'Request timeout');
384 }
385
386 console.error(`❌ OpenAI request failed after ${duration}ms:`, error);
387 throw error;
388 }
793
794 /**
795 * Create standardized OpenAI error
796 */
797 private createOpenAIError(statusCode: number, message: string): OpenAIError {
798 let type: OpenAIError['type'] = 'UNKNOWN';
799
800 if (statusCode === 401) type = 'AUTH_ERROR';
819
820 /**
821 * Validate OpenAI configuration
822 */
823 public validateConfig(): { valid: boolean; errors: string[] } {
825
826 if (!this.config.apiKey) {
827 errors.push('OpenAI API key is required');
828 }
829
830 if (!this.config.model) {
831 errors.push('OpenAI model is required');
832 }
833
849 * Get current configuration (without API key)
850 */
851 public getConfig(): Omit<OpenAIConfig, 'apiKey'> {
852 const { apiKey, ...configWithoutKey } = this.config;
853 return configWithoutKey;
855
856 /**
857 * Test OpenAI API connection
858 */
859 async testConnection(): Promise<{ success: boolean; error?: string; model?: string }> {
860 try {
861 console.log('🧪 Testing OpenAI API connection...');
862
863 const response = await this.makeOpenAIRequest(
864 'Please respond with a simple JSON object: {"status": "connected", "message": "API test successful"}',
865 'connection_test'
872 const parsed = JSON.parse(response);
873 if (parsed.status === 'connected') {
874 console.log('✅ OpenAI API connection successful');
875 return { success: true, model: this.config.model };
876 }
879
880 } catch (error) {
881 console.error('❌ OpenAI API connection test failed:', error);
882 return {
883 success: false,
893
894/**
895 * Create a configured LouOpenAIClient instance
896 * @param config Optional configuration overrides
897 * @returns Configured OpenAI client
898 */
899export function createOpenAIClient(config?: Partial<OpenAIConfig>): LouOpenAIClient {
900 return new LouOpenAIClient(config);
901}
902
906
907/**
908 * Validate performance data before sending to OpenAI
909 * @param data Performance data to validate
910 * @returns Validation result

Lou / ai_performance_orchestrator.ts — 6 matches

@jeffvincentUpdated 1 month ago
8import { LouPerformanceAnalyzer, PerformanceAnalysisReport } from "./ai_performance_yahoo_integration.ts";
9import {
10 LouOpenAIClient,
11 createOpenAIClient,
12 AIPerformanceAnalysis,
13 WaiverRecommendation
14} from "./ai_performance_openai_client.ts";
15import { LouTokenStorage } from "../oauth/token_storage.tsx";
16import {
89
90 const performanceAnalyzer = new LouPerformanceAnalyzer();
91 const openaiClient = createOpenAIClient({
92 model: 'gpt-4o-mini', // More reliable than o4-mini
93 maxTokens: 1500, // Reduce tokens for speed
123 try {
124 // Run team analysis first (faster, more important)
125 aiAnalysis = await openaiClient.analyzeTeamPerformance({
126 teamStats: performanceReport.team_analysis.overall_performance,
127 playerStats: performanceReport.player_insights.top_performers.concat(performanceReport.player_insights.underperformers).slice(0, 8), // Limit players
143 const topCandidates = availableWaiverPlayers.slice(0, 6); // Limit to 6 candidates
144
145 waiverRecommendations = await openaiClient.getPickupRecommendations(
146 performanceReport.player_insights.underperformers.slice(0, 4), // Top 4 underperformers
147 topCandidates

blockFreeEmailAction / CLAUDE.md — 4 matches

@stevekrouseUpdated 1 month ago
88Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90### OpenAI
91
92```ts
93import { OpenAI } from "https://esm.town/v/std/openai";
94const openai = new OpenAI();
95const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

atproto-to-fediverse-test / val-town.md — 4 matches

@tijsUpdated 1 month ago
95_2 or_3) to create a fresh table.
96
97### OpenAI
98
99```ts
100import { OpenAI } from "https://esm.town/v/std/openai";
101const openai = new OpenAI();
102const completion = await openai.chat.completions.create({
103 messages: [
104 { role: "user", content: "Say hello in a creative way" },

AIComplete / test.ts — 2 matches

@wolfUpdated 1 month ago
1import OpenAI from "npm:openai";
2const client = new OpenAI();
3
4const response = await client.responses.create({

blockGmailAction / CLAUDE.md — 4 matches

@nholdenUpdated 1 month ago
88Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
89
90### OpenAI
91
92```ts
93import { OpenAI } from "https://esm.town/v/std/openai";
94const openai = new OpenAI();
95const completion = await openai.chat.completions.create({
96 messages: [
97 { role: "user", content: "Say hello in a creative way" },

analyze-github-issues / utils.ts — 1 match

@wolfUpdated 1 month ago
58 }))
59
60 const { categorizeIssues: categorizeWithAI } = await import('./openai.ts')
61 return categorizeWithAI(processedIssues)
62}

analyze-github-issues / README.md — 2 matches

@wolfUpdated 1 month ago
112. **Environment Variables** (left sidebar)
12 ```bash
13 OPENAI_API_KEY=your_openai_api_key_here
14 ```
15
16## Customization
17
18Edit the system prompt in `openai.ts` to change how issues are categorized.
19
20## Output

analyze-github-issues / openai.ts — 7 matches

@wolfUpdated 1 month ago
1import OpenAI from 'npm:openai'
2
3const openai = new OpenAI({
4 apiKey: Deno.env.get('OPENAI_API_KEY')
5})
6
7export async function categorizeIssues(issues: any[]) {
8 const completion = await openai.chat.completions.create({
9 model: "gpt-4o-mini",
10 messages: [{ role: "user", content: `Analyze these GitHub issues and categorize them into a two-level system:
36
37 const aiResponse = completion.choices[0].message.content
38 if (!aiResponse) throw new Error('OpenAI returned empty response')
39
40 try {
41 return JSON.parse(aiResponse)
42 } catch (error) {
43 console.error('Failed to parse OpenAI response:', aiResponse.substring(0, 1000) + '...')
44
45 // Try to repair truncated JSON by finding the last complete array
49 return JSON.parse(repairedResponse)
50 } catch (repairError) {
51 throw new Error(`Invalid JSON response from OpenAI: ${error.message}`)
52 }
53 }

openai-usage — 1 file match

@nbbaierUpdated 1 day ago

hello-realtime — 5 file matches

@jubertioaiUpdated 4 days ago
Sample app for the OpenAI Realtime API
reconsumeralization
import { OpenAI } from "https://esm.town/v/std/openai"; import { sqlite } from "https://esm.town/v/stevekrouse/sqlite"; /** * Practical Implementation of Collective Content Intelligence * Bridging advanced AI with collaborative content creation */ exp
kwhinnery_openai