You can access search results via the JSON API by adding format=json
to your query:
https://codesearch.val.run/?q=openai&page=7&format=json
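For example, from TypeScript you can fetch a page of results and parse it; the exact shape of the JSON response is not documented here, so it is left untyped in this sketch:

// Sketch: fetch a page of search results as JSON.
const res = await fetch("https://codesearch.val.run/?q=openai&page=7&format=json");
if (!res.ok) throw new Error(`Search request failed: ${res.status}`);
const results: unknown = await res.json();
console.log(results);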
For typeahead suggestions, use the /typeahead
endpoint:
https://codesearch.val.run/typeahead?q=openai
Returns an array of strings in the format "username" or "username/projectName".
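A quick sketch of calling the typeahead endpoint and splitting the suggestions into users and projects, assuming the array-of-strings format described above:

// Sketch: typeahead returns a plain string array.
const res = await fetch("https://codesearch.val.run/typeahead?q=openai");
const suggestions: string[] = await res.json();
for (const s of suggestions) {
  // Entries are either "username" or "username/projectName".
  console.log(s.includes("/") ? `project: ${s}` : `user: ${s}`);
}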
Found 2311 results for "openai" (2372ms)
* is defined directly in the main function handler below.
*
* Assumes the 'openai' secret, containing your OpenAI API key, is set in your Val Town environment.
*
* Last Updated: 2024-05-22
export default async function(req: Request) {
  // --- Dynamic Imports ---
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { fetch } = await import("https://esm.town/v/std/fetch");
  const { PDFExtract } = await import("npm:pdf.js-extract");
...
  max_pdf_size_mb: 10,
  text_truncation_length: 25000,
  openai_model_name: "gpt-4o",
  contact_form_placeholders_en: { name: "Your Name", email: "Your Email", message: "Message" },
  contact_form_placeholders_es: { name: "Tu Nombre", email: "Tu Correo", message: "Mensaje" },
}

// --- Helper Function: Call OpenAI API ---
async function callOpenAI(
  openai: OpenAI,
  systemPrompt: string,
  userMessage: string,
  modelFromConfig = APP_CONFIG.openai_model_name || "gpt-4o",
  expectJson = false,
): Promise<{ role: "assistant" | "system"; content: string | object }> {
  const model = modelFromConfig;
  try {
    const response = await openai.chat.completions.create({
      model,
      messages: [{ role: "system", content: systemPrompt }, { role: "user", content: userMessage }],
...
    }
  } catch (error) {
    console.error("OpenAI API Error:", error.message);
    return { role: "system", content: `AI Error: ${error.message}` };
  }
  log: LogEntry[],
): Promise<LogEntry[]> {
  const openai = new OpenAI();
  log.push({ agent: "System", type: "step", message: "Workflow started." });
...
  const agentSystemPrompt = agentConfig.system_prompt.replace("{{document_text}}", truncText);
  const agentResult = await callOpenAI(
    openai,
    agentSystemPrompt,
    truncText,
    APP_CONFIG.openai_model_name,
    agentConfig.expects_json,
  );
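Because callOpenAI returns role "system" with an error string when the API call throws (see its catch block above), the workflow can distinguish a failed call from a normal assistant reply by checking that field. A minimal sketch of such a check, not taken from the excerpted file (the "error" log type used here is an assumption):

// Sketch only: treat a "system" role as a swallowed API error.
if (agentResult.role === "system") {
  log.push({ agent: "System", type: "error", message: String(agentResult.content) });
} else {
  const text = typeof agentResult.content === "string"
    ? agentResult.content
    : JSON.stringify(agentResult.content);
  log.push({ agent: "System", type: "step", message: text });
}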
import { OpenAI } from "https://esm.town/v/std/openai";
import { sqlite } from "https://esm.town/v/std/sqlite";
import { readFile } from "https://esm.town/v/std/utils/index.ts";
...
}

// Initialize OpenAI
const openai = new OpenAI();

// Initialize cache on startup with error handling
...
}

console.log(`Fetching fresh data from OpenAI for: "${plantName}"`);
// If not cached, fetch from OpenAI
const prompt =
  `Please provide detailed information about the plant "${plantName}" in the following JSON format. Be specific and accurate:
...
Only return the JSON object, no additional text.`;

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: prompt },
...

if (!responseText) {
  return c.json({ error: "No response from OpenAI" }, 500);
}
...
}

// Parse the JSON response from OpenAI
const plantInfo: PlantInfo = JSON.parse(cleanedResponse);
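The excerpt parses cleanedResponse without showing how responseText and cleanedResponse are produced from the completion. A plausible sketch of those elided steps, assuming the common pattern of reading the first choice's content and stripping markdown code fences before JSON.parse (this cleanup logic is an assumption, not the original code):

// Assumed derivation of responseText and cleanedResponse; the original lines are elided above.
const responseText = completion.choices[0]?.message?.content ?? "";
// Models often wrap JSON answers in ```json ... ``` fences; strip them before parsing.
const cleanedResponse = responseText
  .replace(/^```(?:json)?\s*/, "")
  .replace(/\s*```$/, "")
  .trim();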