Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=30&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 346 results for "openai" (403ms)

askSMHI

askSMHI — README.md — 4 matches

@ljus•Updated 1 month ago
1# askSMHI
2Using OpenAI chat completion with function calls to [SMHI](https://en.wikipedia.org/wiki/Swedish_Meteorological_and_Hydrological_Institute) api
3
4The API is instructed to use the current time in Europe/Stockholm timezone.
8## Relevant API documentation
9* [SMHI, forecast documentation](https://opendata.smhi.se/apidocs/metfcst/get-forecast.html)
10* [OPEN AI, GPT function calling documentation](https://platform.openai.com/docs/guides/function-calling?api-mode=chat&lang=javascript)
11
12## How to use this endpoint
41
42## Environment variables
43* OPENAI_CHAT: Needs to be authorized to write chat completions and to the moderation API.
44## Packages used
45* openai: For typesafe API request and responses
46* valibot: for describing the SMHI API response and function API input
47* valibot/to-json-schema: Transform the schema to json schema (readable by the GPT API)
askSMHI

askSMHI — service — 6 matches

@ljus•Updated 1 month ago
3import { offset, removeOffset } from "npm:@formkit/tempo";
4import { isWithinTokenLimit } from "npm:gpt-tokenizer/model/gpt-4o";
5import { OpenAI } from "npm:openai";
6import * as v from "npm:valibot";
7import { openAIModeration } from "./gpt/moderation";
8import { getWeatherAtCoordinate } from "./smhi/forecast/service";
9import { getSmhiForecastResponseZodSchema } from "./smhi/schema";
18 return { error: "Too many tokens in question" };
19 }
20 const { flagged } = await openAIModeration([question]);
21 if (flagged) {
22 return { error: "Be nice in your question" };
42 }[],
43 };
44 const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
45
46 const completion = await openai.chat.completions.create({
47 model: completionOptions.model,
48 store: completionOptions.store,
130 }]
131 : [];
132 const formattedFunctionResponseData = await openai.chat.completions.create({
133 model: completionOptions.model,
134 store: completionOptions.store,
askSMHI

askSMHI — moderation — 6 matches

@ljus•Updated 1 month ago
1import { fetch } from "https://esm.town/v/std/fetch";
2import process from "node:process";
3import { OpenAI } from "npm:openai";
4
5export async function openAIModeration(publicMessages: string[]) {
6 const openAIKey = process.env.OPENAI_CHAT;
7 const headers = {
8 "Content-Type": "application/json",
9 Authorization: `Bearer ${openAIKey}`,
10 };
11 const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
12 const responseData = await openai.moderations.create({ input: publicMessages });
13 return { flagged: responseData.results.some((r) => r.flagged) };
14}

researchAgent — emailHandler — 2 matches

@xdstudiosmedia•Updated 1 month ago
2import process from "node:process";
3import { marked } from "npm:marked";
4import { OpenAI } from "npm:openai";
5
6function pm(...lines: string[]): string {
16 );
17
18 const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
19 const response = await client.chat.completions.create({
20 model: "sonar",

townieBear — index.ts — 44 matches

@charmaine•Updated 1 month ago
2 * This is a Townie clone for Val Town.
3 * It creates a chat interface that helps users generate Val Town applications.
4 * Uses OpenAI's API to generate code responses in the proper format.
5 */
6/** @jsxImportSource https://esm.sh/react@18.2.0 */
91If you are changing a SQLite table's schema, you should also change the table's name so it creates a fresh table, ie by adding _2 or _3 after it everywhere. Ensure that tables are created before they are used.
92
93OPENAI
94
95Val Town includes a free, proxied OpenAI:
96
97import { OpenAI } from "https://esm.town/v/std/openai";
98const openai = new OpenAI();
99const completion = await openai.chat.completions.create({
100 messages: [
101 { role: "user", content: "Say hello in a creative way" },
176): Promise<string> {
177 try {
178 // Try using the proxied OpenAI first, if that fails, fall back to direct API call
179 try {
180 return await processWithValTownOpenAI(messages, systemPrompt, stream);
181 } catch (error) {
182 console.error("Error with Val Town OpenAI:", error);
183 console.log("Falling back to direct OpenAI API call");
184 return await processWithDirectOpenAI(messages, systemPrompt, stream);
185 }
186 } catch (error) {
191
192/**
193 * Process chat using Val Town's proxied OpenAI
194 */
195async function processWithValTownOpenAI(
196 messages: Message[],
197 systemPrompt: string,
199): Promise<string> {
200 try {
201 const { OpenAI } = await import("https://esm.town/v/std/openai");
202 const openai = new OpenAI();
203
204 // Prepare the messages array with system prompt
205 const openaiMessages = [
206 { role: "system", content: systemPrompt },
207 ...messages.map(m => ({ role: m.role, content: m.content }))
208 ];
209
210 console.log("Sending to Val Town OpenAI:", {
211 messageCount: openaiMessages.length,
212 firstUserMessage: messages[0]?.content?.substring(0, 20) + "..."
213 });
215 // If streaming is not required, get a complete response
216 if (!stream) {
217 const completion = await openai.chat.completions.create({
218 messages: openaiMessages,
219 model: "gpt-4o-mini", // Using Val Town's available model
220 temperature: 0.7,
223 return completion.choices[0]?.message?.content || "Sorry, I couldn't generate a response.";
224 } else {
225 // Streaming is not directly supported by Val Town OpenAI wrapper
226 // Falling back to direct API
227 throw new Error("Streaming not supported by Val Town OpenAI wrapper");
228 }
229 } catch (error) {
230 console.error("Error in processWithValTownOpenAI:", error);
231 throw error;
232 }
234
235/**
236 * Process chat using direct OpenAI API
237 */
238async function processWithDirectOpenAI(
239 messages: Message[],
240 systemPrompt: string,
242): Promise<string> {
243 // Get API key from environment
244 const apiKey = Deno.env.get("OPENAI_API_KEY");
245
246 if (!apiKey) {
247 throw new Error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variable.");
248 }
249
250 // Format messages for OpenAI API
251 const openaiMessages = [
252 { role: "system", content: systemPrompt },
253 ...messages.map(m => ({ role: m.role, content: m.content }))
254 ];
255
256 console.log("Sending to Direct OpenAI:", {
257 messageCount: openaiMessages.length,
258 firstUserMessage: messages[0]?.content?.substring(0, 20) + "..."
259 });
261 if (stream) {
262 // Stream the response if a stream is provided
263 return await streamChatResponse(openaiMessages, apiKey, stream);
264 } else {
265 // Otherwise, return the complete response
266 return await fetchChatResponse(openaiMessages, apiKey);
267 }
268}
269
270/**
271 * Fetch a complete chat response from OpenAI
272 */
273async function fetchChatResponse(messages: any[], apiKey: string): Promise<string> {
274 try {
275 const response = await fetch("https://api.openai.com/v1/chat/completions", {
276 method: "POST",
277 headers: {
288 if (!response.ok) {
289 const errorText = await response.text();
290 console.error("OpenAI API error response:", errorText);
291 try {
292 const errorData = JSON.parse(errorText);
293 throw new Error(`OpenAI API error: ${response.status} ${errorData.error?.message || errorText}`);
294 } catch (e) {
295 throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
296 }
297 }
306
307/**
308 * Stream a chat response from OpenAI
309 */
310async function streamChatResponse(messages: any[], apiKey: string, stream: any): Promise<string> {
311 try {
312 const response = await fetch("https://api.openai.com/v1/chat/completions", {
313 method: "POST",
314 headers: {
326 if (!response.ok) {
327 const errorText = await response.text();
328 console.error("OpenAI API streaming error:", errorText);
329 throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
330 }
331
374 status: "ok",
375 message: "Backend is working",
376 hasOpenAiKey: Boolean(Deno.env.get("OPENAI_API_KEY")),
377 hasValTownOpenAI: true
378 };
379});

MEDIANALIZE_PRO — Medicalreport.tsx — 5 matches

@UndiscoverableERA•Updated 1 month ago
224export default async function server(request: Request): Promise<Response> {
225 if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
226 const { OpenAI } = await import("https://esm.town/v/std/openai");
227 const openai = new OpenAI();
228
229 const body = await request.json();
234
235 try {
236 const completion = await openai.chat.completions.create({
237 messages: [
238 {
265 });
266 } catch (error) {
267 console.error('OpenAI Analysis Error:', error);
268 return new Response(JSON.stringify({
269 diagnosis: 'Unable to generate analysis',
298
299function parseAnalysis(analysis: string) {
300 // Basic parsing of OpenAI response
301 const sections = analysis.split('\n\n');
302 return {

MEDIANALIZE_PRO — healthcare.tsx — 14 matches

@UndiscoverableERA•Updated 1 month ago
222 }
223
224 // Dynamically import OpenAI with error handling
225 let OpenAI;
226 try {
227 const module = await import("https://esm.town/v/std/openai");
228 OpenAI = module.OpenAI;
229 } catch (importError) {
230 console.error("OpenAI Import Error:", importError);
231 return new Response(
232 JSON.stringify({
233 error: "Failed to import OpenAI module",
234 details: String(importError),
235 }),
244 }
245
246 // Ensure OpenAI is available
247 if (!OpenAI) {
248 return new Response(
249 JSON.stringify({
250 error: "OpenAI module not found",
251 }),
252 {
260 }
261
262 // Create OpenAI instance
263 const openai = new OpenAI();
264
265 // Create OpenAI completion with comprehensive error handling
266 let completion;
267 try {
268 completion = await openai.chat.completions.create({
269 messages: body.messages,
270 model: "gpt-4o-mini",
272 });
273 } catch (completionError) {
274 console.error("OpenAI Completion Error:", completionError);
275 return new Response(
276 JSON.stringify({
cerebras_coder

cerebras_coder — migrations — 5 matches

@Proking2005•Updated 1 month ago
1import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
2import OpenAI from "openai";
3import { ITERATIONS_TABLE, KEY, PROJECTS_TABLE, SCHEMA_VERSION } from "./migrations";
4
5const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });
6
7export async function createProject(prompt: string) {
59}
60
61// Example of using OpenAI (you'll need to adapt this to your specific use case)
62export async function generateText(prompt: string) {
63 try {
64 const completion = await openai.chat.completions.create({
65 messages: [{ role: "user", content: prompt }],
66 model: "gpt-4-turbo-preview",
71 return completion.choices[0].message?.content || "No response";
72 } catch (error) {
73 console.error("OpenAI Error:", error);
74 return "Error generating text";
75 }
cerebras_coder

cerebras_coder — queries — 5 matches

@Proking2005•Updated 1 month ago
1import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
2import OpenAI from "openai";
3import { ITERATIONS_TABLE, KEY, PROJECTS_TABLE, SCHEMA_VERSION } from "./migrations";
4
5const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });
6
7export async function createProject(prompt: string) {
59}
60
61// Example of using OpenAI (you'll need to adapt this to your specific use case)
62export async function generateText(prompt: string) {
63 try {
64 const completion = await openai.chat.completions.create({
65 messages: [{ role: "user", content: prompt }],
66 model: "gpt-4-turbo-preview",
70 return completion.choices[0].message?.content || "No response";
71 } catch (error) {
72 console.error("OpenAI Error:", error);
73 return "Error generating text";
74 }
cerebras_coder

cerebras_coder — generate-code — 5 matches

@Proking2005•Updated 1 month ago
1import OpenAI from "openai";
2import STARTER_PROMPTS from "../public/starter-prompts.js";
3
16 };
17 } else {
18 const openai = new OpenAI({
19 apiKey: Deno.env.get(
20 "sk-proj-[REDACTED — NOTE(review): a live-looking OpenAI API key was hardcoded here in the quoted val; it should be revoked immediately and loaded from an environment variable instead]",
21 ),
22 });
23 const completion = await openai.chat.completions.create({
24 messages: [
25 {
46 },
47 ].filter(Boolean),
48 model: "gpt-4-turbo-preview", // Or another suitable OpenAI model
49 max_tokens: 2000, // Adjust as needed
50 });
52 return {
53 code: extractCodeFromFence(completion.choices[0].message.content),
54 time: 0, // OpenAI doesn't directly provide completion time
55 totalTokens: completion.usage?.total_tokens || 1,
56 };

stevens-openai — 5 file matches

@yash_ing•Updated 1 day ago

openaiPricing — 9 file matches

@nbbaier•Updated 1 week ago
Project created from val URLs