Val Town Code Search — Return to Val Town

API Access

You can access search results via the JSON API by adding `format=json` to your query:

https://codesearch.val.run/?q=openai&page=121&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".

Found 1580 results for "openai" (1323ms)

twitterDailyDigest — main.tsx — 3 matches

@geoffreylitt•Updated 10 months ago
1import { getTweets } from "https://esm.town/v/geoffreylitt/getTweets";
2import { email } from "https://esm.town/v/std/email?v=12";
3import { OpenAI } from "https://esm.town/v/std/openai?v=4";
4import { discordWebhook } from "https://esm.town/v/stevekrouse/discordWebhook";
5import { twitterSearch } from "https://esm.town/v/stevekrouse/twitterSearch";
25];
26
27const openai = new OpenAI();
28
29export async function twitterAlert({ lastRunAt }: Interval) {
37
38 async function filterTweets(tweets) {
39 const completion = await openai.chat.completions.create({
40 messages: [
41 {

semanticSearch — main.tsx — 4 matches

@yawnxyz•Updated 10 months ago
2import { cors } from "npm:hono/cors";
3import { embed, embedMany } from "npm:ai";
4import { openai } from "npm:@ai-sdk/openai";
5import lunr from "https://cdn.skypack.dev/lunr";
6
13}));
14
15openai.apiKey = Deno.env.get("OPENAI_API_KEY");
16
17class SemanticSearch {
55 async getEmbedding(text, modelName) {
56 const { embedding } = await embed({
57 model: openai.embedding(modelName),
58 value: text,
59 });
63 async getEmbeddings(texts, modelName) {
64 const { embeddings } = await embedMany({
65 model: openai.embedding(modelName),
66 values: texts,
67 });

createGeneratedVal — main.tsx — 3 matches

@yawnxyz•Updated 10 months ago
1import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON?v=41";
2import { runVal } from "https://esm.town/v/std/runVal";
3import { OpenAI } from "https://esm.town/v/std/openai";
4const openai = new OpenAI();
5
6export const generateValCode = async (
19 \`\`\`
20 `;
21 const response = await openai.chat.completions.create({
22 model: "gpt-4o",
23 messages: [

generateValCode — main.tsx — 5 matches

@yawnxyz•Updated 10 months ago
1// import { openaiChatCompletion } from "https://esm.town/v/andreterron/openaiChatCompletion";
2import { OpenAI } from "https://esm.town/v/std/openai";
3const openai = new OpenAI();
4
5export const generateValCode = async (
20 \`\`\`
21 `;
22 const response = await openai.chat.completions.create({
23 openaiKey: key,
24 organization: org,
25 body: {

generateValCodeAPI — main.tsx — 1 match

@yawnxyz•Updated 10 months ago
4export let generateValCodeAPI = (description: string) =>
5 generateValCode(
6 process.env.OPENAI_API_KEY,
7 description,
8 );

aiHonoHtmxAlpineStreamingExample — main.tsx — 2 matches

@yawnxyz•Updated 10 months ago
2import { cors } from 'npm:hono/cors';
3import { stream, streamSSE } from "https://deno.land/x/hono@v4.3.11/helper.ts";
4import { OpenAI } from "npm:openai";
5import { ai } from "https://esm.town/v/yawnxyz/ai";
6
7
8const app = new Hono();
9const openai = new OpenAI();
10
11app.use('*', cors({

webgen — main.tsx — 1 match

@jonataaroeira•Updated 11 months ago
98 let pageResult = "";
99
100 // // 2. Do one OpenAI inference to expand that URL to a longer page description
101 const pageDescriptionStream = await togetherAI.inference("mistralai/Mixtral-8x7B-Instruct-v0.1", {
102 prompt: `

createGeneratedVal — README.md — 1 match

@yawnxyz•Updated 11 months ago
1# Use GPT to generate vals on your account!
2
3Describe the val that you need, call this function, and you'll get a new val on your workspace generated by OpenAI's API!
4
5First, ensure you have a [Val Town API Token](https://www.val.town/settings/api), then call `@andreterron.createGeneratedVal({...})` like this [example](https://www.val.town/v/andreterron.untitled_tomatoKiwi):

openAIHonoChatStreamExample — main.tsx — 6 matches

@yawnxyz•Updated 11 months ago
2import { cors } from 'npm:hono/cors';
3import { stream } from "https://deno.land/x/hono@v4.3.11/helper.ts";
4import { OpenAI } from "npm:openai";
5
6const app = new Hono();
7const openai = new OpenAI();
8
9app.use('*', cors({
22
23const SOURCE_URL = ""; // leave blank for deno deploy / native
24// const SOURCE_URL = "https://yawnxyz-openAIHonoChatStreamSample.web.val.run"; // valtown as generator - no SSE
25// const SOURCE_URL = "https://funny-crow-81.deno.dev"; // deno deploy as generator
26
31 <head>
32 <meta charset="UTF-8" />
33 <title>OpenAI Streaming Example</title>
34 <style>
35 body {
48 </head>
49 <body>
50 <h1>OpenAI Streaming Example</h1>
51 <label for="prompt">Prompt:</label>
52 <input type="text" id="prompt" value="tell me a joke" />
178 return stream(c, async (stream) => {
179 try {
180 const chatStream = await openai.chat.completions.create({
181 model: "gpt-4",
182 messages: [{ role: "user", content: prompt }],

gettingOpenAiStreamingtoWork — main.tsx — 3 matches

@yawnxyz•Updated 11 months ago
1import OpenAI from "npm:openai";
2
3const openai = new OpenAI();
4
5export default async (req) => {
22
23 // Generate the AI response
24 const stream = await openai.chat.completions.create({
25 model: "gpt-4o",
26 messages: [{

testOpenAI — 1 file match

@stevekrouse•Updated 1 day ago

testOpenAI — 1 file match

@shouser•Updated 3 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",