Val Town Code Search

API Access

You can access search results via the JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=160&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in the format "username" or "username/projectName".
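
As a quick TypeScript sketch, both endpoints can be hit with plain fetch. The root search path and the shape of the JSON search response are assumptions; only the query parameters (q, page, format) and the /typeahead route are documented above.

// Hedged sketch: query the search API and the typeahead endpoint with plain fetch.
const searchRes = await fetch("https://codesearch.val.run/?q=openai&format=json");
const searchJson = await searchRes.json(); // inspect this before relying on its shape

const typeaheadRes = await fetch("https://codesearch.val.run/typeahead?q=openai");
const suggestions: string[] = await typeaheadRes.json(); // e.g. ["stevekrouse", "patrickjm/gpt3"]
console.log(suggestions.slice(0, 5));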

Found 1751 results for "openai" (700ms)

webscrapeWikipediaIntro • main.tsx • 1 match

@vtdocs•Updated 1 year ago
4 const cheerio = await import("npm:cheerio");
5 const html = await fetchText(
6 "https://en.wikipedia.org/wiki/OpenAI",
7 );
8 const $ = cheerio.load(html);

gpt3 • main.tsx • 19 matches

@patrickjm•Updated 1 year ago
1import { trackOpenAiFreeUsage } from "https://esm.town/v/patrickjm/trackOpenAiFreeUsage";
2import { openAiTextCompletion } from "https://esm.town/v/patrickjm/openAiTextCompletion";
3import { openAiModeration } from "https://esm.town/v/patrickjm/openAiModeration";
4import { openAiFreeQuotaExceeded } from "https://esm.town/v/patrickjm/openAiFreeQuotaExceeded";
5import { openAiFreeUsageConfig } from "https://esm.town/v/patrickjm/openAiFreeUsageConfig";
6
7/**
8 * OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
9 *
10 * val.town has generously provided a free daily quota. Until the quota is met, no need to provide an API key.
11 * To see if the quota has been met, you can run @patrickjm.openAiFreeQuotaExceeded()
12 *
13 * For full REST API access, see @patrickjm.openAiTextCompletion
14 */
15export let gpt3 = async (params: {
16 openAiKey?: string,
17 prompt: string,
18 maxTokens?: number,
21
22 // Determine whether to use provided apiKey or free usage apiKey based on daily quota.
23 const apiKey = params.openAiKey ?? openAiFreeUsageConfig.key;
24 const exceeded = await openAiFreeQuotaExceeded();
25 if (!params.openAiKey && exceeded) {
26 throw new Error(openAiFreeUsageConfig.quota_error);
27 }
28
29 // If using free token, first check inputs against moderation api
30 if (!params.openAiKey) {
31 const moderation = await openAiModeration({
32 apiKey,
33 input: params.prompt,
35 if (moderation.results.some((r) => r.flagged)) {
36 throw new Error(
37 "Sorry, this prompt was flagged by OpenAI moderation. If you provide your own API key, moderation will be turned off."
38 );
39 }
41
42 // Call completion API
43 const response = await openAiTextCompletion({
44 apiKey: apiKey,
45 prompt: params.prompt,
50 // If using free token, track usage against the quota.
51 try {
52 if (!params.openAiKey) {
53 await trackOpenAiFreeUsage(MODEL, response.usage.total_tokens);
54 }
55 } catch (e) {}
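
Going by the signature and the esm.town import pattern shown above, a hedged usage sketch might look like the following. The import URL and the shape of the return value are assumptions; only prompt, maxTokens, and openAiKey appear in the excerpt.

// Hypothetical caller of @patrickjm/gpt3 from another val.
import { gpt3 } from "https://esm.town/v/patrickjm/gpt3";

const text = await gpt3({
  prompt: "Write a haiku about code search.",
  maxTokens: 64,
  // openAiKey: Deno.env.get("OPENAI_API_KEY"), // optional: your own key skips the free quota and moderation
});
console.log(text);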

gpt4 • main.tsx • 1 match

@rlimit•Updated 1 year ago
3
4/**
5 * OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
6 *
7 * val.town and rlimit.com have generously provided a free daily quota. Until the quota is met, no need to provide an API key.

getOpenapiEmbedding • main.tsx • 1 match

@wilt•Updated 1 year ago
9 query: string;
10}): Promise<number[]> =>
11 fetchJSON("https://api.openai.com/v1/embeddings", {
12 method: "POST",
13 headers: {
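
The excerpt cuts off mid-request. For reference, an equivalent call with plain fetch looks roughly like this; the model name is an assumption, since the val's choice isn't visible in the snippet.

// Sketch of an OpenAI embeddings request, mirroring what getOpenapiEmbedding appears to do.
async function getEmbedding(query: string): Promise<number[]> {
  const res = await fetch("https://api.openai.com/v1/embeddings", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${Deno.env.get("OPENAI_API_KEY")}`,
    },
    body: JSON.stringify({
      model: "text-embedding-ada-002", // assumed; not shown in the excerpt
      input: query,
    }),
  });
  const json = await res.json();
  return json.data[0].embedding;
}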

questionsWithGuidelinesChain • main.tsx • 6 matches

@jacoblee93•Updated 1 year ago
2
3export const questionsWithGuidelinesChain = (async () => {
4 const { ChatOpenAI } = await import(
5 "https://esm.sh/langchain@0.0.150/chat_models/openai"
6 );
7 const { LLMChain } = await import("https://esm.sh/langchain@0.0.150/chains");
19 ]);
20 const questionChain = questionPrompt
21 .pipe(new ChatOpenAI({
22 openAIApiKey: process.env.OPENAI_API_KEY,
23 })
24 .pipe(new StringOutputParser()));
31 const styleChain = stylePrompt
32 .pipe(
33 new ChatOpenAI({
34 openAIApiKey: process.env.OPENAI_API_KEY,
35 }),
36 )

askLexi • main.tsx • 6 matches

@thomasatflexos•Updated 1 year ago
15 );
16 const { SupabaseVectorStore } = await import("npm:langchain/vectorstores");
17 const { ChatOpenAI } = await import("npm:langchain/chat_models");
18 const { OpenAIEmbeddings } = await import("npm:langchain/embeddings");
19 const { createClient } = await import(
20 "https://esm.sh/@supabase/supabase-js@2"
25 );
26 let streamedResponse = "";
27 const chat = new ChatOpenAI({
28 modelName: "gpt-3.5-turbo",
29 openAIApiKey: process.env.OPEN_API_KEY,
30 streaming: true,
31 callbacks: [{
35 });
36 const vectorStore = await SupabaseVectorStore.fromExistingIndex(
37 new OpenAIEmbeddings({
38 openAIApiKey: process.env.OPEN_API_KEY,
39 }),
40 {

generateValCodeAPI • main.tsx • 1 match

@andreterron•Updated 1 year ago
4export let generateValCodeAPI = (description: string) =>
5 generateValCode(
6 process.env.VT_OPENAI_KEY,
7 description,
8 );

big_stories_ranks • main.tsx • 3 matches

@tmcw•Updated 1 year ago
61 "ranks": [[1689676050115, 8], [1689679650435, 9], [1689683250422, 9], [1689686850141, 9], [1689690449649, 9], [1689694050066, 10]]
62 },
63 "https://www.nytimes.com/2023/07/18/technology/openai-chatgpt-facial-recognition.html": {
64 "title": "OpenAI Worries About What Its Chatbot Will Say About People’s Faces",
65 "url": "https://www.nytimes.com/2023/07/18/technology/openai-chatgpt-facial-recognition.html",
66 "section": "technology",
67 "ranks": [[1689676050115, 9], [1689679650435, 10], [1689683250422, 10], [1689686850141, 10], [1689690449649, 10], [1689694050066, 11]]

unserializeableLogEx • main.tsx • 4 matches

@stevekrouse•Updated 1 year ago
2
3export let unserializeableLogEx = (async () => {
4 const { Configuration, OpenAIApi } = await import("npm:openai");
5 const configuration = new Configuration({
6 apiKey: process.env.openai,
7 });
8 const openai = new OpenAIApi(configuration);
9 console.log(openai);
10})();

translateToEnglishWithOpenAI • 1 file match

@shlmt•Updated 1 week ago

testOpenAI • 1 file match

@stevekrouse•Updated 1 week ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: {
        "Access-Control-Allow-Origin": "*",
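
The snippet above is truncated. A fuller sketch of an HTTP val using std/openai with a CORS preflight might look like this; the model name and the chat-completions call follow the standard OpenAI SDK surface rather than anything shown in the excerpt.

// Hedged sketch: complete HTTP handler built around the truncated snippet above.
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function (req: Request): Promise<Response> {
  if (req.method === "OPTIONS") {
    return new Response(null, {
      headers: { "Access-Control-Allow-Origin": "*" },
    });
  }

  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini", // assumed; use whichever model std/openai supports
    messages: [{ role: "user", content: "Say hello from Val Town." }],
  });

  return Response.json(completion.choices[0].message, {
    headers: { "Access-Control-Allow-Origin": "*" },
  });
}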