Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=1&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1589 results for "openai" (1815ms)

openAiTextCompletion — main.tsx — 7 matches

@patrickjm•Updated 1 year ago
1import { fetchJSON } from "https://esm.town/v/stevekrouse/fetchJSON?v=41";
2
3export let openAiTextCompletion = async (params: {
4 /** https://beta.openai.com/account/api-keys */
5 apiKey: string,
6 /** Optional. https://beta.openai.com/account/org-settings */
7 org?: string,
8
9 // REST args, see https://beta.openai.com/docs/api-reference/completions/create
10 prompt: string,
11 model?: string,
27 if (!params.apiKey) {
28 throw new Error(
29 "Please provide 'apiKey' param. See: https://beta.openai.com/account/api-keys "
30 );
31 }
33 args.stream = false;
34
35 const response = await fetchJSON("https://api.openai.com/v1/completions", {
36 method: "POST",
37 headers: {
38 Authorization: `Bearer ${params.apiKey}`,
39 ...(params.org ? { "OpenAI-Organization": params.org } : {}),
40 },
41 body: JSON.stringify(args),

getVectorStoreBuilder — main.tsx — 1 match

@webup•Updated 1 year ago
5 type: "memory" | "baas";
6 provider?: "pinecone" | "milvus";
7} = { type: "memory" }, embed: "openai" | "huggingface" = "openai") {
8 const { cond, matches } = await import("npm:lodash-es");
9 const builder = await getModelBuilder({

complete — main.tsx — 5 matches

@webup•Updated 1 year ago
2
3export const complete = async (prompt: string | object, options = {}) => {
4 // Initialize OpenAI API stub
5 const { Configuration, OpenAIApi } = await import("https://esm.sh/openai");
6 const configuration = new Configuration({
7 apiKey: process.env.OPENAI,
8 });
9 const openai = new OpenAIApi(configuration);
10 // Request chat completion
11 const completion = await openai.createCompletion({
12 model: "text-davinci-003",
13 prompt,

gpt3Unsafe — main.tsx — 1 match

@stevekrouse•Updated 1 year ago
4export const gpt3Unsafe = runVal("patrickjm.gpt3", {
5 prompt: "Write a haiku about being cool:",
6 openAiKey: process.env.openai,
7});

gpt4_playground — main.tsx — 4 matches

@scio•Updated 1 year ago
2
3export const gpt4_playground = (async (query) => {
4 const { OpenAI } = await import("https://deno.land/x/openai/mod.ts");
5 const openAI = new OpenAI(process.env.OPENAI_KEY);
6 const chatCompletion = openAI.createChatCompletion({
7 model: "gpt-4",
8 messages: [
16 console.log("GPT-4 takes a while...");
17 return await chatCompletion;
18})("Please explain how OpenAI GPT-4 is better than GPT-3");

welcomeEmail — main.tsx — 1 match

@rodrigotello•Updated 1 year ago
23 <li style="margin-bottom:6px">Reference your vals: <div style="${CSScodeStyling};">@me.fizz.split('buzz').length</div></li>
24 <li style="margin-bottom:6px">Reference others' vals: <div style="${CSScodeStyling};">@stevekrouse.moreBuzz()</div></li>
25 <li style="margin-bottom:6px">Reference personal secrets: <div style="${CSScodeStyling};">@me.secrets.openai</div></li>
26 <li style="margin-bottom:6px">Import from npm: <div style="${CSScodeStyling};">const _ = await import("npm:lodash-es")</div></li>
27 <li>Run keyboard shortcut: <div style="${CSScodeStyling};">cmd+enter</div></li>

openaiCompletion — main.tsx — 4 matches

@fgeierst•Updated 1 year ago
1import process from "node:process";
2
3export const openaiCompletion = async (prompt) => {
4 const { OpenAI } = await import("https://deno.land/x/openai/mod.ts");
5 const openAI = new OpenAI(process.env.OPENAI_API_KEY);
6 const completion = openAI.createCompletion({
7 model: "text-davinci-003",
8 prompt: prompt,

demoOpenAIGPTSummary — main.tsx — 2 matches

@zzz•Updated 1 year ago
2import { runVal } from "https://esm.town/v/std/runVal";
3
4export let demoOpenAIGPTSummary = await runVal(
5 "zzz.OpenAISummary",
6 confession,
7 {

gpt3 — main.tsx — 4 matches

@yuval_dikerman•Updated 1 year ago
1import { fetch } from "https://esm.town/v/std/fetch";
2
3export let gpt3 = async (prompt: string, openAiApiKey: string): Promise<string> => {
4 if (!prompt || !openAiApiKey) {
5 let cat = await fetch("https://catfact.ninja/fact");
6 let { fact } = await cat.json();
9 );
10 }
11 const content = await fetch("https://api.openai.com/v1/chat/completions", {
12 method: "POST",
13 body: JSON.stringify({
17 }),
18 headers: {
19 "Authorization": `Bearer ${openAiApiKey}`,
20 "Content-Type": "application/json",
21 },

streamingTest — main.tsx — 3 matches

@jacoblee93•Updated 1 year ago
2
3export const streamingTest = (async () => {
4 const { OpenAI } = await import("https://esm.sh/langchain/llms/openai");
5 // To enable streaming, we pass in `streaming: true` to the LLM constructor.
6 // Additionally, we pass in a handler for the `handleLLMNewToken` event.
7 const chat = new OpenAI({
8 maxTokens: 25,
9 streaming: true,
10 openAIApiKey: process.env.OPENAI_API_KEY,
11 });
12 const response = await chat.call("Tell me a joke.", undefined, [

testOpenAI — 1 file match

@stevekrouse•Updated 1 day ago

testOpenAI — 1 file match

@shouser•Updated 4 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",