Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=openai&page=136&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=openai

Returns an array of strings in format "username" or "username/projectName"

Found 1602 results for "openai" (1416ms)

weatherGPT — README.md — 1 match

@liaolile•Updated 1 year ago
1If you fork this, you'll need to set `OPENAI_API_KEY` in your [Val Town Secrets](https://www.val.town/settings/secrets).
2
3

gpt4Example — main.tsx — 4 matches

@kyutarou•Updated 1 year ago
1import { OpenAI } from "npm:openai";
2Deno.env.get("OPENAI_API_KEY");
3
4const openai = new OpenAI();
5let chatCompletion = await openai.chat.completions.create({
6 messages: [{
7 role: "user",

gpt4Example — README.md — 1 match

@kyutarou•Updated 1 year ago
3This uses the brand new `gpt-4-1106-preview`.
4
5To use this, set `OPENAI_API_KEY` in your [Val Town Secrets](https://www.val.town/settings/secrets).

get_weather_message — main.tsx — 1 match

@cosmo•Updated 1 year ago
1import { chat } from "https://esm.town/v/cosmo/chat_openai";
2import { getCurrentWeather } from "https://esm.town/v/cosmo/get_current_weather";
3

chat_openai — main.tsx — 3 matches

@cosmo•Updated 1 year ago
1const { default: OpenAI } = await import("npm:openai");
2
3export async function chat(apiKey, messages) {
4 const openai = new OpenAI({ apiKey });
5
6 return openai.chat.completions.create({
7 messages,
8 model: "gpt-3.5-turbo",

draftReadme — main.tsx — 10 matches

@nbbaier•Updated 1 year ago
1import { fetch } from "https://esm.town/v/std/fetch?v=4";
2import OpenAI, { type ClientOptions } from "npm:openai";
3
4export interface WriterOptions extends ClientOptions {
40}
41
42async function performOpenAICall(prompt: string, model: string, openaiOptions: ClientOptions) {
43 const openai = new OpenAI(openaiOptions);
44
45 try {
46 const response = await openai.chat.completions.create({
47 messages: [{ role: "system", content: prompt }],
48 model: model,
50
51 if (!response.choices || response.choices.length === 0) {
52 throw new Error("No response from OpenAI");
53 }
54
56
57 if (!readme) {
58 throw new Error("No readme returned by OpenAI. Try again.");
59 }
60
83
84async function draftReadme(options: WriterOptions) {
85 const { username, valName, model = "gpt-3.5-turbo", userPrompt, ...openaiOptions } = options;
86 const { id, code } = await getVal(username, valName);
87 const prompt = createPrompt(code, userPrompt);
88 const readme = await performOpenAICall(prompt, model, openaiOptions);
89 return readme;
90}
91
92async function writeReadme(options: WriterOptions) {
93 const { username, valName, model = "gpt-3.5-turbo", userPrompt, ...openaiOptions } = options;
94 const { id, code } = await getVal(username, valName);
95 const prompt = createPrompt(code, userPrompt);
96 const readme = await performOpenAICall(prompt, model, openaiOptions);
97 try {
98 const update = await updateReadme(id, readme);

WriterOptions — main.tsx — 1 match

@nbbaier•Updated 1 year ago
1import { type ClientOptions } from "npm:openai";
2
3export interface WriterOptions extends ClientOptions {

draftReadme — README.md — 3 matches

@nbbaier•Updated 1 year ago
1# Code Documentation Assistant
2
3The Code Documentation Assistant is an AI-powered tool that helps generate documentation for code. It uses the OpenAI GPT-3.5 Turbo model to generate readme files in GitHub-flavored markdown based on the provided code.
4
5## Usage
24 - `username` (string): The username of the code owner.
25 - `valName` (string): The name of the Val containing the code.
26 - `model` (optional, default: "gpt-3.5-turbo"): The OpenAI model to use for generating the readme.
27 - `userPrompt` (optional): Additional prompt to include in the documentation.
28
44 - `username` (string): The username of the code owner.
45 - `valName` (string): The name of the Val containing the code.
46 - `model` (optional, default: "gpt-3.5-turbo"): The OpenAI model to use for generating the readme.
47 - `userPrompt` (optional): Additional prompt to include in the documentation.
48

readmeGPT — main.tsx — 9 matches

@nbbaier•Updated 1 year ago
1import { type WriterOptions } from "https://esm.town/v/nbbaier/WriterOptions";
2import { fetch } from "https://esm.town/v/std/fetch?v=4";
3import OpenAI, { type ClientOptions } from "npm:openai";
4
5export class ReadmeWriter {
6 model: string;
7 openai: OpenAI;
8 apiKey: string;
9 valtownKey: string;
10
11 constructor(options: WriterOptions) {
12 const { model, ...openaiOptions } = options;
13 this.model = model ? model : "gpt-3.5-turbo";
14 this.openai = new OpenAI(openaiOptions);
15 this.valtownKey = Deno.env.get("valtown");
16 }
47 }
48
49 private async performOpenAICall(prompt: string) {
50 try {
51 const response = await this.openai.chat.completions.create({
52 messages: [{ role: "system", content: prompt }],
53 model: this.model,
55
56 if (!response.choices || response.choices.length === 0) {
57 throw new Error("No response from OpenAI");
58 }
59
61
62 if (!readme) {
63 throw new Error("No readme returned by OpenAI. Try again.");
64 }
65
92 const { id, code } = await this.getVal(username, valName);
93 const prompt = this.createPrompt(code, userPrompt);
94 const readme = await this.performOpenAICall(prompt);
95 return { id, readme };
96 }

sqliteWriter — main.tsx — 11 matches

@nbbaier•Updated 1 year ago
1import { type WriterOptions } from "https://esm.town/v/nbbaier/WriterOptions";
2import { sqlite } from "https://esm.town/v/std/sqlite";
3import OpenAI from "npm:openai";
4
5interface QueryWriterOptons extends WriterOptions {
11 model: string;
12 apiKey: string;
13 openai: OpenAI;
14
15 constructor(options: QueryWriterOptons) {
16 const { table, model, ...openaiOptions } = options;
17 this.table = table;
18 this.model = model;
19 // this.apiKey = openaiOptions.apiKey ? openaiOptions.apiKey : Deno.env.get("OPENAI_API_KEY");
20 this.openai = new OpenAI(openaiOptions);
21 }
22
54
55 try {
56 const response = await this.openai.chat.completions.create({
57 messages: [{ role: "system", content: prompt }],
58 model: this.model,
60
61 if (!response.choices || response.choices.length === 0) {
62 throw new Error("No response from OpenAI");
63 }
64
66
67 if (!query) {
68 throw new Error("No SQL returned from OpenAI. Try again.");
69 }
70
80
81 try {
82 const response = await this.openai.chat.completions.create({
83 messages: [{ role: "system", content: prompt }],
84 model: this.model,
86
87 if (!response.choices || response.choices.length === 0) {
88 throw new Error("No response from OpenAI");
89 }
90
92
93 if (!query) {
94 throw new Error("No SQL returned from OpenAI. Try again.");
95 }
96

translateToEnglishWithOpenAI — 1 file match

@shlmt•Updated 21 hours ago

testOpenAI — 1 file match

@stevekrouse•Updated 2 days ago
lost1991
import { OpenAI } from "https://esm.town/v/std/openai"; export default async function(req: Request): Promise<Response> { if (req.method === "OPTIONS") { return new Response(null, { headers: { "Access-Control-Allow-Origin": "*",