translator/README.md (1 match)
The app is set up so two people can easily hold a conversation: as the speakers talk, it translates between the two selected languages, in each voice.

Add your OpenAI API key, and make sure to open the app in a separate window so the microphone works.
import { val } from 'https://esm.town/v/std/val';
import { OpenAI } from 'https://esm.town/v/std/openai';

// Initialize OpenAI
const openai = new OpenAI();

// Type definitions for Airtable attachments
// ...
`;

const completion = await openai.chat.completions.create({
  messages: [
    {
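The translator excerpt above cuts off inside the completion call. A minimal sketch of what a translation request with std/openai can look like; the prompt wording and the `sourceLang`/`targetLang`/`transcribedSpeech` variables are illustrative assumptions, not the val's actual code:

```ts
// Hypothetical translation call in the style of the excerpt above
const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [
    {
      role: "system",
      content: `Translate everything the user says from ${sourceLang} to ${targetLang}.`,
    },
    { role: "user", content: transcribedSpeech },
  ],
});
const translated = completion.choices[0].message.content;
```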
contentTemplateApp/main.tsx (3 matches)
} else if (url.pathname === "/api/analyze") {
  if (req.method === 'POST') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    try {
      // ...
      Full Content: ${fullContent}`;

      const completion = await openai.chat.completions.create({
        messages: [{ role: "user", content: prompt }],
        model: "gpt-4o-mini",
// (From a different val: a multi-provider wrapper built on the Vercel AI SDK)
import { createGoogleGenerativeAI, google } from "npm:@ai-sdk/google";
import { createMistral, mistral } from "npm:@ai-sdk/mistral";
import { createOpenAI } from "npm:@ai-sdk/openai";
import { embed, generateObject, generateText, streamText } from "npm:ai";
import { cors } from "npm:hono/cors";

// ...
});

const openai = createOpenAI({
  // apiKey: Deno.env.get("OPENAI_API_KEY"),
  apiKey: Deno.env.get("OPENAI_API_KEY_COVERSHEET"),
});

const groq = createOpenAI({
  baseURL: "https://api.groq.com/openai/v1",
  apiKey: Deno.env.get("GROQ_API_KEY"),
});

const perplexity = createOpenAI({
  apiKey: Deno.env.get("PERPLEXITY_API_KEY") ?? "",
  baseURL: "https://api.perplexity.ai/",
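Each factory above yields a provider whose models plug straight into the AI SDK's generation helpers. A minimal sketch (the model IDs are illustrative, not taken from this val):

```ts
// Any provider created above works with generateText from "npm:ai"
const { text } = await generateText({
  model: groq("llama-3.3-70b-versatile"), // or openai("gpt-4o-mini"), etc.
  prompt: "Summarize the Val Town platform in one sentence.",
});
console.log(text);
```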
// ...
this.memories = options.memories || [];
this.messages = options.messages || [];
this.defaultProvider = options.provider || "openai";
this.defaultModel = options.model;
this.defaultMaxTokens = options.maxTokens;

// ...
let result;
switch (provider) {
  case "openai":
    result = await this.generateOpenAIResponse({
      model,
      prompt,

// ...
}

async generateOpenAIResponse(
  {
    model,
// ...
if (embed) {
  let result = await this.generateOpenAIEmbedding({ model, value, dimensions, user });
  // console.log('embed!', result)
  return result;
// ...
const options = {
  model: openai(modelId),
  system,
  temperature,
// ...
}

async generateOpenAIEmbedding({ model, value, dimensions, user }) {
  const modelId = model || "text-embedding-3-large";
  const options = {
    model: openai.embedding(modelId, {
      dimensions,
      user,
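This method pairs the OpenAI provider's embedding model with the `embed` helper imported at the top of the file. A usage sketch under that assumption:

```ts
// Sketch: embedding a string via the "npm:ai" embed helper
const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-large"),
  value: "sunny day at the beach",
});
console.log(embedding.length); // 3072 dimensions for this model by default
```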
// ...
app.get("/generate", async (c) => {
  const provider = c.req.query("provider") || "openai";
  const model = c.req.query("model");
  const prompt = c.req.query("prompt");

// ...
console.log("post/generate", { mode: "post/generate", prompt, provider, model });
const response = await modelProvider.gen({
  provider: provider || "openai",
  model,
  prompt,
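The GET route reads everything from query parameters, so exercising it needs nothing but a plain request. A hypothetical client call (the host name is a placeholder, and the JSON response shape is assumed):

```ts
// Hypothetical call against the /generate route above
const res = await fetch(
  "https://<your-val>.web.val.run/generate?provider=groq&prompt="
    + encodeURIComponent("Write a haiku about rain"),
);
console.log(await res.json()); // assumes the route responds with JSON
```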
OpenAIChatCompletion/README.md (5 matches)
* fork this val
* use the fork's HTTP endpoint URL (in the form "https://XXX-openaichatcompletion.web.val.run") as the AI server URL
* **define an environment variable called "OpenAIChatCompletion"** with any content you like (but without blanks or control characters, e.g., a [UUID](https://rozek-uuidv4_generator.web.val.run)) and use that as your personal access key

Now you can ask everybody else to provide their own AI credentials and still use the OpenAI access provided by Val.Town for your personal tests.

> **Nota bene: if the environment variable "OpenAIChatCompletion" has not been defined, access to your fork's endpoint is free for everybody!**

In addition to the described authorization, this val also provides resource throttling (using val [floatingQuotaTracker](https://www.val.town/v/rozek/floatingQuotaTracker) in the sqlite tables "OpenAIChatCompletion_Info" and "OpenAIChatCompletion_Log") and calculates some access statistics (using val [InvocationTracker](https://www.val.town/v/rozek/InvocationTracker) in the sqlite tables "OpenAIChatCompletion_Usage_Info" and "OpenAIChatCompletion_Usage_Log").
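With the environment variable set, the fork behaves like a key-protected, OpenAI-compatible endpoint. A hypothetical client call, assuming the access key travels in an OpenAI-style `Authorization` header (check your fork's code for the exact mechanism):

```ts
// Assumption: the fork accepts the "OpenAIChatCompletion" value as a Bearer token
const accessKey = "..."; // the value you stored in "OpenAIChatCompletion"
const response = await fetch("https://XXX-openaichatcompletion.web.val.run", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${accessKey}`,
  },
  body: JSON.stringify({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "Hello!" }],
  }),
});
console.log(await response.json());
```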
cloneChatGPT/main.tsx (3 matches)
export default async function server(request: Request): Promise<Response> {
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const KEY = "cloneChatGPT";
  const SCHEMA_VERSION = 3;

  // ...
  if (request.method === "POST") {
    const openai = new OpenAI();
    const { messages } = await request.json();

    try {
      const completion = await openai.chat.completions.create({
        messages: messages,
        model: "gpt-4o-mini",
cerebrasTemplate/README.md (2 matches)
Use Cerebras for AI on the backend like so:

const { OpenAI } = await import("https://esm.sh/openai");
const client = new OpenAI({
  apiKey: Deno.env.get("CEREBRAS_API_KEY"),
  baseURL: "https://api.cerebras.ai/v1"
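The excerpt stops before the actual request. Since the Cerebras endpoint is OpenAI-compatible, the follow-on call plausibly looks like this (the model ID "llama3.1-8b" is an example of a Cerebras-hosted model, not necessarily the one the template uses):

```ts
// Sketch: completing the Cerebras client setup above
const completion = await client.chat.completions.create({
  model: "llama3.1-8b",
  messages: [{ role: "user", content: "Say hello in one sentence." }],
});
console.log(completion.choices[0].message.content);
```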
cerebrasTemplate/main.tsx (2 matches)
const { messages } = await request.json();

const { OpenAI } = await import("https://esm.sh/openai");
// Cerebras Inference
// Very important: do not change this
// Keep these comments so we remember not to change this
const client = new OpenAI({
  apiKey: Deno.env.get("CEREBRAS_API_KEY"),
  baseURL: "https://api.cerebras.ai/v1",
Priyam28Gpt/main.tsx (4 matches)
if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { messages } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: messages,
      model: "gpt-4o-mini",
      // ...
    });
  } catch (error) {
    console.error("OpenAI Error:", error);
    return new Response(
      JSON.stringify({
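The handler expects a JSON body with a `messages` array, so a client call is a plain POST (the host name is a placeholder):

```ts
// Hypothetical client call against the /chat route above
const res = await fetch("https://<your-val>.web.val.run/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ messages: [{ role: "user", content: "Hi!" }] }),
});
console.log(await res.json());
```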
OpenTownie/generateCode.ts (3 matches)
import OpenAI from "https://esm.sh/openai";

function parseValResponse(response: string) {
// ...
const system = await (await fetch(`${import.meta.url.split("/").slice(0, -1).join("/")}/system_prompt.txt`)).text();

const openai = new OpenAI({
  baseURL: "https://openrouter.ai/api/v1",
  apiKey: Deno.env.get("OPEN_ROUTER_KEY"),
});
console.log(messages);
const completion = await openai.chat.completions.create({
  model: "deepseek/deepseek-r1",
  messages: [
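The excerpt ends inside the `messages` array. A plausible continuation, prefixing the fetched system prompt and handing the reply to `parseValResponse`; the exact message layout is an assumption, not OpenTownie's verbatim code:

```ts
// Sketch: how the truncated completions.create call above might finish
const completion = await openai.chat.completions.create({
  model: "deepseek/deepseek-r1",
  messages: [
    { role: "system", content: system },
    ...messages, // the conversation logged just above
  ],
});
const code = parseValResponse(completion.choices[0].message.content ?? "");
```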