  // Proxy the request to the OpenAI API
  const url = new URL("." + pathname, "https://api.openai.com");
  url.search = search;

  const headers = new Headers(req.headers);
  headers.set("Host", url.hostname);
  headers.set("Authorization", `Bearer ${Deno.env.get("OPENAI_API_KEY")}`);
  headers.set("OpenAI-Organization", Deno.env.get("OPENAI_API_ORG"));

  const openAIRes = await fetch(url, {
    method: req.method,
    headers,
    body: req.body,
  });

  const res = new Response(openAIRes.body, openAIRes);

  // Remove internal header before returning the response to the caller
  res.headers.delete("openai-organization");

  return res;
# OpenAI Proxy

This OpenAI API proxy injects Val Town's API keys. For usage documentation, check out https://www.val.town/v/std/openai
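The proxy fragment above runs inside an HTTP handler that supplies `req`, `pathname`, and `search`. A minimal sketch of how such an entry point could look (condensed; the handler signature is an assumption, not the val's actual code):

```ts
export default async function handler(req: Request): Promise<Response> {
  // The fragment above expects these two values from the incoming request.
  const { pathname, search } = new URL(req.url);

  // Rebuild the target URL against api.openai.com and forward the request,
  // injecting the server-side API key (see the fragment above for the full logic).
  const url = new URL("." + pathname, "https://api.openai.com");
  url.search = search;

  const headers = new Headers(req.headers);
  headers.set("Authorization", `Bearer ${Deno.env.get("OPENAI_API_KEY")}`);

  const openAIRes = await fetch(url, { method: req.method, headers, body: req.body });
  return new Response(openAIRes.body, openAIRes);
}
```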
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      // Route every request through Val Town's proxy, authenticated with the user's Val Town token.
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  readonly beta = {
    get chat(): RawOpenAI["beta"]["chat"] {
      return this.rawOpenAIClient.beta.chat;
    },
  };
}
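A brief usage sketch of the `timeout` option documented in the constructor's JSDoc above (the value here is only an illustration):

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Wait at most 60 seconds for a response instead of the 10-minute default.
const openai = new OpenAI({ timeout: 60_000 });
```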
# OpenAI - [Docs ↗](https://docs.val.town/std/openai)

Use OpenAI's chat completion API with [`std/openai`](https://www.val.town/v/std/openai). This integration enables access to OpenAI's language models without needing to acquire API keys.

Streaming is not yet supported. Upvote the [HTTP response streaming feature request](https://github.com/val-town/val-town-product/discussions/14) if you need it!
```ts title="Example" val
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4",
  max_tokens: 30,
});

console.log(completion.choices[0].message.content);
```

## Limits

While our wrapper simplifies the integration of OpenAI, there are a few limitations to keep in mind:

* **Usage Quota**: We limit each user to 10 requests per minute.

If these limits are too low, let us know! You can also get around the limitation by using your own keys:

1. Create your own API key on [OpenAI's website](https://platform.openai.com/api-keys)
2. Create an [environment variable](https://www.val.town/settings/environment-variables?adding=true) named `OPENAI_API_KEY`
3. Use the `OpenAI` client from `npm:openai`:

```ts title="Example" val
import { OpenAI } from "npm:openai";

const openai = new OpenAI();
```
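The client reads `OPENAI_API_KEY` from the environment by default. If you prefer to pass the key explicitly, a small variant (same env var as step 2 above):

```ts
import { OpenAI } from "npm:openai";

// Equivalent to the example above, but reads the key explicitly from the environment.
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });
```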
import cronstrue from "https://esm.sh/cronstrue";
import React, { useState } from "https://esm.sh/react@18.2.0";
import { chat } from "https://esm.town/v/stevekrouse/openai?v=19";
import react_http from "https://esm.town/v/stevekrouse/react_http?v=6";

* Hono JSX
* HTML (probably overcomplicates things; should remove)
* @stevekrouse/openai, which is a light wrapper around @std/openai

## TODO
/** @jsxImportSource npm:hono@3/jsx */
import { chat } from "https://esm.town/v/stevekrouse/openai";
import cronstrue from "npm:cronstrue";
import { Hono } from "npm:hono@3";

* Hono JSX
* HTML (probably overcomplicates things; should remove)
* @stevekrouse/openai, which is a light wrapper around @std/openai (see the sketch below)

Migrated from folder: Archive/cron_client_side_script_fork
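For reference, the `chat` import used above comes from stevekrouse/openai, described as a light wrapper around std/openai. Its source isn't included here; a minimal sketch of what such a helper could look like (the signature and defaults are assumptions, not the actual implementation):

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

// Hypothetical convenience helper: accepts a plain prompt string or a full
// messages array and returns the first chat completion choice.
export async function chat(
  prompt: string | { role: "system" | "user" | "assistant"; content: string }[],
  options: { model?: string; max_tokens?: number } = {},
) {
  const openai = new OpenAI();
  const messages = typeof prompt === "string"
    ? [{ role: "user" as const, content: prompt }]
    : prompt;
  const completion = await openai.chat.completions.create({
    model: options.model ?? "gpt-4",
    max_tokens: options.max_tokens,
    messages,
  });
  return completion.choices[0];
}
```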
import { Hono } from "npm:hono@3";
import { html } from "npm:hono@3/html";
import { cors } from "npm:hono/cors";
import { OpenAI } from "npm:openai";

const app = new Hono();
const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY_VOICE") });

class TranscriptionService {
  async transcribeAudio(audioFile) {
    try {
      // Transcribe the recorded audio with Whisper.
      const transcription = await openai.audio.transcriptions.create({
        file: audioFile,
        model: "whisper-1",
        // ...
      });
      return transcription;
    } catch (error) {
      console.error("OpenAI API error:", error);
      throw error;
    }

  try {
    // Translate the transcript with a chat completion.
    const response = await openai.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [
        // ...
    return c.text(translation);
  } catch (error) {
    console.error("OpenAI API error:", error);
    return c.text("Error occurred during translation", 500);
  }

  try {
    // Synthesize the translated text in the selected voice.
    const mp3 = await openai.audio.speech.create({
      model: "tts-1",
      voice: voice,
      // ...
    });
  } catch (error) {
    console.error("OpenAI API error:", error);
    return c.text("Error occurred during speech generation", 500);
  }
The app is set up so you can easily have a conversation between two people. The app translates between the two selected languages, in each voice, as the speakers talk.

Add your OpenAI API key, and make sure to open the app in a separate window for the microphone to work.
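How the conversation flow fits together (a condensed sketch built from the calls shown in the code above; the helper name and the translation prompt are illustrative, not the app's actual implementation):

```ts
import { OpenAI } from "npm:openai";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY_VOICE") });

// Hypothetical end-to-end step: audio in one language -> spoken translation in the other.
async function translateTurn(
  audioFile: File,
  from: string,
  to: string,
  voice: "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer",
) {
  // 1. Speech to text (Whisper).
  const transcription = await openai.audio.transcriptions.create({
    file: audioFile,
    model: "whisper-1",
  });

  // 2. Translate the transcript (chat completion).
  const chat = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: [
      { role: "system", content: `Translate the user's message from ${from} to ${to}.` },
      { role: "user", content: transcription.text },
    ],
  });
  const translation = chat.choices[0].message.content ?? "";

  // 3. Text to speech in the selected voice.
  const mp3 = await openai.audio.speech.create({
    model: "tts-1",
    voice,
    input: translation,
  });
  return new Response(mp3.body, { headers: { "Content-Type": "audio/mpeg" } });
}
```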

Migrated from folder: Projects/VoiceTranslator/translator