# OpenAI ChatGPT helper function

This val uses your OpenAI token if you have one, and falls back to @std/openai if not, so it provides limited OpenAI usage for free.

```ts
import { chat } from "https://esm.town/v/stevekrouse/openai";

const { content } = await chat("Hello, GPT!");
console.log(content);
```

You can also pass an array of messages and any other chat-completion options (the messages and options below are illustrative):

```ts
import { chat } from "https://esm.town/v/stevekrouse/openai";

const { content } = await chat(
  [
    { role: "system", content: "You are a helpful assistant." }, // illustrative
    { role: "user", content: "Hello, GPT!" },
  ],
  { model: "gpt-4o", max_tokens: 50 }, // illustrative options
);
console.log(content);
```
import type { ChatCompletion, ChatCompletionCreateParamsNonStreaming, Message } from "npm:@types/openai";

async function getOpenAI() {
  // if you don't have a key, use our std library version
  if (Deno.env.get("OPENAI_API_KEY") === undefined) {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    return new OpenAI();
  } else {
    const { OpenAI } = await import("npm:openai");
    return new OpenAI();
  }
}

/**
 * Initiates a chat conversation with OpenAI's GPT model and retrieves the content of the first response.
 * This function can handle both single string inputs and arrays of message objects.
 * It supports various GPT models, allowing for flexibility in choosing the model based on the application's needs.
 *
 * @param input - a single user message or an array of message objects
 * @param options - any other chat-completion parameters (model, max_tokens, ...)
 * @returns the completion, with the first choice's text exposed as `content`
 */
export async function chat(
  input: string | Message[],
  options?: Omit<ChatCompletionCreateParamsNonStreaming, "messages">,
): Promise<ChatCompletion & { content: string }> {
  const openai = await getOpenAI();
  const messages = Array.isArray(input) ? input : [{ role: "user", content: input }];
  const createParams: ChatCompletionCreateParamsNonStreaming = {
    model: "gpt-3.5-turbo", // default model; pass options to override
    ...options,
    messages,
  };
  const completion = await openai.chat.completions.create(createParams);

  return { ...completion, content: completion.choices[0].message.content };
}
# ChatGPT Implemented in Val Town

Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
/** @jsxImportSource https://esm.sh/react */
import OpenAI from "npm:openai";
import { renderToString } from "npm:react-dom/server";
const openai = new OpenAI();
import { Hono } from "npm:hono@3";

// …

const app = new Hono();
app.get("/", async (c) => {
  const thread = await openai.beta.threads.create();
  const assistant = await openai.beta.assistants.create({
    name: "",
    instructions:
      // …

  const message = c.req.query("message");

  await openai.beta.threads.messages.create(
    threadId,
    { role: "user", content: message },
    // …
  ));
  };
  const run = openai.beta.threads.runs.stream(threadId, {
    assistant_id: assistantId,
    // Make sure we only display messages we haven't seen yet.

// …
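The code above is excerpted from the val. A minimal sketch of the same assistants + threads + SSE flow, assuming `OPENAI_API_KEY` is set; the assistant name, instructions, and model are placeholders, not the original val's values:

```ts
import OpenAI from "npm:openai";

const openai = new OpenAI();

export default async function(req: Request): Promise<Response> {
  // Create an assistant and a thread, then add the user's message to the thread.
  const assistant = await openai.beta.assistants.create({
    name: "demo-assistant",       // placeholder
    instructions: "Be concise.",  // placeholder
    model: "gpt-4o-mini",         // placeholder
  });
  const thread = await openai.beta.threads.create();
  const message = new URL(req.url).searchParams.get("message") ?? "Hello!";
  await openai.beta.threads.messages.create(thread.id, { role: "user", content: message });

  // Run the assistant on the thread and forward each text delta as a Server-Sent Event.
  const encoder = new TextEncoder();
  const body = new ReadableStream({
    start(controller) {
      openai.beta.threads.runs
        .stream(thread.id, { assistant_id: assistant.id })
        .on("textDelta", (delta) =>
          controller.enqueue(encoder.encode(`data: ${JSON.stringify(delta.value ?? "")}\n\n`)))
        .on("end", () => controller.close())
        .on("error", (err) => controller.error(err));
    },
  });

  return new Response(body, {
    headers: { "Content-Type": "text/event-stream", "Cache-Control": "no-cache" },
  });
}
```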
import { OpenAI } from "https://esm.town/v/std/openai";
export default async function(req: Request): Promise<Response> {
  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    stream: true,
    messages: [{ role: "user", content: "Write me a haiku about Deno" }],
    // …
Migrated from folder: OpenAI/OpenAIStreaming
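The val above is excerpted before it returns anything. A minimal way to finish it, assuming the chunks are consumed with `for await` and streamed back as plain text (the model name is illustrative):

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function(req: Request): Promise<Response> {
  const openai = new OpenAI();
  const stream = await openai.chat.completions.create({
    stream: true,
    messages: [{ role: "user", content: "Write me a haiku about Deno" }],
    model: "gpt-4o-mini", // illustrative; not necessarily the original val's model
  });

  // Forward each chunk's delta text into the response body as it arrives.
  const body = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder();
      for await (const chunk of stream) {
        controller.enqueue(encoder.encode(chunk.choices[0]?.delta?.content ?? ""));
      }
      controller.close();
    },
  });

  return new Response(body, { headers: { "Content-Type": "text/plain; charset=utf-8" } });
}
```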
Fork it and authenticate with your Val Town API token as the password. It needs an `OPENAI_API_KEY` env var to be set; change the variables under "Set these to your own" to match your setup.

https://x.com/JanPaul123/status/1812957150559211918
import { zip } from "https://esm.town/v/pomdtr/sql";
import { db } from "https://esm.town/v/sqlite/db";
import OpenAI from "npm:openai";

async function getVals(username, type, limit) {
  // …
}

async function checkHTTPPreviewUpgrade(code) {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    messages: [
      {
        // …
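This last val is also cut off inside the prompt. A hypothetical completion of `checkHTTPPreviewUpgrade`, assuming it asks the model whether the given code needs changes for the HTTP preview upgrade (the prompt wording and model are placeholders, not the original's):

```ts
import OpenAI from "npm:openai";

// Hypothetical sketch: the real val's prompt and model may differ.
async function checkHTTPPreviewUpgrade(code: string): Promise<string> {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    messages: [
      {
        role: "user",
        content: "Does this Val Town HTTP val need changes for the HTTP preview upgrade? "
          + "Answer briefly.\n\n" + code,
      },
    ],
    model: "gpt-4o-mini", // placeholder
  });
  return completion.choices[0].message.content ?? "";
}
```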