export default async function server(request: Request) {
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const KEY = "groundedWhiteFowl";
  const SCHEMA_VERSION = 7; // Incrementing schema version for tracking
  // ...
  }

  const openai = new OpenAI();

  // Ensure messages are in the correct format for OpenAI
  const formattedMessages = [
    {
      // ... (message objects elided)
    },
  ];

  const completion = await openai.chat.completions.create({
    messages: formattedMessages,
    model: "gpt-4o-mini",
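The excerpt above stops inside the completion call. Here is a minimal sketch of how a handler like this typically finishes, assuming the reply is returned as JSON; the response shape and the fallback for an empty choice are illustrative, not taken from the original val:

```ts
// Sketch only: continues the excerpt above; `openai` and `formattedMessages` come from it.
const completion = await openai.chat.completions.create({
  messages: formattedMessages,
  model: "gpt-4o-mini",
});

// The assistant's reply is on the first choice.
const reply = completion.choices[0]?.message?.content ?? "";

return new Response(JSON.stringify({ reply }), {
  headers: { "Content-Type": "application/json" },
});
```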
* Fork this val to your own profile.
* Make a folder for the temporary vals that get generated, take the ID from the URL, and put it in `tempValsParentFolderId`.
* If you want to use OpenAI models, you need to set the `OPENAI_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* If you want to use Anthropic models, you need to set the `ANTHROPIC_API_KEY` [env var](https://www.val.town/settings/environment-variables).
* Create a [Val Town API token](https://www.val.town/settings/api), open the browser preview of this val, and use the API token as the password to log in.
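The steps above boil down to one folder ID and a couple of environment variables. A hypothetical configuration sketch; the placeholder ID and the error check are illustrative, and only the names come from the list above:

```ts
// Placeholder: take the folder ID from the folder's URL in Val Town.
const tempValsParentFolderId = "<your-temp-vals-folder-id>";

// Provider keys are read from Val Town environment variables.
const openaiKey = Deno.env.get("OPENAI_API_KEY");       // needed for OpenAI models
const anthropicKey = Deno.env.get("ANTHROPIC_API_KEY"); // needed for Anthropic models

if (!openaiKey && !anthropicKey) {
  throw new Error("Set OPENAI_API_KEY and/or ANTHROPIC_API_KEY to use the corresponding models.");
}
```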
export default async function server(request: Request): Promise<Response> {
  if (request.method === 'POST') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const formData = await request.formData();
    // ... (resume file extraction elided)
    const resumeText = await resumeFile.text();

    const completion = await openai.chat.completions.create({
      messages: [
        {
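The elided lines pull the uploaded file out of the form data before the completion call. A sketch of that step, assuming the form field is named `resume`; the field name, the model, and the prompt wording are assumptions rather than the original code:

```ts
// Assumed field name; the excerpt does not show it.
const resumeFile = formData.get("resume") as File | null;
if (!resumeFile) {
  return new Response("Missing resume file", { status: 400 });
}
const resumeText = await resumeFile.text();

const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini", // assumption: the model is not visible in the excerpt
  messages: [
    { role: "system", content: "You review resumes and give concise, actionable feedback." },
    { role: "user", content: resumeText },
  ],
});
```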
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";

function emailValHandlerExtra(inboundEmail: Email) {
  // ...
  );

  const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
  const response = await client.chat.completions.create({
    model: "sonar",
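Perplexity exposes an OpenAI-compatible API, which is why the stock `npm:openai` client works here with only a swapped `baseURL` and key. A sketch of how the response might then be prepared for an email reply, assuming the inbound email text is used as the prompt and `marked` converts the Markdown answer to HTML; both are inferences from the imports, not shown in the excerpt:

```ts
// Sketch: continues the excerpt above; `client` and `inboundEmail` come from it.
const response = await client.chat.completions.create({
  model: "sonar",
  messages: [
    { role: "user", content: inboundEmail.text ?? "" }, // assumed: forward the email body as the prompt
  ],
});

const answerMarkdown = response.choices[0]?.message?.content ?? "";
const answerHtml = marked.parse(answerMarkdown) as string; // marked renders Markdown to HTML
```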
import { parseProject } from "https://esm.town/v/std/parseImportMeta/project";
import { contentType } from "npm:mime-types@2.1.35";
import openAI from "npm:openai";

console.log("will this show up");
import { blob } from "https://esm.town/v/std/blob";
import { OpenAI } from "https://esm.town/v/std/openai";
import Algebrite from "npm:algebrite";
import { servePublicFile } from "./index.ts";

/* ... (doc comment elided) */
async function handleProblemGeneration() {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4",
    messages: [
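The Algebrite import suggests the generated problems are checked symbolically rather than trusted as-is. A sketch of that idea, assuming the model is asked for a JSON problem/answer pair; the prompt, the response shape, and the `simplify` check are all illustrative:

```ts
// Sketch only: not the original handler's code.
const completion = await openai.chat.completions.create({
  model: "gpt-4",
  messages: [
    {
      role: "system",
      content: 'Generate one algebra problem and its answer as JSON: {"problem": "...", "answer": "..."}',
    },
  ],
});

const { problem, answer } = JSON.parse(completion.choices[0].message.content ?? "{}");

// Algebrite.run evaluates a symbolic expression and returns the simplified result as a string.
const simplified = Algebrite.run(`simplify(${answer})`);
console.log({ problem, answer, simplified });
```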
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";

function pm(...lines: string[]): string {
  // ...
  );

  const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
  const response = await client.chat.completions.create({
    model: "sonar",
export default async function server(request: Request): Promise<Response> {
  if (request.method === 'POST' && new URL(request.url).pathname === '/ai-response') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const body = await request.json();
    const conversationHistory = body.conversation || [];

    const completion = await openai.chat.completions.create({
      messages: [
        {
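A common way to finish a handler like this is to prepend a system prompt, replay the stored conversation, and return the new assistant turn. A sketch under those assumptions; the system prompt text and the model are invented for illustration:

```ts
// Sketch: continues the excerpt above; `openai` and `conversationHistory` come from it.
const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini", // assumption: the model is not shown in the excerpt
  messages: [
    { role: "system", content: "You are a helpful assistant." }, // illustrative prompt
    ...conversationHistory, // expected shape: [{ role, content }, ...]
  ],
});

return Response.json({
  response: completion.choices[0]?.message?.content ?? "",
});
```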
import { type ClientOptions, OpenAI as RawOpenAI } from "npm:openai";

/**
 * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
 */
export class OpenAI {
  private rawOpenAIClient: RawOpenAI;

  /**
   * API Client for interfacing with the OpenAI API. Uses Val Town credentials.
   *
   * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
   * ... (remaining constructor options elided)
   */
  constructor(options: Omit<ClientOptions, "baseURL" | "apiKey" | "organization"> = {}) {
    this.rawOpenAIClient = new RawOpenAI({
      ...options,
      baseURL: "https://std-openaiproxy.web.val.run/v1",
      apiKey: Deno.env.get("valtown"),
      organization: null,
    });
  }

  get chat() {
    return this.rawOpenAIClient.chat;
  }

  readonly beta = {
    get chat(): RawOpenAI["beta"]["chat"] {
      return this.rawOpenAIClient.beta.chat;
    },
  };
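Usage mirrors the raw `npm:openai` client, except that no API key is passed; requests go through the Val Town proxy and authenticate with the caller's Val Town credentials. This matches how the vals above call it:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Say hello in one sentence." }],
});

console.log(completion.choices[0].message.content);
```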