2
3export const langchainEx = (async () => {
4 const { OpenAI } = await import("https://esm.sh/langchain/llms/openai");
5 const { PromptTemplate } = await import("https://esm.sh/langchain/prompts");
6 const { LLMChain } = await import("https://esm.sh/langchain/chains");
7 const model = new OpenAI({
8 temperature: 0.9,
9 openAIApiKey: process.env.openai,
10 maxTokens: 100,
11 });
2
3export const conversationalRetrievalQAChainSummaryMemory = (async () => {
4 const { ChatOpenAI } = await import(
5 "https://esm.sh/langchain/chat_models/openai"
6 );
7 const { OpenAIEmbeddings } = await import(
8 "https://esm.sh/langchain/embeddings/openai"
9 );
10 const { ConversationSummaryMemory } = await import(
17 "https://esm.sh/langchain/chains"
18 );
19 const chatModel = new ChatOpenAI({
20 openAIApiKey: process.env.OPENAI_API_KEY,
21 });
22 /* Create the vectorstore */
28 ],
29 [{ id: 2 }, { id: 1 }, { id: 3 }],
30 new OpenAIEmbeddings({
31 openAIApiKey: process.env.OPENAI_API_KEY,
32 }),
33 );
5
6// Create a summary from a given text using GPT-4
7export const OpenAISummary = async (text: string, config: {
8 apiKey?: string;
9 jwt?: string;
14 const { success, reset } = await upstashRateLimiter(
15 "anon",
16 "@zzz.OpenAISummary",
17 2,
18 "60 s",
31 }
32 const agent = await AIAgent(
33 apiKey || process.env.OPENAI_API_KEY_GPT4,
34 );
35 const response = await agent.summarize(text, modelName);
3export async function getModelBuilder(spec: {
4 type?: "llm" | "chat" | "embedding";
5 provider?: "openai" | "huggingface";
6} = { type: "llm", provider: "openai" }, options?: any) {
7 const { extend, cond, matches, invoke } = await import("npm:lodash-es");
8 // Set up LangSmith tracer
17 // Set up API key for each providers
18 const args = extend({ callbacks }, options);
19 if (spec?.provider === "openai")
20 args.openAIApiKey = process.env.OPENAI;
21 else if (spec?.provider === "huggingface")
22 args.apiKey = process.env.HUGGINGFACE;
24 const setup = cond([
25 [
26 matches({ type: "llm", provider: "openai" }),
27 async () => {
28 const { OpenAI } = await import("npm:langchain/llms/openai");
29 return new OpenAI(args);
30 },
31 ],
32 [
33 matches({ type: "chat", provider: "openai" }),
34 async () => {
35 const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
36 return new ChatOpenAI(args);
37 },
38 ],
39 [
40 matches({ type: "embedding", provider: "openai" }),
41 async () => {
42 const { OpenAIEmbeddings } = await import(
43 "npm:langchain/embeddings/openai"
44 );
45 return new OpenAIEmbeddings(args);
46 },
47 ],
3
4export const elevenlabsTTS = async (req, res) => {
5 // https://platform.openai.com/docs/api-reference/images/create
6 // https://ale_annini-elevenlabstts.express.val.run/?args=[%22{\%22text\%22:\%22it%20beautiful\%22}%22]
7 const payload = {
4 let repsonse_obj = await chatGPT(
5 "hello assistant",
6 [], // this can be an empty list, or if you're using this to continue a conversation, you can pass in someting of the form: https://platform.openai.com/docs/guides/chat/introduction
7 API_KEY
8 );
4 const cheerio = await import("npm:cheerio");
5 const html = await fetchText(
6 "https://en.wikipedia.org/wiki/OpenAI",
7 );
8 const $ = cheerio.load(html);
1import { trackOpenAiFreeUsage } from "https://esm.town/v/patrickjm/trackOpenAiFreeUsage";
2import { openAiTextCompletion } from "https://esm.town/v/patrickjm/openAiTextCompletion";
3import { openAiModeration } from "https://esm.town/v/patrickjm/openAiModeration";
4import { openAiFreeQuotaExceeded } from "https://esm.town/v/patrickjm/openAiFreeQuotaExceeded";
5import { openAiFreeUsageConfig } from "https://esm.town/v/patrickjm/openAiFreeUsageConfig";
6
7/**
8 * OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
9 *
10 * val.town has generously provided a free daily quota. Until the quota is met, no need to provide an API key.
11 * To see if the quota has been met, you can run @patrickjm.openAiFreeQuotaExceeded()
12 *
13 * For full REST API access, see @patrickjm.openAiTextCompletion
14 */
15export let gpt3 = async (params: {
16 openAiKey?: string,
17 prompt: string,
18 maxTokens?: number,
21
22 // Determine whether to use provided apiKey or free usage apiKey based on daily quota.
23 const apiKey = params.openAiKey ?? openAiFreeUsageConfig.key;
24 const exceeded = await openAiFreeQuotaExceeded();
25 if (!params.openAiKey && exceeded) {
26 throw new Error(openAiFreeUsageConfig.quota_error);
27 }
28
29 // If using free token, first check inputs against moderation api
30 if (!params.openAiKey) {
31 const moderation = await openAiModeration({
32 apiKey,
33 input: params.prompt,
35 if (moderation.results.some((r) => r.flagged)) {
36 throw new Error(
37 "Sorry, this prompt was flagged by OpenAI moderation. If you provide your own API key, moderation will be turned off."
38 );
39 }
41
42 // Call completion API
43 const response = await openAiTextCompletion({
44 apiKey: apiKey,
45 prompt: params.prompt,
50 // If using free token, track usage against the quota.
51 try {
52 if (!params.openAiKey) {
53 await trackOpenAiFreeUsage(MODEL, response.usage.total_tokens);
54 }
55 } catch (e) {}
3
4/**
5 * OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
6 *
7 * val.town and rlimit.com have generously provided a free daily quota. Until the quota is met, no need to provide an API key.
9 query: string;
10}): Promise<number[]> =>
11 fetchJSON("https://api.openai.com/v1/embeddings", {
12 method: "POST",
13 headers: {