getModelBuilder — main.tsx — 14 matches
3export async function getModelBuilder(spec: {
4type?: "llm" | "chat" | "embedding";
5provider?: "openai" | "huggingface";
6} = { type: "llm", provider: "openai" }, options?: any) {
7const { extend, cond, matches, invoke } = await import("npm:lodash-es");
8// Set up LangSmith tracer
17// Set up API key for each providers
18const args = extend({ callbacks }, options);
19if (spec?.provider === "openai")
20args.openAIApiKey = process.env.OPENAI;
21else if (spec?.provider === "huggingface")
22args.apiKey = process.env.HUGGINGFACE;
24const setup = cond([
25[
26matches({ type: "llm", provider: "openai" }),
27async () => {
28const { OpenAI } = await import("npm:langchain/llms/openai");
29return new OpenAI(args);
30},
31],
32[
33matches({ type: "chat", provider: "openai" }),
34async () => {
35const { ChatOpenAI } = await import("npm:langchain/chat_models/openai");
36return new ChatOpenAI(args);
37},
38],
39[
40matches({ type: "embedding", provider: "openai" }),
41async () => {
42const { OpenAIEmbeddings } = await import(
43"npm:langchain/embeddings/openai"
44);
45return new OpenAIEmbeddings(args);
46},
47],
elevenlabsTTS — main.tsx — 1 match
34export const elevenlabsTTS = async (req, res) => {
5// https://platform.openai.com/docs/api-reference/images/create
6// https://ale_annini-elevenlabstts.express.val.run/?args=[%22{\%22text\%22:\%22it%20beautiful\%22}%22]
7const payload = {
chatGPTExample — main.tsx — 1 match
4let repsonse_obj = await chatGPT(
5"hello assistant",
6[], // this can be an empty list, or if you're using this to continue a conversation, you can pass in someting of the form: https://platform.openai.com/docs/guides/chat/introduction
7API_KEY
8);
4const cheerio = await import("npm:cheerio");
5const html = await fetchText(
6"https://en.wikipedia.org/wiki/OpenAI",
7);
8const $ = cheerio.load(html);
1import { trackOpenAiFreeUsage } from "https://esm.town/v/patrickjm/trackOpenAiFreeUsage";
2import { openAiTextCompletion } from "https://esm.town/v/patrickjm/openAiTextCompletion";
3import { openAiModeration } from "https://esm.town/v/patrickjm/openAiModeration";
4import { openAiFreeQuotaExceeded } from "https://esm.town/v/patrickjm/openAiFreeQuotaExceeded";
5import { openAiFreeUsageConfig } from "https://esm.town/v/patrickjm/openAiFreeUsageConfig";
67/**
8* OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
9*
10* val.town has generously provided a free daily quota. Until the quota is met, no need to provide an API key.
11* To see if the quota has been met, you can run @patrickjm.openAiFreeQuotaExceeded()
12*
13* For full REST API access, see @patrickjm.openAiTextCompletion
14*/
15export let gpt3 = async (params: {
16openAiKey?: string,
17prompt: string,
18maxTokens?: number,
2122// Determine whether to use provided apiKey or free usage apiKey based on daily quota.
23const apiKey = params.openAiKey ?? openAiFreeUsageConfig.key;
24const exceeded = await openAiFreeQuotaExceeded();
25if (!params.openAiKey && exceeded) {
26throw new Error(openAiFreeUsageConfig.quota_error);
27}
2829// If using free token, first check inputs against moderation api
30if (!params.openAiKey) {
31const moderation = await openAiModeration({
32apiKey,
33input: params.prompt,
35if (moderation.results.some((r) => r.flagged)) {
36throw new Error(
37"Sorry, this prompt was flagged by OpenAI moderation. If you provide your own API key, moderation will be turned off."
38);
39}
4142// Call completion API
43const response = await openAiTextCompletion({
44apiKey: apiKey,
45prompt: params.prompt,
50// If using free token, track usage against the quota.
51try {
52if (!params.openAiKey) {
53await trackOpenAiFreeUsage(MODEL, response.usage.total_tokens);
54}
55} catch (e) {}
34/**
5* OpenAI text completion. https://platform.openai.com/docs/api-reference/completions
6*
7* val.town and rlimit.com has generously provided a free daily quota. Until the quota is met, no need to provide an API key.
getOpenapiEmbedding — main.tsx — 1 match
9query: string;
10}): Promise<number[]> =>
11fetchJSON("https://api.openai.com/v1/embeddings", {
12method: "POST",
13headers: {
questionsWithGuidelinesChain — main.tsx — 6 matches
23export const questionsWithGuidelinesChain = (async () => {
4const { ChatOpenAI } = await import(
5"https://esm.sh/langchain@0.0.150/chat_models/openai"
6);
7const { LLMChain } = await import("https://esm.sh/langchain@0.0.150/chains");
19]);
20const questionChain = questionPrompt
21.pipe(new ChatOpenAI({
22openAIApiKey: process.env.OPENAI_API_KEY,
23})
24.pipe(new StringOutputParser()));
31const styleChain = stylePrompt
32.pipe(
33new ChatOpenAI({
34openAIApiKey: process.env.OPENAI_API_KEY,
35}),
36)
15);
16const { SupabaseVectorStore } = await import("npm:langchain/vectorstores");
17const { ChatOpenAI } = await import("npm:langchain/chat_models");
18const { OpenAIEmbeddings } = await import("npm:langchain/embeddings");
19const { createClient } = await import(
20"https://esm.sh/@supabase/supabase-js@2"
25);
26let streamedResponse = "";
27const chat = new ChatOpenAI({
28modelName: "gpt-3.5-turbo",
29openAIApiKey: process.env.OPEN_API_KEY,
30streaming: true,
31callbacks: [{
35});
36const vectorStore = await SupabaseVectorStore.fromExistingIndex(
37new OpenAIEmbeddings({
38openAIApiKey: process.env.OPEN_API_KEY,
39}),
40{
generateValCodeAPI — main.tsx — 1 match
4export let generateValCodeAPI = (description: string) =>
5generateValCode(
6process.env.VT_OPENAI_KEY,
7description,
8);