1# OpenAI Proxy Metrics
2
3We write OpenAI usage data to an `openai_usage` SQLite table. This script val is imported into the OpenAI proxy. Use this val to run administrative scripts: https://www.val.town/v/std/OpenAIUsageScript
4
5Migrated from folder: openai/OpenAIUsage
109 <path stroke-linecap="round" stroke-linejoin="round" d="M20.25 8.511c.884.284 1.5 1.128 1.5 2.097v4.286c0 1.136-.847 2.1-1.98 2.193-.34.027-.68.052-1.02.072v3.091l-3-3c-1.354 0-2.694-.055-4.02-.163a2.115 2.115 0 0 1-.825-.242m9.345-8.334a2.126 2.126 0 0 0-.476-.095 48.64 48.64 0 0 0-8.048 0c-1.131.094-1.976 1.057-1.976 2.192v4.286c0 .837.46 1.58 1.155 1.951m9.345-8.334V6.637c0-1.621-1.152-3.026-2.76-3.235A48.455 48.455 0 0 0 11.25 3c-2.115 0-4.198.137-6.24.402-1.608.209-2.76 1.614-2.76 3.235v6.226c0 1.621 1.152 3.026 2.76 3.235.577.075 1.157.14 1.74.194V21l4.155-4.155"></path>
110 </svg>
111 <div class="font-semibold group-hover:text-black">OpenAI chat completion</div>
112 </button>
113 <button class="group bg-gray-50 border-gray-300 flex items-center gap-2 overflow-hidden rounded border p-3 text-left transition-colors hover:border-blue-500 hover:shadow-md">
106 // checkManualProfanityList is optional and defaults to false; it checks for the words in lang.ts (if under 50 words) before hitting the AI model. Note that this affects performance.
107 checkManualProfanityList: false,
108 // provider defaults to "google-perspective-api" (Google's Perspective API); it can also be "openai" (OpenAI Moderation API) or "google-natural-language-api" (Google's Natural Language API)
109 provider: "google-perspective-api",
110 };
107 // checkManualProfanityList is optional and defaults to false; it checks for the words in lang.ts (if under 50 words) before hitting the AI model. Note that this affects performance.
108 checkManualProfanityList: false,
109 // provider defaults to "google-perspective-api" (Google's Perspective API); it can also be "openai" (OpenAI Moderation API) or "google-natural-language-api" (Google's Natural Language API)
110 provider: "google-perspective-api",
111 };
1import { embed, embedMany } from "npm:ai";
2import { openai } from "npm:@ai-sdk/openai";
3import lunr from "https://cdn.skypack.dev/lunr";
4
8 console.log(`Getting embedding for: ${text}`);
9 const { embedding } = await embed({
10 model: openai.embedding('text-embedding-3-small'),
11 value: text,
12 });
19 console.log(`Getting embeddings for texts: ${texts}`);
20 const { embeddings } = await embedMany({
21 model: openai.embedding('text-embedding-3-small'),
22 values: texts,
23 });
1In-memory semantic search; load it up with Val Town KV.
2
3This is a "dumb" version of vector search, for prototyping RAG responses and UIs — with both regular search (w/ Lunr) and vector search (with OpenAI embeddings + cosine similarity)
4
5Usage:
1This is an example of in-memory search, using a combination of lunr, OpenAI embeddings, and cosine similarity
2
3Migrated from folder: Libraries/SemanticSearch/embeddingsSearchExample
57 const characterModel = new ModelProvider({
58 id: `${characterName}`,
59 // for web search / tool use, use openai
60 provider: 'openai',
61 model: 'gpt-4o',
62
3import { modifyImage } from "https://esm.town/v/stevekrouse/modifyImage";
4import { Hono } from "npm:hono@3";
5import { OpenAI } from "npm:openai";
6
7const openai = new OpenAI(); // NOTE(review): no explicit apiKey — presumably picks up the OPENAI_API_KEY environment variable; confirm the deployment sets it
8
9function esmTown(url) {
95 const dataURL = await fileToDataURL(file);
96 try {
97 const response = await openai.chat.completions.create({
98 messages: [
99 {
6
7```
8<a href="https://www.val.town/settings/environment-variables?name=OpenAI&value=sk-123...">
9 Add OpenAI key to Val Town
10</a>
11```