} & Partial<Record<Month, Day>>;

export let openAiUsageTypes;
openaiPricing/fetchOpenAiUsageData (5 matches)
import { fetch } from "https://esm.town/v/std/fetch?v=4";

const fetchOpenAiUsageData = async (today: string) => {
  const response = await fetch(
    `https://api.openai.com/v1/usage?date=${today}`,
    {
      method: "GET",
      headers: {
        Authorization: `Bearer ${Deno.env.get("OPENAI_API_KEY")}`,
      },
    },
  );

  if (!response.ok) {
    throw new Error("Failed to fetch OpenAI usage data.");
  }

  return await response.json();
};

export { fetchOpenAiUsageData };
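A minimal usage sketch, assuming the function returns the parsed JSON body of OpenAI's `/v1/usage` response (the cron val below destructures `data`, `whisper_api_data`, and `dalle_api_data` from it):

```ts
import { fetchOpenAiUsageData } from "./fetchOpenAiUsageData";

// Assumption: the usage endpoint takes a YYYY-MM-DD date string.
const today = new Date().toISOString().slice(0, 10);

const usage = await fetchOpenAiUsageData(today);

// These are the fields the daily rollup below reads from the response.
console.log(usage.data?.length, usage.whisper_api_data?.length, usage.dalle_api_data?.length);
```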
openaiPricing/createDayTotal (1 match)
import { getImageTotals } from "./getImageTotals";
import { getTextTotals } from "./getTextTotals";
import { DayTotal, UsageDB } from "./openAiUsageTypes";

const createDayTotal = async (data, whisper_api_data, dalle_api_data) => {
openaiPricing/openAiPricing (2 matches)
});

interface OpenAiPricing {
  textPricing: {
    [key: string]: {
      // ...
}

export const openAiPricing: OpenAiPricing = {
  textPricing: {
    "gpt-3.5-turbo-16k-0613": createPricing(3000, 4000),
import { createDayTotal } from "./createDayTotal";
import { cronEvalLogger as logger } from "https://esm.town/v/nbbaier/cronLogger";
import { fetchOpenAiUsageData } from "./fetchOpenAiUsageData";
import { updateBlobUsageDB } from "./updateBlobUsageDB";
import { blob } from "https://esm.town/v/std/blob?v=11";
// ...
import { DateTime } from "npm:luxon";

const fetchAndStoreOpenAiUsage = async (interval: Interval) => {
  const timeZone = "America/Chicago";
  const date = DateTime.now();
  // ...

  try {
    const { data, whisper_api_data, dalle_api_data } = await fetchOpenAiUsageData(today);

    const day_total = await createDayTotal(data, whisper_api_data, dalle_api_data);
    // ...
};

export default logger(fetchAndStoreOpenAiUsage);
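The lines that compute `today` are elided above; a sketch of how it is likely derived with Luxon, assuming the usage API expects a YYYY-MM-DD string in the configured time zone:

```ts
import { DateTime } from "npm:luxon";

const timeZone = "America/Chicago";
const date = DateTime.now();

// Assumption: format the current date in the target time zone as YYYY-MM-DD.
const today = date.setZone(timeZone).toFormat("yyyy-MM-dd");

console.log(today); // e.g. "2024-05-01"
```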
IcarusBot/.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
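A minimal sketch of that convention, assuming Val Town's std/sqlite client and a hypothetical `todos` table:

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// The old schema lived in "todos"; after changing the schema, write to a fresh table name.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS todos_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    title TEXT NOT NULL,
    done INTEGER DEFAULT 0
  )
`);
```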

### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function anthropicCompletion(
  prompt: string,
) {
  const openai = new OpenAI();

  try {
    const completion = await openai.chat.completions.create({
      messages: [
        { role: "user", content: prompt },
my-first-val/04_email.tsx (4 matches)
import { email } from "https://esm.town/v/std/email";
import { OpenAI } from "https://esm.town/v/std/OpenAI";

// ------------------------------ Email Address ------------------------------
// ...
  console.log(e);

  // Use OpenAI provided by Val Town to reply to the email
  const openai = new OpenAI();
  let chatCompletion = await openai.chat.completions.create({
    messages: [{
      role: "user",
my-first-val/03_cron.tsx (5 matches)
// ---------------- Val Town Standard Library ----------------
// Val Town provides limited free hosted services, including
// functions for sending emails and using OpenAI
import { email } from "https://esm.town/v/std/email";
import { OpenAI } from "https://esm.town/v/std/OpenAI";

// --------------------- Get weather data --------------------
// ...

export default async function() {
  // Use OpenAI provided by Val Town to get a weather recommendation
  // Experiment with changing the prompt
  const openai = new OpenAI();
  let chatCompletion = await openai.chat.completions.create({
    messages: [{
      role: "user",
 * Allows users to specify hat sequence (e.g., "W,R,B,Y,G,B") or use default.
 * Simulates workflow visualization via structured text logs.
 * Uses OpenAI via @std/openai for agent responses.
 * Based on previous multi-agent simulation structures.
 *
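A sketch of how the hat sequence mentioned above could be parsed and validated, assuming single-letter codes and a fallback to the default order (illustrative, not the val's actual parser):

```ts
// Hypothetical parser: accept a comma-separated sequence of hat codes,
// falling back to the default order when input is missing or invalid.
const DEFAULT_SEQUENCE = ["W", "R", "B", "Y", "G", "B"];
const VALID_CODES = new Set(["W", "R", "B", "Y", "G"]);

function parseHatSequence(input?: string): string[] {
  if (!input) return DEFAULT_SEQUENCE;
  const codes = input.split(",").map((c) => c.trim().toUpperCase()).filter(Boolean);
  const allValid = codes.length > 0 && codes.every((c) => VALID_CODES.has(c));
  return allValid ? codes : DEFAULT_SEQUENCE;
}

// e.g. parseHatSequence("w, r, g") -> ["W", "R", "G"]
```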
// --- Main Request Handler (Server Code for ThinkingFlow MVP) ---
export default async function(req: Request) {
  // Dynamic Import of OpenAI Library
  const { OpenAI } = await import("https://esm.town/v/std/openai");

  // --- OpenAI API Call Helper (Reused) ---
  async function callOpenAI(
    systemPrompt: string,
    userMessage: string, // Can be query or intermediate context
    // ...
  ): Promise<{ role: "assistant" | "system"; content: string }> {
    try {
      const openai = new OpenAI();

      const response = await openai.chat.completions.create({
        model: model,
        messages: [
          // ...
        ],
      });

      if (!response.choices?.[0]?.message?.content) {
        console.error("OpenAI API returned unexpected structure:", JSON.stringify(response));
        throw new Error("Received invalid or empty response from AI model.");
      }
      return { role: "assistant", content: response.choices[0].message.content };
    } catch (error) {
      console.error(`OpenAI API call failed for model ${model}. Error:`, error.message, error.response?.data);
      let errorMessage = `Error with AI model (${model}).`;
      let statusCode = error.status || error.response?.status;
      if (statusCode === 401) errorMessage = "OpenAI Auth Error (401). Check Val Town 'openai' secret.";
      else if (statusCode === 429) errorMessage = "OpenAI Rate Limit/Quota Error (429). Check OpenAI plan.";
      else if (statusCode === 400) errorMessage = `OpenAI Bad Request (400). Details: ${error.message}`;
      else if (statusCode >= 500) errorMessage = `OpenAI Server Error (${statusCode}). Try again later.`;
      else if (error.code === "ENOTFOUND" || error.code === "ECONNREFUSED")
        errorMessage = `Network Error (${error.code}). Cannot connect to OpenAI.`;
      else if (error.message.includes("secret")) errorMessage = error.message;
      else errorMessage += ` Details: ${error.message}`;
      // ...

    currentPrompt = currentPrompt.replace("{{CONTEXT}}", ""); // No inter-hat context for MVP

    const hatResponse = await callOpenAI(currentPrompt, trimmedQuery); // Pass original query as user message

    // Log response or error
    // ...

      .replace("{{COLLECTED_OUTPUTS}}", outputsText);

    const summaryResponse = await callOpenAI(finalPrompt, "Synthesize the collected outputs.");

    conversationLog.push({ agent: "Blue Hat", message: summaryResponse.content }); // Log summary or error
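A sketch of the placeholder-template pattern used above, assuming `conversationLog` entries carry `agent` and `message` fields and that `{{COLLECTED_OUTPUTS}}` is filled with a plain-text digest of them (illustrative, not the val's exact code):

```ts
// Hypothetical shapes for the pieces referenced above.
type LogEntry = { agent: string; message: string };

const conversationLog: LogEntry[] = [
  { agent: "White Hat", message: "Known facts: ..." },
  { agent: "Red Hat", message: "Gut feeling: ..." },
];

// Collapse the per-hat outputs into one text block for the Blue Hat summary prompt.
const outputsText = conversationLog
  .map((entry) => `${entry.agent}: ${entry.message}`)
  .join("\n\n");

const finalPrompt = "Synthesize the session for the user.\n\n{{COLLECTED_OUTPUTS}}"
  .replace("{{COLLECTED_OUTPUTS}}", outputsText);
```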