weatherDashboard/README.md (1 match)
Frontend: React 18, TailwindCSS
APIs: Open-Meteo, OpenAI GPT-4o
Hosting: ESM-based module imports
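A minimal sketch of how that stack could fit together in a Val Town HTTP handler, combining Open-Meteo with the std/openai wrapper; the route, coordinates, and prompt are placeholders, not taken from the project.

```ts
// Sketch only: Open-Meteo forecast + GPT-4o summary served from one handler.
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  // Fetch current conditions from Open-Meteo (no API key required).
  const weatherRes = await fetch(
    "https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true",
  );
  const weather = await weatherRes.json();

  // Ask GPT-4o to summarize the raw forecast for the dashboard.
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [
      { role: "system", content: "Summarize this weather data in one friendly sentence." },
      { role: "user", content: JSON.stringify(weather.current_weather) },
    ],
  });

  return Response.json({ summary: completion.choices[0].message.content });
}
```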
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");

  // Enhanced server-side logging

  }

  const openai = new OpenAI();
  const url = new URL(request.url);

  // Call AgentA to create the tool definition
  const agentACompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },

  `;

  const agentBCompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },

  });

  // Use AgentB (a separate OpenAI instance) to process the tool request
  const agentBPrompt = `
You are AgentB, a specialized tool agent designed to process specific information requests.

  `;

  const agentBCompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },

  });

  // Fallback response if OpenAI fails
  let agentAResponse: AgentResponse = {
    actionType: "direct_response",

  `;

  const agentACompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },
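Read together, these fragments suggest a two-step pattern: AgentA decides in JSON mode whether a tool is needed, and AgentB fulfills the tool request with a second completion. A condensed sketch of that flow; the prompts, the AgentResponse fields, and the handoff condition are assumptions, not the val's actual code.

```ts
// Sketch of the AgentA -> AgentB handoff; AgentResponse fields are assumptions.
interface AgentResponse {
  actionType: "direct_response" | "tool_request";
  content: string;
}

const { OpenAI } = await import("https://esm.town/v/std/openai");
const openai = new OpenAI();

// AgentA: decide whether to answer directly or to request a tool.
// JSON mode requires the word "JSON" to appear in the prompt.
const agentACompletion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  response_format: { type: "json_object" },
  messages: [
    { role: "system", content: 'You are AgentA. Reply with JSON: {"actionType", "content"}.' },
    { role: "user", content: "What's the weather in Stockholm tomorrow?" },
  ],
});
const agentAResponse: AgentResponse = JSON.parse(
  agentACompletion.choices[0].message.content ?? "{}",
);

// AgentB: only invoked when AgentA asked for a tool.
if (agentAResponse.actionType === "tool_request") {
  const agentBCompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },
    messages: [
      { role: "system", content: "You are AgentB, a specialized tool agent. Reply with JSON." },
      { role: "user", content: agentAResponse.content },
    ],
  });
  console.log(agentBCompletion.choices[0].message.content);
}
```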
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");

  };

  const openai = new OpenAI();
  const KEY = extractKey(new URL(import.meta.url));
  const SCHEMA_VERSION = 3; // Increment schema version for tool support

  // Call AgentA to create the tool definition
  const agentACompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },

  `;

  const agentBCompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },

  }

  // Use AgentB (a separate OpenAI instance) to process the tool request
  const agentBPrompt = `
You are AgentB, a specialized tool agent designed to process specific information requests.

  `;

  const agentBCompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },

  });

  // Fallback response if OpenAI fails
  let agentAResponse: AgentResponse = {
    actionType: "direct_response",

  `;

  const agentACompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    response_format: { type: "json_object" },
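This variant layers SQLite persistence on top of the same agent flow. A minimal sketch of the bookkeeping implied by KEY and SCHEMA_VERSION, using the Val Town sqlite val; the table name and columns are assumptions.

```ts
// Sketch only: versioned table setup in the Val Town SQLite store.
const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");

const KEY = "exampleVal";   // normally derived from import.meta.url
const SCHEMA_VERSION = 3;   // bump to get a fresh table when the shape changes
const TABLE = `${KEY}_messages_${SCHEMA_VERSION}`;

await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS ${TABLE} (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    role TEXT NOT NULL,
    content TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);

await sqlite.execute({
  sql: `INSERT INTO ${TABLE} (role, content) VALUES (?, ?)`,
  args: ["assistant", "Hello from AgentA"],
});
```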
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");

  const openai = new OpenAI();
  const url = new URL(request.url);
  const path = url.pathname.split("/").filter(Boolean);

  `;

  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: componentPrompt }],
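A hedged, self-contained sketch of what this component-generation endpoint appears to do: build a prompt, ask gpt-4o-mini for component source, and return it from the handler. The prompt text and response shape are assumptions.

```ts
// Sketch: generate a component from a prompt and serve it as JSON.
// componentPrompt is a placeholder; the real val builds it from the request.
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const componentPrompt = "Return a small React component that renders a weather card.";
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: componentPrompt }],
  });

  return new Response(
    JSON.stringify({ component: completion.choices[0]?.message?.content ?? "" }),
    { headers: { "Content-Type": "application/json" } },
  );
}
```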
blah_migrated/main.tsx (1 match)
6"description": "A sample blah manifest demonstrating various tool types and configurations.",
7"env": {
8"OPENAI_API_KEY": Deno.env.get("OPENAI_API_KEY"),
9},
10"tools": [
talk_about_conspiracy/main.tsx (2 matches)
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

  try {
    const { text: fact } = await generateText({
      model: openai("gpt-4o-mini"),
      system: "You are an expert in conspiracy.",
      prompt: `Provide an interesting conspiracy for fun`,
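Filling out the elided lines as a self-contained sketch of the generateText call; the wrapper function and error handling are assumptions (the AI SDK reads OPENAI_API_KEY from the environment).

```ts
import { openai } from "npm:@ai-sdk/openai";
import { generateText } from "npm:ai";

export default async function main(): Promise<string> {
  try {
    // Ask gpt-4o-mini for a single short piece of text.
    const { text: fact } = await generateText({
      model: openai("gpt-4o-mini"),
      system: "You are an expert in conspiracy.",
      prompt: "Provide an interesting conspiracy for fun",
    });
    return fact;
  } catch (err) {
    console.error("generateText failed", err);
    return "No conspiracy available today.";
  }
}
```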
# askSMHI

Uses OpenAI chat completions with function calling against the [SMHI](https://en.wikipedia.org/wiki/Swedish_Meteorological_and_Hydrological_Institute) API.

The model is instructed to use the current time in the Europe/Stockholm time zone.

## Relevant API documentation
* [SMHI forecast documentation](https://opendata.smhi.se/apidocs/metfcst/get-forecast.html)
* [OpenAI GPT function calling documentation](https://platform.openai.com/docs/guides/function-calling?api-mode=chat&lang=javascript)

## How to use this endpoint

## Environment variables
* OPENAI_CHAT: must be authorized for chat completions and for the moderation API.

## Packages used
* openai: type-safe API requests and responses
* valibot: describes the SMHI API response and the function call input
* @valibot/to-json-schema: converts the valibot schema to JSON Schema (readable by the GPT API)
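A condensed sketch of the setup the README describes: a valibot schema converted to JSON Schema and offered to the model as a callable function. The schema fields, function name, model choice, and prompts below are illustrative assumptions, not the val's actual definitions.

```ts
import process from "node:process";
import { OpenAI } from "npm:openai";
import * as v from "npm:valibot";
import { toJsonSchema } from "npm:@valibot/to-json-schema";

// Illustrative input schema for the forecast function call.
const GetForecastInput = v.object({
  latitude: v.number(),
  longitude: v.number(),
});

const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });

const completion = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [
    { role: "system", content: "Answer weather questions. Current time zone: Europe/Stockholm." },
    { role: "user", content: "Will it rain in Stockholm tonight?" },
  ],
  tools: [{
    type: "function",
    function: {
      name: "get_forecast",
      description: "Fetch the SMHI point forecast for a coordinate.",
      parameters: toJsonSchema(GetForecastInput),
    },
  }],
});

// The model either answers directly or requests get_forecast with arguments
// that match the schema above.
const toolCalls = completion.choices[0].message.tool_calls ?? [];
```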
import { offset, removeOffset } from "npm:@formkit/tempo";
import { isWithinTokenLimit } from "npm:gpt-tokenizer/model/gpt-4o";
import { OpenAI } from "npm:openai";
import * as v from "npm:valibot";
import { openAIModeration } from "./gpt/moderation";
import { getWeatherAtCoordinate } from "./smhi/forecast/service";
import { getSmhiForecastResponseZodSchema } from "./smhi/schema";

    return { error: "Too many tokens in question" };
  }
  const { flagged } = await openAIModeration([question]);
  if (flagged) {
    return { error: "Be nice in your question" };

    }[],
  };
  const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });

  const completion = await openai.chat.completions.create({
    model: completionOptions.model,
    store: completionOptions.store,

      }]
    : [];
  const formattedFunctionResponseData = await openai.chat.completions.create({
    model: completionOptions.model,
    store: completionOptions.store,
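The second completions call above (formattedFunctionResponseData) implies the standard tool round trip: run the SMHI lookup requested by the model, then send the result back as a `tool` message so the model can phrase the final answer. A hedged sketch of that second leg, reusing names from the snippet; the argument shape and the getWeatherAtCoordinate signature are assumptions.

```ts
// Continuation sketch: `completion`, `openai`, and `getWeatherAtCoordinate`
// are assumed to exist as in the snippet above.
const assistantMessage = completion.choices[0].message;

if (assistantMessage.tool_calls?.length) {
  const call = assistantMessage.tool_calls[0];
  const args = JSON.parse(call.function.arguments); // assumed shape: { latitude, longitude }
  const forecast = await getWeatherAtCoordinate(args.latitude, args.longitude);

  const followUp = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      { role: "user", content: "Will it rain in Stockholm tonight?" },
      assistantMessage, // the assistant turn that requested the tool
      { role: "tool", tool_call_id: call.id, content: JSON.stringify(forecast) },
    ],
  });

  console.log(followUp.choices[0].message.content);
}
```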
askSMHI/moderation (6 matches)
import { fetch } from "https://esm.town/v/std/fetch";
import process from "node:process";
import { OpenAI } from "npm:openai";

// Flags a batch of public messages via the OpenAI moderation endpoint.
export async function openAIModeration(publicMessages: string[]) {
  const openAIKey = process.env.OPENAI_CHAT;
  const headers = {
    "Content-Type": "application/json",
    Authorization: `Bearer ${openAIKey}`,
  };
  const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
  const responseData = await openai.moderations.create({ input: publicMessages });
  return { flagged: responseData.results.some((r) => r.flagged) };
}
researchAgent/emailHandler (2 matches)
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";

function pm(...lines: string[]): string {

  );

  const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
  const response = await client.chat.completions.create({
    model: "sonar",
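The same openai SDK talks to Perplexity's OpenAI-compatible endpoint by overriding baseURL, as the snippet shows. A minimal sketch of that pattern with the prompt and the email plumbing omitted; rendering the markdown answer to HTML with marked is an assumption about how the handler uses it.

```ts
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";

// Point the OpenAI client at Perplexity's OpenAI-compatible API.
const client = new OpenAI({
  apiKey: process.env.PERPLEXITY_API_KEY,
  baseURL: "https://api.perplexity.ai",
});

const response = await client.chat.completions.create({
  model: "sonar",
  messages: [{ role: "user", content: "Summarize this week's research on battery chemistry." }],
});

// Perplexity answers in markdown; render it to HTML for the outgoing email.
const html = await marked.parse(response.choices[0].message.content ?? "");
console.log(html);
```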