openaistreaminghtmlmain.tsx3 matches
1import OpenAI from "npm:openai";
23const openai = new OpenAI();
45export default async (req) => {
6try {
7const stream = await openai.chat.completions.create({
8model: "gpt-3.5-turbo",
9messages: [{
poorvis885README.md1 match
1213Frontend: React 18, TailwindCSS
14APIs: Open-Meteo, OpenAI GPT-4o
15Hosting: ESM-based module imports
poorvis885main.tsx3 matches
5354// Fetch AI-powered weather insight
55const { OpenAI } = await import("https://esm.town/v/std/openai");
56const openai = new OpenAI();
57
58const aiResponse = await openai.chat.completions.create({
59messages: [
60{
weatherDashboardmain.tsx3 matches
5354// Fetch AI-powered weather insight
55const { OpenAI } = await import("https://esm.town/v/std/openai");
56const openai = new OpenAI();
57
58const aiResponse = await openai.chat.completions.create({
59messages: [
60{
weatherDashboardREADME.md1 match
1213Frontend: React 18, TailwindCSS
14APIs: Open-Meteo, OpenAI GPT-4o
15Hosting: ESM-based module imports
360361export default async function server(request: Request): Promise<Response> {
362const { OpenAI } = await import("https://esm.town/v/std/openai");
363364// Enhanced server-side logging
375}
376377const openai = new OpenAI();
378379const url = new URL(request.url);
421422// Call AgentA to create the tool definition
423const agentACompletion = await openai.chat.completions.create({
424model: "gpt-4o-mini",
425response_format: { type: "json_object" },
470`;
471472const agentBCompletion = await openai.chat.completions.create({
473model: "gpt-4o-mini",
474response_format: { type: "json_object" },
519});
520521// Use AgentB (a separate OpenAI instance) to process the tool request
522const agentBPrompt = `
523You are AgentB, a specialized tool agent designed to process specific information requests.
538`;
539540const agentBCompletion = await openai.chat.completions.create({
541model: "gpt-4o-mini",
542response_format: { type: "json_object" },
582});
583584// Fallback response if OpenAI fails
585let agentAResponse: AgentResponse = {
586actionType: "direct_response",
627`;
628629const agentACompletion = await openai.chat.completions.create({
630model: "gpt-4o-mini",
631response_format: { type: "json_object" },
blah_migratedmain.tsx1 match
6"description": "A sample blah manifest demonstrating various tool types and configurations.",
7"env": {
8"OPENAI_API_KEY": Deno.env.get("OPENAI_API_KEY"),
9},
10"tools": [
talk_about_conspiracymain.tsx2 matches
1import { openai } from "npm:@ai-sdk/openai";
2import { generateText } from "npm:ai";
38try {
9const { text: fact } = await generateText({
10model: openai("gpt-4o-mini"),
11system: "You are an expert in conspiracy.",
12prompt: `Provide an interesting conspiracy for fun`,
1# askSMHI
2Using OpenAI chat completion with function calls to [SMHI](https://en.wikipedia.org/wiki/Swedish_Meteorological_and_Hydrological_Institute) api
34The API is instructed to use the current time in Europe/Stockholm timezone.
8## Relevant API documentation
9* [SMHI, forecast documentation](https://opendata.smhi.se/apidocs/metfcst/get-forecast.html)
10* [OPEN AI, GPT function calling documentation](https://platform.openai.com/docs/guides/function-calling?api-mode=chat&lang=javascript)
1112## How to use this endpoint
4142## Environment variables
43* OPENAI_CHAT: Needs to be authorized to write chat completions and to the moderation API.
44## Packages used
45* openai: For type-safe API requests and responses
46* valibot: For describing the SMHI API response and function API input
47* valibot/to-json-schema: Transform the schema to json schema (readable by the GPT API)
3import { offset, removeOffset } from "npm:@formkit/tempo";
4import { isWithinTokenLimit } from "npm:gpt-tokenizer/model/gpt-4o";
5import { OpenAI } from "npm:openai";
6import * as v from "npm:valibot";
7import { openAIModeration } from "./gpt/moderation";
8import { getWeatherAtCoordinate } from "./smhi/forecast/service";
9import { getSmhiForecastResponseZodSchema } from "./smhi/schema";
18return { error: "Too many tokens in question" };
19}
20const { flagged } = await openAIModeration([question]);
21if (flagged) {
22return { error: "Be nice in your question" };
42}[],
43};
44const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
4546const completion = await openai.chat.completions.create({
47model: completionOptions.model,
48store: completionOptions.store,
130}]
131: [];
132const formattedFunctionResponseData = await openai.chat.completions.create({
133model: completionOptions.model,
134store: completionOptions.store,