# askSMHI

Using OpenAI chat completions with function calling against the [SMHI](https://en.wikipedia.org/wiki/Swedish_Meteorological_and_Hydrological_Institute) API.
The model is instructed to use the current time in the Europe/Stockholm timezone.
## Relevant API documentation

* [SMHI, forecast documentation](https://opendata.smhi.se/apidocs/metfcst/get-forecast.html) (a minimal request is sketched below)
* [OpenAI, GPT function calling documentation](https://platform.openai.com/docs/guides/function-calling?api-mode=chat&lang=javascript)
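For a feel of what the forecast API returns, here is a minimal sketch of a point-forecast request (URL shape per the SMHI docs above; the Stockholm coordinates are illustrative):

```ts
// Fetch the SMHI point forecast (pmp3g model, version 2) for a coordinate.
const lon = 18.07; // illustrative: Stockholm
const lat = 59.33;
const url =
  `https://opendata-download-metfcst.smhi.se/api/category/pmp3g/version/2/geotype/point/lon/${lon}/lat/${lat}/data.json`;

const res = await fetch(url);
if (!res.ok) throw new Error(`SMHI request failed: ${res.status}`);
const forecast = await res.json();
console.log(forecast.timeSeries[0]); // first forecast time step
```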
## How to use this endpoint
## Environment variables

* `OPENAI_CHAT`: an OpenAI API key authorized for the chat completions and moderation APIs.

## Packages used

* openai: for type-safe API requests and responses
* valibot: for describing the SMHI API response and the function-call input
* @valibot/to-json-schema: transforms the valibot schema into JSON Schema, readable by the GPT API (see the sketch after this list)
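As a rough sketch of how these pieces fit together (identifier names here are illustrative, not the val's actual ones): valibot describes the function-call input, and @valibot/to-json-schema turns that description into the JSON Schema that the chat completions API expects in a tool definition.

```ts
import * as v from "npm:valibot";
import { toJsonSchema } from "npm:@valibot/to-json-schema";

// Describe the function-call input with valibot...
const WeatherQuery = v.object({
  lon: v.number(),
  lat: v.number(),
});

// ...and expose it to the model as a tool in JSON Schema form.
const tools = [{
  type: "function" as const,
  function: {
    name: "get_weather_at_coordinate", // illustrative name
    description: "Get the SMHI forecast for a coordinate",
    parameters: toJsonSchema(WeatherQuery),
  },
}];
```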
Excerpts from the val's main module (elided lines are marked with `// …`):

```ts
import { offset, removeOffset } from "npm:@formkit/tempo";
import { isWithinTokenLimit } from "npm:gpt-tokenizer/model/gpt-4o";
import { OpenAI } from "npm:openai";
import * as v from "npm:valibot";
import { openAIModeration } from "./gpt/moderation";
import { getWeatherAtCoordinate } from "./smhi/forecast/service";
import { getSmhiForecastResponseZodSchema } from "./smhi/schema";
// …
  return { error: "Too many tokens in question" };
}
const { flagged } = await openAIModeration([question]);
if (flagged) {
  return { error: "Be nice in your question" };
}
// …
  }[],
};
const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });

const completion = await openai.chat.completions.create({
  model: completionOptions.model,
  store: completionOptions.store,
  // …
    }]
  : [];
const formattedFunctionResponseData = await openai.chat.completions.create({
  model: completionOptions.model,
  store: completionOptions.store,
  // …
```
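The two `chat.completions.create` calls above are the two halves of a standard function-calling round trip: the first lets the model request the weather tool, the second turns the tool's data into a readable answer. A minimal sketch of that flow (the model name, question, and `getWeatherAtCoordinate` signature are assumptions, not the val's exact code):

```ts
import process from "node:process";
import { OpenAI } from "npm:openai";

const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
const question = "Will it rain in Stockholm tomorrow?"; // illustrative

// 1) Ask the model, advertising the weather tool.
const first = await openai.chat.completions.create({
  model: "gpt-4o-mini", // assumed model
  messages: [{ role: "user", content: question }],
  tools, // tool definition as sketched in the README section above
});

const toolCall = first.choices[0].message.tool_calls?.[0];
if (toolCall) {
  // 2) Run the real function with the model-provided arguments...
  const args = JSON.parse(toolCall.function.arguments);
  const weather = await getWeatherAtCoordinate(args.lat, args.lon); // hypothetical signature

  // 3) ...then let the model phrase the final answer from the data.
  const second = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [
      { role: "user", content: question },
      first.choices[0].message, // the assistant turn containing the tool call
      { role: "tool", tool_call_id: toolCall.id, content: JSON.stringify(weather) },
    ],
  });
  console.log(second.choices[0].message.content);
}
```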
### askSMHI/moderation (6 matches)
```ts
import process from "node:process";
import { OpenAI } from "npm:openai";

export async function openAIModeration(publicMessages: string[]) {
  const openai = new OpenAI({ apiKey: process.env.OPENAI_CHAT });
  const responseData = await openai.moderations.create({ input: publicMessages });
  return { flagged: responseData.results.some((r) => r.flagged) };
}
```
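A quick usage sketch (the input string is illustrative):

```ts
const { flagged } = await openAIModeration(["What's the weather in Malmö this weekend?"]);
if (flagged) {
  // mirror the val's behavior: refuse flagged questions
  throw new Error("Be nice in your question");
}
```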
### researchAgent/emailHandler (2 matches)
```ts
import process from "node:process";
import { marked } from "npm:marked";
import { OpenAI } from "npm:openai";

function pm(...lines: string[]): string {
  // …
);

const client = new OpenAI({ apiKey: process.env.PERPLEXITY_API_KEY, baseURL: "https://api.perplexity.ai" });
const response = await client.chat.completions.create({
  model: "sonar",
  // …
```
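Worth noting: no Perplexity-specific SDK is involved here. Perplexity's API is OpenAI-compatible, so the standard `openai` client works once `baseURL` is pointed at `https://api.perplexity.ai` and a Perplexity key is supplied; `sonar` is a Perplexity model name, not an OpenAI one.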
### townieBear/index.ts (44 matches)
```ts
/**
 * This is a Townie clone for Val Town.
 * It creates a chat interface that helps users generate Val Town applications.
 * Uses OpenAI's API to generate code responses in the proper format.
 */
/** @jsxImportSource https://esm.sh/react@18.2.0 */
// …
If you are changing a SQLite table's schema, you should also change the table's name so it creates a fresh table, ie by adding _2 or _3 after it everywhere. Ensure that tables are created before they are used.

OPENAI

Val Town includes a free, proxied OpenAI:

import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
// …
): Promise<string> {
  try {
    // Try using the proxied OpenAI first, if that fails, fall back to direct API call
    try {
      return await processWithValTownOpenAI(messages, systemPrompt, stream);
    } catch (error) {
      console.error("Error with Val Town OpenAI:", error);
      console.log("Falling back to direct OpenAI API call");
      return await processWithDirectOpenAI(messages, systemPrompt, stream);
    }
  } catch (error) {
    // …

/**
 * Process chat using Val Town's proxied OpenAI
 */
async function processWithValTownOpenAI(
  messages: Message[],
  systemPrompt: string,
  // …
): Promise<string> {
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    // Prepare the messages array with system prompt
    const openaiMessages = [
      { role: "system", content: systemPrompt },
      ...messages.map(m => ({ role: m.role, content: m.content })),
    ];

    console.log("Sending to Val Town OpenAI:", {
      messageCount: openaiMessages.length,
      firstUserMessage: messages[0]?.content?.substring(0, 20) + "...",
    });

    // If streaming is not required, get a complete response
    if (!stream) {
      const completion = await openai.chat.completions.create({
        messages: openaiMessages,
        model: "gpt-4o-mini", // Using Val Town's available model
        temperature: 0.7,
        // …
      return completion.choices[0]?.message?.content || "Sorry, I couldn't generate a response.";
    } else {
      // Streaming is not directly supported by Val Town OpenAI wrapper
      // Falling back to direct API
      throw new Error("Streaming not supported by Val Town OpenAI wrapper");
    }
  } catch (error) {
    console.error("Error in processWithValTownOpenAI:", error);
    throw error;
  }
  // …

/**
 * Process chat using direct OpenAI API
 */
async function processWithDirectOpenAI(
  messages: Message[],
  systemPrompt: string,
  // …
): Promise<string> {
  // Get API key from environment
  const apiKey = Deno.env.get("OPENAI_API_KEY");

  if (!apiKey) {
    throw new Error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variable.");
  }

  // Format messages for OpenAI API
  const openaiMessages = [
    { role: "system", content: systemPrompt },
    ...messages.map(m => ({ role: m.role, content: m.content })),
  ];

  console.log("Sending to Direct OpenAI:", {
    messageCount: openaiMessages.length,
    firstUserMessage: messages[0]?.content?.substring(0, 20) + "...",
  });

  if (stream) {
    // Stream the response if a stream is provided
    return await streamChatResponse(openaiMessages, apiKey, stream);
  } else {
    // Otherwise, return the complete response
    return await fetchChatResponse(openaiMessages, apiKey);
  }
}

/**
 * Fetch a complete chat response from OpenAI
 */
async function fetchChatResponse(messages: any[], apiKey: string): Promise<string> {
  try {
    const response = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: {
        // …
    if (!response.ok) {
      const errorText = await response.text();
      console.error("OpenAI API error response:", errorText);
      try {
        const errorData = JSON.parse(errorText);
        throw new Error(`OpenAI API error: ${response.status} ${errorData.error?.message || errorText}`);
      } catch (e) {
        throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
      }
    }
    // …

/**
 * Stream a chat response from OpenAI
 */
async function streamChatResponse(messages: any[], apiKey: string, stream: any): Promise<string> {
  try {
    const response = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: {
        // …
    if (!response.ok) {
      const errorText = await response.text();
      console.error("OpenAI API streaming error:", errorText);
      throw new Error(`OpenAI API error: ${response.status} ${errorText}`);
    }
    // …
    status: "ok",
    message: "Backend is working",
    hasOpenAiKey: Boolean(Deno.env.get("OPENAI_API_KEY")),
    hasValTownOpenAI: true,
  };
});
```
### MEDIANALIZE_PRO/Medicalreport.tsx (5 matches)
```ts
export default async function server(request: Request): Promise<Response> {
  if (request.method === 'POST' && new URL(request.url).pathname === '/analyze') {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const body = await request.json();
    // …
    try {
      const completion = await openai.chat.completions.create({
        messages: [
          {
            // …
      });
    } catch (error) {
      console.error('OpenAI Analysis Error:', error);
      return new Response(JSON.stringify({
        diagnosis: 'Unable to generate analysis',
        // …

function parseAnalysis(analysis: string) {
  // Basic parsing of OpenAI response
  const sections = analysis.split('\n\n');
  return {
    // …
```
### MEDIANALIZE_PRO/healthcare.tsx (14 matches)
```ts
}

// Dynamically import OpenAI with error handling
let OpenAI;
try {
  const module = await import("https://esm.town/v/std/openai");
  OpenAI = module.OpenAI;
} catch (importError) {
  console.error("OpenAI Import Error:", importError);
  return new Response(
    JSON.stringify({
      error: "Failed to import OpenAI module",
      details: String(importError),
    }),
    // …
}

// Ensure OpenAI is available
if (!OpenAI) {
  return new Response(
    JSON.stringify({
      error: "OpenAI module not found",
    }),
    {
      // …
}

// Create OpenAI instance
const openai = new OpenAI();

// Create OpenAI completion with comprehensive error handling
let completion;
try {
  completion = await openai.chat.completions.create({
    messages: body.messages,
    model: "gpt-4o-mini",
    // …
  });
} catch (completionError) {
  console.error("OpenAI Completion Error:", completionError);
  return new Response(
    JSON.stringify({
      // …
```
### cerebras_coder/migrations (5 matches)
```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
import OpenAI from "openai";
import { ITERATIONS_TABLE, KEY, PROJECTS_TABLE, SCHEMA_VERSION } from "./migrations";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

export async function createProject(prompt: string) {
  // …
}

// Example of using OpenAI (you'll need to adapt this to your specific use case)
export async function generateText(prompt: string) {
  try {
    const completion = await openai.chat.completions.create({
      messages: [{ role: "user", content: prompt }],
      model: "gpt-4-turbo-preview",
      // …
    });
    return completion.choices[0].message?.content || "No response";
  } catch (error) {
    console.error("OpenAI Error:", error);
    return "Error generating text";
  }
```
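A quick usage sketch of `generateText` (the prompt is illustrative):

```ts
const summary = await generateText("Summarize this project idea in two sentences.");
console.log(summary); // "Error generating text" if the API call failed
```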
### cerebras_coder/queries (5 matches)
```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";
import OpenAI from "openai";
import { ITERATIONS_TABLE, KEY, PROJECTS_TABLE, SCHEMA_VERSION } from "./migrations";

const openai = new OpenAI({ apiKey: Deno.env.get("OPENAI_API_KEY") });

export async function createProject(prompt: string) {
  // …
}

// Example of using OpenAI (you'll need to adapt this to your specific use case)
export async function generateText(prompt: string) {
  try {
    const completion = await openai.chat.completions.create({
      messages: [{ role: "user", content: prompt }],
      model: "gpt-4-turbo-preview",
      // …
    return completion.choices[0].message?.content || "No response";
  } catch (error) {
    console.error("OpenAI Error:", error);
    return "Error generating text";
  }
```
### cerebras_coder/generate-code (5 matches)
```ts
import OpenAI from "openai";
import STARTER_PROMPTS from "../public/starter-prompts.js";
// …
};
} else {
  const openai = new OpenAI({
    // API keys belong in environment variables, never hardcoded in source.
    apiKey: Deno.env.get("OPENAI_API_KEY"),
  });
  const completion = await openai.chat.completions.create({
    messages: [
      {
        // …
      },
    ].filter(Boolean),
    model: "gpt-4-turbo-preview", // Or another suitable OpenAI model
    max_tokens: 2000, // Adjust as needed
  });
  // …
  return {
    code: extractCodeFromFence(completion.choices[0].message.content),
    time: 0, // OpenAI doesn't directly provide completion time
    totalTokens: completion.usage?.total_tokens || 1,
  };
```
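The return value relies on an `extractCodeFromFence` helper that the excerpt does not show. A plausible reconstruction, assuming it pulls the body out of the first markdown code fence and otherwise returns the text unchanged:

```ts
// Hypothetical reconstruction: the real helper is not shown in the excerpt.
function extractCodeFromFence(text: string): string {
  // `{3} matches a literal triple-backtick fence without embedding one in this file.
  const match = text.match(/`{3}(?:\w*)\r?\n([\s\S]*?)`{3}/);
  return match ? match[1].trim() : text.trim();
}
```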