// Val Town HTTP val: OpenAI-compatible streaming (Chat Completions)
// - Edit LONG_RESPONSE (or pass { override_response }) to change output text.
// - Add per-chunk delay with CHUNK_DELAY_MS (or pass { chunk_delay_ms }).
// - Adjust chunk size with CHUNK_SIZE (or pass { chunk_size }).
// - If "stream": false, returns a non-streaming OpenAI-style response.
//
// Example POST body (OpenAI style):
// {
//   "model": "gpt-4o-mini",
// ...
  if (req.method !== "POST") {
    return new Response("Use POST with an OpenAI-style body", {
      status: 405,
      headers: corsHeaders,
// ...
      : CHUNK_DELAY_MS;

  // Non-streaming OpenAI-style response if stream=false
  if (!streamRequested) {
    const now = Math.floor(Date.now() / 1000);
// ...
  }

  // Streaming (SSE) per OpenAI Chat Completions (chat.completion.chunk)
  const id = `chatcmpl-${crypto.randomUUID()}`;
  const created = Math.floor(Date.now() / 1000);
// ...
  }, 15000);

  // Initial role delta (matches OpenAI behavior)
  send({
    id,
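A rough sketch of how a client might exercise this val, assuming a placeholder URL; the `override_response`, `chunk_delay_ms`, and `chunk_size` fields are the optional overrides named in the header comments above:

```ts
// Hedged sketch of a client call; the val URL below is a placeholder.
const res = await fetch("https://YOUR-VAL.web.val.run", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "ping" }],
    stream: true,
    // Optional knobs documented in the header comments:
    override_response: "Hello from the mock val.",
    chunk_delay_ms: 50,
    chunk_size: 8,
  }),
});

// The body is an SSE stream of `data: {...}` lines carrying
// "object": "chat.completion.chunk" payloads (OpenAI-style streams
// typically end with `data: [DONE]`).
const reader = res.body!.getReader();
const decoder = new TextDecoder();
for (;;) {
  const { value, done } = await reader.read();
  if (done) break;
  console.log(decoder.decode(value));
}
```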
import { cors } from "npm:hono/cors";
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// @ts-ignore
import { blob } from "https://esm.town/v/std/blob?v=11";
// ...
  const { description } = await c.req.json();
  if (!description) return c.json({ error: "Description is required" }, 400);
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [
// ...
  );
  }
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";

// --- TYPE DEFINITIONS ---
// ...
  if (req.method === "POST" && action === "wizard") {
    try {
      const openai = new OpenAI();
      const body = await req.json();
      const phase = body.phase;
// ...
      }

      const completion = await openai.chat.completions.create({
        model: modelToUse,
        messages: promptMessages,
import { slack } from "./slack.ts";
import { Hono } from "npm:hono";
import { icp } from "./openai.ts";

const app = new Hono();
import { OpenAI } from "https://esm.town/v/std/openai";
import { z } from "npm:zod@3.23.8";
import { zodResponseFormat } from "npm:openai@5.12.2/helpers/zod";

const openai = new OpenAI();

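// Zod schema describing the structured result the model is asked to return
// (see the zodResponseFormat import above).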
const ICPResult = z.object({
// ...
  }];

  const resp = await openai.chat.completions.parse({
    model: "gpt-5-mini",
    messages,
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
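For example, a minimal sketch of that convention, assuming Val Town's std/sqlite and a made-up `todos` table:

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Bump the suffix whenever the schema changes so a fresh table is created.
const TABLE_NAME = "todos_2";

await sqlite.execute(`CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  title TEXT NOT NULL,
  done INTEGER DEFAULT 0
)`);
```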

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
```
  language,
  offset = 0,
  model = 'openai/gpt-oss-120b',
  reasoning_effort = 'medium',
  tools = [],
// ...
  }

  const isGptOss = typeof model === 'string' && model.startsWith('openai/gpt-oss');
  const supportsReasoning = isGptOss;
  const supportsTools = isGptOss;
// ...
  const encoder = new TextEncoder();
  const baseUrl = (typeof base_url === 'string' && base_url.trim()) ? base_url.trim().replace(/\/$/, '') : '';
  let endpoint = 'https://api.groq.com/openai/v1/chat/completions';
  if (baseUrl) {
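    // Detect whether the custom base_url already points at a completions endpoint (e.g. .../v1/chat/completions).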
    if (/(\/v\d+)?\/(chat\/)?completions$/i.test(baseUrl)) {
  language,
  offset = 0,
  model = 'openai/gpt-oss-120b',
  reasoning_effort = 'medium',
  tools,
// ...
  }

  const isGptOss = typeof model === 'string' && model.startsWith('openai/gpt-oss');
  const supportsReasoning = isGptOss;
  const supportsTools = isGptOss;
import "jsr:@std/dotenv/load";

const DEFAULT_API_URL = "https://api.groq.com/openai/v1/chat/completions";

function getApiKey() {
// ...
    },
  ],
  model: "openai/gpt-oss-120b",
  stream: true,
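  // Ask for medium reasoning effort (gpt-oss reasoning models accept "low" | "medium" | "high").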
  reasoning_effort: "medium",