medicalChatBotWebsite/main.tsx: 3 matches

export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { message } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: [
        {
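The match above cuts off inside the completions call. A minimal sketch of how such a /chat handler typically finishes on Val Town, assuming a gpt-4o-mini model, an illustrative system prompt, and a JSON response shape (none of these details come from the original val):

```ts
export default async function server(request: Request): Promise<Response> {
  if (request.method === "POST" && new URL(request.url).pathname === "/chat") {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { message } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: [
        // The system prompt below is an illustrative placeholder.
        { role: "system", content: "You are a cautious medical information assistant." },
        { role: "user", content: message },
      ],
      model: "gpt-4o-mini", // assumed model
      max_tokens: 300,
    });

    return new Response(
      JSON.stringify({ reply: completion.choices[0].message.content }),
      { headers: { "Content-Type": "application/json" } },
    );
  }
  return new Response("Not found", { status: 404 });
}
```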
OpenTownieNormal/system_prompt.txt: 4 matches

Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
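A sketch of that renaming pattern with Val Town's sqlite val; the table name and columns here are hypothetical:

```ts
import { sqlite } from "https://esm.town/v/stevekrouse/sqlite";

// Bump the suffix whenever the columns change so a fresh table is created.
const TABLE_NAME = "chat_messages_2"; // previously "chat_messages"

await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS ${TABLE_NAME} (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    role TEXT NOT NULL,
    content TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);
```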
### OpenAI
```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
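  ],
  // The search snippet is truncated at this point. A plausible completion
  // follows; the model name and token limit are assumptions, not necessarily
  // the original system prompt's text.
  model: "gpt-4o-mini",
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);
```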
Women_In_STEM/main.tsx: 5 matches

import React, { useState, useEffect } from "https://esm.sh/react@18.2.0";
import { createRoot } from "https://esm.sh/react-dom@18.2.0/client";
import { OpenAI } from "https://esm.town/v/std/openai";

function WomenInSTEMBlog() {

// ...

export default async function server(request: Request): Promise<Response> {
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();
  const KEY = "Women_In_STEM";
  const SCHEMA_VERSION = 5;

// ...

  for (const woman of womenAndInventions) {
    // Generate concise, inspirational biography
    const biographyCompletion = await openai.chat.completions.create({
      messages: [{
        role: "user",

// ...

    // Generate detailed invention description
    const inventionCompletion = await openai.chat.completions.create({
      messages: [{
        role: "user",
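The loop above issues two completions per person, one for a biography and one for an invention description. A sketch of that pattern under stated assumptions: the item fields, prompt wording, and model settings are illustrative, not the original val's code.

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const openai = new OpenAI();

// Hypothetical shape for the items the val iterates over.
const womenAndInventions = [
  { name: "Hedy Lamarr", invention: "frequency-hopping spread spectrum" },
];

const profiles: Array<{ name: string; biography: string; inventionDescription: string }> = [];

for (const woman of womenAndInventions) {
  // Generate concise, inspirational biography
  const biographyCompletion = await openai.chat.completions.create({
    messages: [{
      role: "user",
      content: `Write a two-sentence inspirational biography of ${woman.name}.`,
    }],
    model: "gpt-4o-mini",
    max_tokens: 150,
  });

  // Generate detailed invention description
  const inventionCompletion = await openai.chat.completions.create({
    messages: [{
      role: "user",
      content: `Describe ${woman.name}'s work on ${woman.invention} in plain language.`,
    }],
    model: "gpt-4o-mini",
    max_tokens: 200,
  });

  profiles.push({
    name: woman.name,
    biography: biographyCompletion.choices[0].message.content ?? "",
    inventionDescription: inventionCompletion.choices[0].message.content ?? "",
  });
}
```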
bedtimeStoriesChatRoom/main.tsx: 3 matches

export default async function server(request: Request): Promise<Response> {
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const KEY = "bedtimeStoriesChatRoom";
  const openai = new OpenAI();

  // Increment version to force table recreation

// ...

  // Generate bedtime story
  if (url.pathname === "/story") {
    const completion = await openai.chat.completions.create({
      messages: [{
        role: "user",
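A sketch of how the /story route could finish; the prompt, model, and response shape are assumptions for illustration, not the original val's code.

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

export default async function server(request: Request): Promise<Response> {
  const openai = new OpenAI();
  const url = new URL(request.url);

  // Generate bedtime story
  if (url.pathname === "/story") {
    const completion = await openai.chat.completions.create({
      messages: [{
        role: "user",
        content: "Tell a short, gentle bedtime story suitable for young children.",
      }],
      model: "gpt-4o-mini",
      max_tokens: 400,
    });

    return new Response(
      JSON.stringify({ story: completion.choices[0].message.content }),
      { headers: { "Content-Type": "application/json" } },
    );
  }

  return new Response("Not found", { status: 404 });
}
```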
EchoPrompter/main.tsx: 9 matches

if (typeof document !== "undefined") { client(); }
export default async function server(request: Request): Promise<Response> {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const { sqlite } = await import("https://esm.town/v/stevekrouse/sqlite");
  const openai = new OpenAI();

  // Use the val's URL as a unique key for database tables

// ...

  const echoPrompt = getEchoPrompt();

  const completion = await openai.chat.completions.create({
    messages: [
      {

// ...

  // First, generate the agent prompt
  const completion2 = await openai.chat.completions.create({
    messages: [
      {

// ...

  // Then, generate commands for the agent based on its purpose and description
  const commandsCompletion = await openai.chat.completions.create({
    messages: [
      {

// ...

  }

  // Format the history into OpenAI message format
  const messages = [
    {

// ...

  // Add conversation history if it exists
  if (history && history.length > 0) {
    // Filter out system messages and map to OpenAI format
    history.forEach(msg => {
      if (msg.role !== "system") {

// ...

  // Chat with the agent using the stored prompt and history
  const completion = await openai.chat.completions.create({
    messages: messages,
    model: "gpt-4o-mini",

// ...

  // Execute the command with the AI
  const completion = await openai.chat.completions.create({
    messages: messages,
    model: "gpt-4o-mini",
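The comments in this val describe formatting stored conversation history into the OpenAI message format: prepend the agent's prompt, drop stored system messages, and append the new user message. A sketch of that step; `agentPrompt`, `history`, and `userMessage` are assumed inputs, not the original val's variable names.

```ts
type ChatMessage = { role: "system" | "user" | "assistant"; content: string };

function buildMessages(agentPrompt: string, history: ChatMessage[], userMessage: string): ChatMessage[] {
  // Start with the stored agent prompt as the system message.
  const messages: ChatMessage[] = [
    { role: "system", content: agentPrompt },
  ];

  // Add conversation history if it exists, filtering out system messages.
  if (history && history.length > 0) {
    history.forEach((msg) => {
      if (msg.role !== "system") {
        messages.push({ role: msg.role, content: msg.content });
      }
    });
  }

  // Finally, append the new user message.
  messages.push({ role: "user", content: userMessage });
  return messages;
}
```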
try {
  const { OpenAI } = await import("https://esm.town/v/std/openai");
  const openai = new OpenAI();

  const completion = await openai.chat.completions.create({
    messages: [
      {
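The fragment above opens a try block around the OpenAI call but the matching catch is not shown. A minimal sketch of the same pattern wrapped in a helper; the function name, prompt parameter, and status code are assumptions.

```ts
async function generateOrFail(prompt: string): Promise<Response> {
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const completion = await openai.chat.completions.create({
      messages: [{ role: "user", content: prompt }],
      model: "gpt-4o-mini",
    });

    return new Response(
      JSON.stringify({ result: completion.choices[0].message.content }),
      { headers: { "Content-Type": "application/json" } },
    );
  } catch (error) {
    console.error("OpenAI Error:", error);
    return new Response(
      JSON.stringify({ error: "Failed to generate a response." }),
      { status: 500, headers: { "Content-Type": "application/json" } },
    );
  }
}
```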
import { OpenAI } from "https://esm.town/v/std/openai";

// Telegram Bot Token - you would replace this with your actual bot token

// ...

const TELEGRAM_API_URL = `https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}`;

// OpenAI for generating research insights
const openai = new OpenAI();

// Helper function to send Telegram message

// ...

async function processResearchRequest(text: string) {
  try {
    const completion = await openai.chat.completions.create({
      messages: [
        {

// ...

    return completion.choices[0].message.content || "I couldn't generate a response.";
  } catch (error) {
    console.error("OpenAI Error:", error);
    return "Sorry, there was an error processing your research request.";
  }
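The snippet mentions a "Helper function to send Telegram message" but its body is not shown. A sketch of such a helper; the function name and error handling are assumptions, while sendMessage and its chat_id/text fields are standard Telegram Bot API usage, and TELEGRAM_API_URL is the constant defined in the snippet above.

```ts
async function sendTelegramMessage(chatId: number, text: string): Promise<void> {
  const response = await fetch(`${TELEGRAM_API_URL}/sendMessage`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ chat_id: chatId, text }),
  });
  if (!response.ok) {
    console.error("Telegram API error:", await response.text());
  }
}
```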
AIContentHashtagsGenerator/main.tsx: 3 matches

if (request.method === "POST") {
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { topic } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: [
        {
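The snippet stops inside the completions call. A sketch of how the rest of such a handler might look, still inside the server function's POST branch; the prompt wording, model, and split-on-whitespace parsing are assumptions, not the original val's code.

```ts
if (request.method === "POST") {
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { topic } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: [
        {
          role: "user",
          content: `Generate 10 short social media hashtags for the topic "${topic}". Reply with hashtags only, separated by spaces.`,
        },
      ],
      model: "gpt-4o-mini",
      max_tokens: 100,
    });

    // Keep only tokens that look like hashtags.
    const raw = completion.choices[0].message.content ?? "";
    const hashtags = raw.split(/\s+/).filter((tag) => tag.startsWith("#"));

    return new Response(JSON.stringify({ hashtags }), {
      headers: { "Content-Type": "application/json" },
    });
  } catch (error) {
    return new Response(JSON.stringify({ error: "Hashtag generation failed." }), {
      status: 500,
      headers: { "Content-Type": "application/json" },
    });
  }
}
```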
daringPlumOctopus/main.tsx: 3 matches

if (request.method === "POST") {
  try {
    const { OpenAI } = await import("https://esm.town/v/std/openai");
    const openai = new OpenAI();

    const { topic } = await request.json();

    const completion = await openai.chat.completions.create({
      messages: [
        {