GROKPROMPTmain.tsx11 matches
526* Server-side logic for generating prompts.
527* The server expects a POST request to "/generate-prompts" with either text or image form data.
528* It uses OpenAI API to generate detailed and concise prompts.
529*/
530export default async function server(request: Request): Promise<Response> {
531if (request.method === "POST" && new URL(request.url).pathname === "/generate-prompts") {
532try {
533const { OpenAI } = await import("https://esm.town/v/std/openai");
534const openai = new OpenAI();
535const formData = await request.formData();
536const inputType = formData.get("inputType") as string;
627}
628629console.log("Sending request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
630631const completion = await openai.chat.completions.create({
632model: "chatgpt-4o-latest",
633messages: analysisMessages,
635});
636637console.log("Received response from OpenAI:", JSON.stringify(completion, null, 2));
638639const content = completion.choices[0].message.content || "";
695if (request.method === "POST" && new URL(request.url).pathname === "/clarify-prompts") {
696try {
697const { OpenAI } = await import("https://esm.town/v/std/openai");
698const openai = new OpenAI();
699const { inputType, originalInput, clarificationAnswer } = await request.json();
700749const analysisMessages = [systemMessage, userMessage];
750751console.log("Sending clarification request to OpenAI:", JSON.stringify(analysisMessages, null, 2));
752753const completion = await openai.chat.completions.create({
754model: "chatgpt-4o-latest",
755messages: analysisMessages,
757});
758759console.log("Received clarification response from OpenAI:", JSON.stringify(completion, null, 2));
760761const content = completion.choices[0].message.content || "";
VEOPROMPTERmain.tsx5 matches
307const url = new URL(request.url);
308if (url.pathname === "/generate-prompts") {
309const { OpenAI } = await import("https://esm.sh/openai@4.11.1");
310const openai = new OpenAI({
311apiKey: Deno.env.get("OPENAI_API_KEY"),
312});
313316317try {
318const completion = await openai.chat.completions.create({
319model: "chatgpt-4o-latest", // Changed from "gpt-4o-latest" to "chatgpt-4o-latest"
320messages: [
364365if (!response) {
366throw new Error("No response from OpenAI");
367}
368
5options = {},
6) => {
7// Initialize OpenAI API stub
8const { Configuration, OpenAIApi } = await import(
9"https://esm.sh/openai@3.3.0"
10);
11const configuration = new Configuration({
12apiKey: process.env.OPENAI,
13});
14const openai = new OpenAIApi(configuration);
15// Request chat completion
16const messages = typeof prompt === "string"
17? [{ role: "user", content: prompt }]
18: prompt;
19const { data } = await openai.createChatCompletion({
20model: "gpt-3.5-turbo-0613",
21messages,
1112### Toolings
13* LLMs can use [tools](https://platform.openai.com/docs/guides/function-calling), meaning you can make this an agent and a whole lot more useful.
14
15
1import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
2import { z } from "https://esm.sh/zod";
3import { email } from "https://esm.town/v/std/email";
4import { OpenAI } from "https://esm.town/v/std/openai";
56export default async function(e: Email) {
7const client = new OpenAI();
89const Messages = z.object({
chatgptchessmain.tsx4 matches
1import { OpenAI } from "https://esm.town/v/std/openai?v=5"
2import { sqlite } from "https://esm.town/v/std/sqlite?v=6"
3import { Chess } from "npm:chess.js"
131return c.html(`<body>
132<div class='p-4'>
133<h2 class='font-bold'>OpenAI Chess</h2>
134<p class='pb-4'>Play chess against ChatGPT-4</p>
135<div id="myBoard" style="width: 400px"></div>
171chess.move(san)
172173const openai = new OpenAI()
174175let messages = []
182args: [c.req.param().id, `Requesting response to ${san}`],
183})
184const completion = await openai.chat.completions.create({
185messages: [
186{
valTownChatGPTREADME.md1 match
1# ChatGPT Implemented in Val Town
23Demonstrates how to use assistants and threads with the OpenAI SDK and how to stream the response with Server-Sent Events.
45<p align=center>
valTownChatGPTmain.tsx8 matches
1/** @jsxImportSource https://esm.sh/react */
2import OpenAI from "npm:openai";
3import { renderToString } from "npm:react-dom/server";
45// This uses my personal API key, you'll need to provide your own if
6// you fork this. We'll be adding support to the std/openai lib soon!
7const openai = new OpenAI();
8import { Hono } from "npm:hono@3";
938});
3940// Setup the SSE connection and stream back the response. OpenAI handles determining
41// which message is the correct response based on what was last read from the
42// thread. This is likely vulnerable to race conditions.
58const app = new Hono();
59app.get("/", async (c) => {
60const thread = await openai.beta.threads.create();
61const assistant = await openai.beta.assistants.create({
62name: "",
63instructions:
114app.post("/post-message", async (c) => {
115let message = await c.req.text();
116await openai.beta.threads.messages.create(
117c.req.query("threadId"),
118{ role: "user", content: message },
132));
133};
134const run = openai.beta.threads.runs.stream(threadId, {
135assistant_id: assistantId,
136// Make sure we only display messages we haven't seen yet.
twitterAlertmain.tsx5 matches
1import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
2import { z } from "https://esm.sh/zod";
3import { OpenAI } from "https://esm.town/v/std/openai";
4import { discordWebhook } from "https://esm.town/v/stevekrouse/discordWebhook";
5import { socialDataSearch, Tweet } from "https://esm.town/v/stevekrouse/socialDataSearch";
11.join(" OR ") + " " + excludes;
1213const openai = new OpenAI();
1415const RelevanceSchema = z.object({
2930try {
31const completion = await openai.beta.chat.completions.parse({
32model: "gpt-4o-mini",
33messages: [
42return completion.choices[0].message.parsed;
43} catch (error) {
44console.error("Error parsing OpenAI response:", error);
45return { isRelevant: false, confidence: 0, reason: "Error in processing" };
46}
neatEmeraldVicunamain.tsx5 matches
1import { zodResponseFormat } from "https://esm.sh/openai/helpers/zod";
2import { z } from "https://esm.sh/zod";
3import { OpenAI } from "https://esm.town/v/std/openai";
4import { discordWebhook } from "https://esm.town/v/stevekrouse/discordWebhook";
5import { socialDataSearch, Tweet } from "https://esm.town/v/stevekrouse/socialDataSearch";
11.join(" OR ") + " " + excludes;
1213const openai = new OpenAI();
1415const RelevanceSchema = z.object({
2930try {
31const completion = await openai.beta.chat.completions.parse({
32model: "gpt-4o-mini",
33messages: [
42return completion.choices[0].message.parsed;
43} catch (error) {
44console.error("Error parsing OpenAI response:", error);
45return { isRelevant: false, confidence: 0, reason: "Error in processing" };
46}