Townie/.cursorrules (4 matches)
Note: When changing a SQLite table's schema, change the table's name (e.g., add _2 or _3) to create a fresh table.
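For example, a minimal sketch of that rename pattern, assuming Val Town's `std/sqlite` helper (the table and column names are illustrative):

```ts
import { sqlite } from "https://esm.town/v/std/sqlite";

// Instead of ALTERing an existing table, create a versioned copy and write to it.
// `messages_2` is a hypothetical name following the `_2` convention above.
await sqlite.execute(`
  CREATE TABLE IF NOT EXISTS messages_2 (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    content TEXT NOT NULL,
    created_at TEXT DEFAULT CURRENT_TIMESTAMP
  )
`);
```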

### OpenAI

```ts
import { OpenAI } from "https://esm.town/v/std/openai";
const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  messages: [
    { role: "user", content: "Say hello in a creative way" },
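    // The search view truncates the example here; a hedged completion follows.
    // The model name and max_tokens value are assumptions, not part of the match.
  ],
  model: "gpt-4o-mini",
  max_tokens: 30,
});
console.log(completion.choices[0].message.content);
```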
chatter/chatCompletion.js (2 matches)
export async function groqChatCompletion(apiKey, payload) {
  console.log('>>> [groqChatCompletion] Payload:', payload);
  const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
    method: 'POST',
    headers: {

try {
  const res = await groqChatCompletion(apiKey, {
    model: 'openai/gpt-oss-120b',
    messages: [
      { role: 'system', content: 'Classify the user request as either links or text. Respond with a single token: links or text. Use links if the user appears to want a search results list of sources; use text if the user is asking for an explanation/summary/definition.' },
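Judging by the visible lines, the helper is a thin wrapper over Groq's OpenAI-compatible chat endpoint, and the second match shows it used as a classifier. A minimal sketch of what the full wrapper plausibly looks like; everything beyond the URL, method, and auth header shown above is an assumption:

```ts
// Hedged sketch of the complete helper: JSON body, error handling, and the
// return value are assumed, only the request shell appears in the match.
export async function groqChatCompletion(apiKey: string, payload: unknown) {
  const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(payload),
  });
  if (!response.ok) {
    throw new Error(`Groq request failed: ${response.status} ${await response.text()}`);
  }
  return await response.json();
}
```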
export const settings = {
  model: "openai/gpt-oss-120b",
  stream: false,
  reasoningEffort: "low",
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";
// @ts-ignore
import { Hono } from "npm:hono@4.4.12";

debugLog("Final AI Payload:", userInput);

const openai = new OpenAI();
const completion = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "system", content: PORTFOLIO_ANALYST_PROMPT }, {

];

const openai = new OpenAI();
const response = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: messages,
new-val-notifications/README.md (3 matches)
- `main.tsx` - Main cron function and Discord webhook
- `fetch-vals.ts` - Fetches and filters new vals using Val Town SDK
- `ai-summarizer.ts` - Generates OpenAI summaries for vals without READMEs

## Additional logic

- Filter out commonly remixed utility vals like Blob Admin, SQLiteExplorer
- Skip shallow remixes (forks with ≤3 versions)
- Prioritize `main.tsx` files for AI summaries and limit code sent to OpenAI to 5KB to avoid token limits
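A sketch of how that 5KB cap and summary request could look in `ai-summarizer.ts`; the function name, model, and prompt below are illustrative rather than taken from the val:

```ts
import { OpenAI } from "https://esm.town/v/std/openai";

const MAX_CODE_LENGTH = 5_000; // roughly 5KB of source, to stay under token limits

// Hypothetical helper: summarize a val's main.tsx when it has no README.
export async function summarizeVal(code: string): Promise<string> {
  const clipped = code.length > MAX_CODE_LENGTH ? code.slice(0, MAX_CODE_LENGTH) : code;
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o-mini", // assumed model
    messages: [
      { role: "system", content: "Summarize what this Val Town val does in one or two sentences." },
      { role: "user", content: clipped },
    ],
  });
  return completion.choices[0].message.content ?? "";
}
```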

## Setup

1. Set `DISCORD_WEBHOOK` environment variable
2. Set `OPENAI_API_KEY` environment variable
3. Set `testMode = false` in main.tsx for production
import { Hono } from "npm:hono@4.4.12";
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai";
import type { Context } from "npm:hono@4.4.12";
import { streamText } from "npm:hono/streaming";

if (!industry) return c.json({ error: "Industry is required" }, 400);
try {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "system", content: DYNAMIC_LIST_GENERATOR_PROMPT }, {

if (!occupation) return c.json({ error: "Occupation is required" }, 400);
try {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "system", content: DYNAMIC_LIST_GENERATOR_PROMPT }, {

}\n\nOccupation: ${occupation_title}\n\nTask: ${task}`;
try {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "system", content: PROMPT_REFINER_SYSTEM_PROMPT }, {

}
try {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "system", content: INPUT_EXTRACTOR_SYSTEM_PROMPT }, {

}
try {
  const openai = new OpenAI();
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [

try {
  const openai = new OpenAI();
  const agentStream = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "system", content: systemPromptWithContext }, {

if (!raw_text) return c.json({ error: "raw_text is required" }, 400);
try {
  const openai = new OpenAI();
  const htmlCompletion = await openai.chat.completions.create({
    model: "gpt-4o-mini", // Using a faster model for simple formatting
    messages: [{ role: "system", content: HTML_FORMATTER_SYSTEM_PROMPT }, {
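The `streamText` import together with the `agentStream` call suggests the agent route streams tokens back to the client as they arrive. A hedged sketch of that pattern with Hono and `std/openai`; the route path, request shape, and the way `app` and `systemPromptWithContext` are used here are assumptions drawn from the imports and matches above:

```ts
// Hypothetical streaming route illustrating the pattern implied by the matches.
app.post("/api/agent", async (c: Context) => {
  const { message } = await c.req.json();
  const openai = new OpenAI();
  const agentStream = await openai.chat.completions.create({
    model: "gpt-4o",
    stream: true, // request incremental chunks instead of one full response
    messages: [
      { role: "system", content: systemPromptWithContext },
      { role: "user", content: message },
    ],
  });
  return streamText(c, async (stream) => {
    for await (const chunk of agentStream) {
      // Each chunk carries a small delta of the assistant's reply.
      await stream.write(chunk.choices[0]?.delta?.content ?? "");
    }
  });
});
```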
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";

// --- AI BEHAVIORAL GUIDELINES ---

}

const openai = new OpenAI();

const completion = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [
// @ts-ignore
import { OpenAI } from "https://esm.town/v/std/openai?v=4";

// --- AI BEHAVIORAL GUIDELINES ---

if (req.method === "POST" && action === "getAstrology") {
  try {
    // The user wants to use OpenAI, so we instantiate it.
    // NOTE: This requires the OPENAI_API_KEY environment variable to be set.
    const openai = new OpenAI();
    const { planetaryData } = await req.json();

    }`;

    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [
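Pieced together, the handler appears to parse `planetaryData` from the request body, interpolate it into a prompt (the template itself is elided between the matched lines), and return the model's reply. A hedged reconstruction of that shape; the prompt text, the JSON response format, and the surrounding `req`/`action` variables are assumptions from context:

```ts
if (req.method === "POST" && action === "getAstrology") {
  try {
    // Requires the OPENAI_API_KEY environment variable to be set.
    const openai = new OpenAI();
    const { planetaryData } = await req.json();
    // Hypothetical prompt; the real template literal is cut off in the match above.
    const prompt = `Interpret this planetary data as a short astrology reading:\n${
      JSON.stringify(planetaryData, null, 2)
    }`;
    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [{ role: "user", content: prompt }],
    });
    return Response.json({ reading: completion.choices[0].message.content });
  } catch (err) {
    return Response.json({ error: String(err) }, { status: 500 });
  }
}
```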
// Settings configuration
const settings = {
  model: 'openai/gpt-oss-120b',
  stream: false,
  reasoningEffort: 'low'
// Optionally set language or prompt here

const resp = await fetch('https://api.groq.com/openai/v1/audio/transcriptions', {
  method: 'POST',
  headers: { 'Authorization': 'Bearer ' + apiKey },
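Unlike the chat calls, the transcription endpoint takes multipart form data rather than JSON. A minimal sketch of how the request body is typically assembled; the `audioBlob` variable and the `whisper-large-v3` model name are assumptions:

```ts
// Hedged sketch: build a multipart body for Groq's OpenAI-compatible
// transcription endpoint. fetch sets the multipart boundary itself,
// so no Content-Type header is added manually.
const form = new FormData();
form.append('file', audioBlob, 'audio.webm'); // audioBlob: recorded audio as a Blob (assumed)
form.append('model', 'whisper-large-v3');     // assumed transcription model
// form.append('language', 'en');             // optionally set language or prompt here

const resp = await fetch('https://api.groq.com/openai/v1/audio/transcriptions', {
  method: 'POST',
  headers: { 'Authorization': 'Bearer ' + apiKey },
  body: form,
});
const { text } = await resp.json();
```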
language,
offset = 0,
model = 'openai/gpt-oss-120b',
reasoning_effort = 'medium',
tools = [{ type: 'browser_search' }],

language,
offset = 0,
model = 'openai/gpt-oss-120b',
reasoning_effort = 'medium',
tools = [{ type: 'browser_search' }],
start: async (controller) => {
  try {
    const upstream = await fetch("https://api.groq.com/openai/v1/chat/completions", {
      method: 'POST',
      headers: {
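This looks like a server-side proxy that re-streams Groq's server-sent events through a `ReadableStream`. A hedged sketch of how the rest of that `start` callback plausibly continues; the function wrapper, payload shape, and error handling are assumptions:

```ts
// Hedged sketch of the re-streaming pattern implied by the snippet above;
// the apiKey/payload parameters stand in for whatever the surrounding handler provides.
function proxyGroqStream(apiKey: string, payload: Record<string, unknown>): Response {
  const body = new ReadableStream<Uint8Array>({
    start: async (controller) => {
      try {
        const upstream = await fetch("https://api.groq.com/openai/v1/chat/completions", {
          method: "POST",
          headers: {
            "Authorization": `Bearer ${apiKey}`,
            "Content-Type": "application/json",
          },
          body: JSON.stringify({ ...payload, stream: true }),
        });
        if (!upstream.ok || !upstream.body) {
          throw new Error(`Groq upstream error: ${upstream.status}`);
        }
        const reader = upstream.body.getReader();
        // Forward Groq's SSE bytes to the client unchanged.
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          controller.enqueue(value);
        }
        controller.close();
      } catch (err) {
        controller.error(err);
      }
    },
  });
  return new Response(body, { headers: { "Content-Type": "text/event-stream" } });
}
```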