};

const NASA_API_KEY = Deno.env.get("NASA_API_KEY");

export async function GetAPOD(req: Request): Promise<Response> {
  const url = `https://api.nasa.gov/planetary/apod?api_key=${NASA_API_KEY}&thumbs=true`;
  const cacheKey = "nasa_apod";
  const cacheMinutes = 60;

  if (!data) {
    return new Response("No data from NASA API", { status: 404 });
  }
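The cache lookup between `cacheMinutes` and the `!data` check is elided in this snippet; a minimal sketch of the fetch-with-cache pattern it implies, using hypothetical `getCached`/`setCached` helpers (the val's actual storage mechanism is not shown):

```ts
// Sketch only: getCached/setCached are hypothetical stand-ins for whatever
// storage the val actually uses (e.g. blob storage or sqlite).
async function fetchWithCache(url: string, cacheKey: string, cacheMinutes: number) {
  const cached = await getCached(cacheKey);
  if (cached && Date.now() - cached.savedAt < cacheMinutes * 60_000) {
    return cached.value; // still fresh: serve from cache
  }
  const res = await fetch(url);
  const value = res.ok ? await res.json() : null;
  if (value) await setCached(cacheKey, { savedAt: Date.now(), value });
  return value;
}
```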
groqAudioChat/main.tsx (48 matches)
import "jsr:@std/dotenv/load"; // needed for deno run; not req for smallweb or valtown

// Function to handle audio transcription using Groq's Whisper API
export const audioTranscriptionHandler = async (c) => {
  console.log("🎤 Audio transcription request received");
  }

  // Get API key from environment variable
  const apiKey = Deno.env.get("GROQ_API_KEY");
  if (!apiKey) {
    console.error("❌ Transcription error: Missing API key");
    return c.json({ error: "API key not configured" }, 500);
  }

  // If the file doesn't have a proper name or type, add one
  // This ensures the file has the right extension for the API
  if (!audioFile.name || !audioFile.type.startsWith('audio/')) {
    const newFile = new File(
  }

  // Prepare the form data for Groq API
  const groqFormData = new FormData();

  groqFormData.append("timestamp_granularities[]", "word");

  // Call Groq API
  console.log("🎤 Sending request to Groq Whisper API");
  const start = Date.now();
  const response = await fetch("https://api.groq.com/openai/v1/audio/transcriptions", {
    method: "POST",
    headers: {
      "Authorization": `Bearer ${apiKey}`
    },
    body: groqFormData
  });
  const elapsed = Date.now() - start;
  console.log(`🎤 Groq Whisper API response received in ${elapsed}ms, status: ${response.status}`);

  // Get response content type
      errorMessage = `Server error: ${response.status} ${response.statusText}`;
      // Log the full response for debugging
      console.error("❌ Transcription API error response:", {
        status: response.status,
        statusText: response.statusText,
    }
  } catch (parseError) {
    console.error("❌ Error parsing Groq API response:", parseError);
    errorMessage = "Failed to parse error response from server";
  }

  return c.json({
    error: `Groq API error: ${errorMessage}`,
    status: response.status
  }, response.status);
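The form-data appends elided above typically include the audio file and the model. A minimal sketch of a complete Groq Whisper form body; the model name and `response_format` are assumptions based on Groq's documented speech-to-text options (`verbose_json` is what enables word-level timestamps):

```ts
const groqFormData = new FormData();
groqFormData.append("file", audioFile);                   // the (possibly renamed) upload
groqFormData.append("model", "whisper-large-v3");         // assumed model choice
groqFormData.append("response_format", "verbose_json");   // required for timestamps
groqFormData.append("timestamp_granularities[]", "word"); // word-level timing, as above
```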
  console.log(`🔵 Last user message: "${messages.findLast(m => m.role === 'user')?.content?.substring(0, 50)}..."`);

  const GROQ_API_KEY = Deno.env.get("GROQ_API_KEY");
  if (!GROQ_API_KEY) {
    console.error("❌ Missing GROQ_API_KEY environment variable");
    return c.json({ error: "GROQ_API_KEY environment variable is not set" }, 500);
  }

  console.log("🔵 Sending request to Groq API");
  const start = Date.now();
  const response = await fetch("https://api.groq.com/openai/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${GROQ_API_KEY}`
    },
    body: JSON.stringify({
  });
  const elapsed = Date.now() - start;
  console.log(`🔵 Groq API response received in ${elapsed}ms, status: ${response.status}`);

  if (!response.ok) {
    const errorData = await response.json();
    console.error("❌ Chat API error:", errorData);
    return c.json({ error: "Failed to get chat completion", details: errorData }, response.status);
  }
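The `JSON.stringify` payload is elided in this snippet; a minimal sketch of a typical Groq chat-completions body (the model name matches the one advertised in the app's footer; `temperature` is an assumption):

```ts
body: JSON.stringify({
  model: "llama-3.3-70b-versatile", // the model named in the app's footer
  messages,                         // conversation history from the client
  temperature: 0.7,                 // assumed; not shown in the snippet
}),
```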
}

  // Get API key from environment variable
  const apiKey = Deno.env.get("GROQ_API_KEY");
  if (!apiKey) {
    console.error("❌ TTS error: Missing API key");
    return c.json({ error: "API key not configured" }, 500);
  }

  // Call Groq Speech API
  console.log("🔊 Sending request to Groq Speech API");
  const start = Date.now();
  const response = await fetch("https://api.groq.com/openai/v1/audio/speech", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`
    },
    body: JSON.stringify({
  });
  const elapsed = Date.now() - start;
  console.log(`🔊 Groq Speech API response received in ${elapsed}ms, status: ${response.status}`);

  if (!response.ok) {
      const errorData = await response.json();
      errorMessage = errorData.error?.message || JSON.stringify(errorData);
      console.error("❌ TTS API error:", errorData);
    } catch (e) {
      // If response is not JSON
      errorMessage = `Server error: ${response.status} ${response.statusText}`;
      console.error("❌ TTS API non-JSON error:", errorMessage);
    }
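The speech request body is likewise elided; a minimal sketch of what such a payload could look like (the model, voice, and format values are assumptions, not taken from this val):

```ts
body: JSON.stringify({
  model: "playai-tts",    // assumed; the footer text says TTS comes through PlayHT
  voice: "Fritz-PlayAI",  // assumed voice name
  input: text,            // the assistant reply to synthesize
  response_format: "wav", // assumed output format
}),
```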
// Now immediately send this message to get AI response
try {
  // Prepare messages for the API
  const apiMessages = this.messages.map(({ role, content }) => ({ role, content }));

  // Ensure first message is always the correct system message for current mode
  if (apiMessages.length > 0 && apiMessages[0].role === 'system') {
    const systemMessage = this.chatMode === 'concise'
      ? 'You are a helpful assistant powered by the Llama-3.3-70b-versatile model. Keep your responses short, concise and conversational. Aim for 1-3 sentences when possible.'
      : 'You are a helpful assistant powered by the Llama-3.3-70b-versatile model. Respond conversationally and accurately to the user.';

    apiMessages[0].content = systemMessage;
  }

    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages: apiMessages })
  });

  this.statusMessage = 'Thinking...';

  // Prepare messages for the API (excluding UI-only properties)
  const apiMessages = this.messages.map(({ role, content }) => ({ role, content }));

  // Ensure first message is always the correct system message for current mode
  if (apiMessages.length > 0 && apiMessages[0].role === 'system') {
    const systemMessage = this.chatMode === 'concise'
      ? 'You are a helpful assistant powered by the Llama-3.3-70b-versatile model. Keep your responses short, concise and conversational. Aim for 1-3 sentences when possible.'
      : 'You are a helpful assistant powered by the Llama-3.3-70b-versatile model. Respond conversationally and accurately to the user.';

    apiMessages[0].content = systemMessage;
  }

    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages: apiMessages })
  });

<p class="text-center text-sm text-gray-600 mt-4">
  Powered by Llama-3.3-70b-versatile through the Groq API. Audio transcription and speech synthesis are provided by Groq; text-to-speech is served through PlayHT. <a class="underline" href="https://console.groq.com/docs/speech-to-text" target="_blank" rel="noopener noreferrer">Documentation here</a>. <a class="underline" href="https://www.val.town/v/yawnxyz/groqAudioChat" target="_blank" rel="noopener noreferrer">Code here</a>
</p>
<div class="text-center text-sm text-gray-600 mt-4 w-full mx-auto">
groqAudioWordLevel/main.tsx (15 matches)
import "jsr:@std/dotenv/load"; // needed for deno run; not req for smallweb or valtown

// Function to handle audio transcription using Groq's Whisper API
export const audioTranscriptionHandler = async (c) => {
  try {
  }

  // Get API key from environment variable
  const apiKey = Deno.env.get("GROQ_API_KEY");
  if (!apiKey) {
    return c.json({ error: "API key not configured" }, 500);
  }

  // Prepare the form data for Groq API
  const groqFormData = new FormData();

  groqFormData.append("timestamp_granularities[]", "word");

  // Call Groq API
  const response = await fetch("https://api.groq.com/openai/v1/audio/transcriptions", {
    method: "POST",
    headers: {
      "Authorization": `Bearer ${apiKey}`
    },
    body: groqFormData
      errorMessage = `Server error: ${response.status} ${response.statusText}`;
      // Log the full response for debugging
      console.error("Groq API error response:", {
        status: response.status,
        statusText: response.statusText,
    }
  } catch (parseError) {
    console.error("Error parsing Groq API response:", parseError);
    errorMessage = "Failed to parse error response from server";
  }

  return c.json({
    error: `Groq API error: ${errorMessage}`,
    status: response.status
  }, response.status);
<title>Audio Transcription with Word Timestamps</title>
<meta property="og:title" content="Audio Transcription with Word Timestamps" />
<meta property="og:description" content="Upload your audio and we'll transcribe it with word-level timestamps using Groq Whisper API" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:title" content="Audio Transcription with Word Timestamps" />
<meta name="twitter:description" content="Upload your audio and we'll transcribe it with word-level timestamps using Groq Whisper API" />
<script src="https://cdn.tailwindcss.com"></script>
<script src="https://unpkg.com/dexie@3.2.2/dist/dexie.js"></script>
</div>
<p class="text-center text-sm text-gray-600 mt-4">
  Uses Groq Whisper API for fast and accurate speech-to-text transcription with word-level timestamps. <a class="underline" href="https://console.groq.com/docs/speech-to-text" target="_blank" rel="noopener noreferrer">Documentation here</a>. <a class="underline" href="https://www.val.town/v/yawnxyz/groqAudioWordLevel" target="_blank" rel="noopener noreferrer">Code here</a>
</p>
<div class="text-center text-sm text-gray-600 mt-4 w-full mx-auto">
OpenTownie/useProjectFiles.ts (1 match)
import { useState, useEffect } from "https://esm.sh/react@18.2.0?dev";
import { fetchProjectFiles } from "../utils/api.ts";

interface UseProjectFilesProps {
OpenTownie/useChatLogic.ts (4 matches)
  project: any;
  branchId: string | undefined;
  anthropicApiKey: string;
  bearerToken: string;
  selectedFiles: string[];

  project,
  branchId,
  anthropicApiKey,
  bearerToken,
  selectedFiles,

    status,
  } = useChat({
    api: "/api/send-message",
    body: {
      project,
      branchId,
      anthropicApiKey,
      selectedFiles,
      images: images
OpenTownie/TODOs.md (1 match)
- [ ] Rebuild as React Router?
- [ ] OpenTownie as a PR bot
- [ ] Give it the ability to see its own client-side and server-side logs by building a middleware that shoves them into a SQLite database, then give it a tool to access them (or use our tRPC API in that tool)
- [ ] Do a browser-use or screenshot thing to give it access to its own visual output
- [ ] Have it default to creating a new branch off main, embedding an iframe of the resulting HTTP val, and giving you a link to a PR-opening URL
OpenTownie/system_prompt.txt (7 matches)
- For AI-generated images, use: `https://maxm-imggenurl.web.val.run/the-description-of-your-image`
- **Storage:** DO NOT use the Deno KV module for storage
- **Browser APIs:** DO NOT use the `alert()`, `prompt()`, or `confirm()` methods
- **Weather Data:** Use open-meteo for weather data (doesn't require API keys) unless otherwise specified
- **View Source:** Add a view source link with `import.meta.url.replace("esm.town", "val.town")` and include the `target="_top"` attribute (see the sketch after this list)
- **Error Debugging:** Add `<script src="https://esm.town/v/std/catch"></script>` to HTML to capture client-side errors
- **Error Handling:** Only use try...catch when there's a clear local resolution; avoid catches that merely log or return 500s - let errors bubble up with full context
- **Environment Variables:** Use `Deno.env.get('keyname')` and minimize their use - prefer APIs without keys
- **Imports:** Use `https://esm.sh` for npm and Deno dependencies to ensure compatibility on server and browser
- **Storage Strategy:** Only use backend storage if explicitly required; prefer simple static client-side sites
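As a concrete illustration of the view-source guideline above, the link can be emitted inside an HTML template literal; the anchor text here is an assumption:

```ts
// Sketch: a view-source link per the guideline above.
const viewSource = `<a href="${import.meta.url.replace("esm.town", "val.town")}" target="_top">View Source</a>`;
```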
### Backend (Hono) Best Practices
- Hono is the recommended API framework (similar to Express, Flask, or Sinatra)
- Main entry point should be `backend/index.ts`
- **Static asset serving:** Use the utility functions to read and serve project files:
});
```
- Create RESTful API routes for CRUD operations
- Be careful with error handling as Hono tends to swallow errors
- Consider re-throwing errors to see full stack traces:
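The file's own example is elided at this point in the listing; a minimal sketch of one such re-throw middleware, assuming Hono's standard middleware signature:

```ts
// Log and re-throw so Hono doesn't swallow the error silently.
app.use("*", async (c, next) => {
  try {
    await next();
  } catch (err) {
    console.error(err); // full stack trace in the logs
    throw err;          // let the error bubble up with context
  }
});
```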
- Use React 18.2.0 consistently in all imports and the `@jsxImportSource` pragma
- Follow the React component pattern from the example project
- Handle API calls with proper error catching

### Database Patterns
- For files in the project, use `readFile` helpers

5. **API Design:**
- `fetch` handler is the entry point for HTTP vals
- Run the Hono app with `export default app.fetch // This is the entry point for HTTP vals` (see the sketch below)
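A minimal sketch of that entry point, assuming a Hono app defined in `backend/index.ts` (the route and response are placeholders):

```ts
import { Hono } from "https://esm.sh/hono";

const app = new Hono();
app.get("/", (c) => c.text("ok")); // placeholder route

export default app.fetch; // This is the entry point for HTTP vals
```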
OpenTownie/soundEffects.ts (4 matches)
/**
 * Plays a bell sound notification using the Web Audio API
 * @returns A Promise that resolves when the sound has started playing
 */
    const AudioContext = window.AudioContext || (window as any).webkitAudioContext;
    if (!AudioContext) {
      console.warn("Web Audio API not supported in this browser");
      resolve();
      return;

/**
 * Plays a simple notification sound using the Web Audio API
 * This is a simpler, shorter bell sound
 * @returns A Promise that resolves when the sound has started playing
 */
    const AudioContext = window.AudioContext || (window as any).webkitAudioContext;
    if (!AudioContext) {
      console.warn("Web Audio API not supported in this browser");
      resolve();
      return;
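The tone-generation bodies of both functions are elided in this listing; a minimal sketch of a bell-like note under the same `AudioContext` guard, with pitch and decay values as assumptions:

```ts
function playSketchBell(): Promise<void> {
  return new Promise((resolve) => {
    const Ctx = window.AudioContext || (window as any).webkitAudioContext;
    if (!Ctx) {
      console.warn("Web Audio API not supported in this browser");
      resolve();
      return;
    }
    const ctx = new Ctx();
    const osc = ctx.createOscillator();
    const gain = ctx.createGain();
    osc.type = "sine";
    osc.frequency.value = 880;                                          // assumed pitch (A5)
    gain.gain.setValueAtTime(0.4, ctx.currentTime);                     // initial volume
    gain.gain.exponentialRampToValueAtTime(0.001, ctx.currentTime + 1); // bell-like decay
    osc.connect(gain).connect(ctx.destination);
    osc.start();
    osc.stop(ctx.currentTime + 1);
    resolve(); // resolves once the sound has started, per the doc comments
  });
}
```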
OpenTownie/README.md (11 matches)
- **File Browser**: Select specific files to include in the context window for more focused AI assistance
- **Branch Management**: View, select, and create branches without leaving the app
- **Cost Tracking**: See estimated API usage costs for each interaction
- **Sound Notifications**: Get alerted when Claude finishes responding
- **Mobile-Friendly**: Works on both desktop and mobile devices

## How It Works

1. **Login**: Authenticate with your Val Town API token and Anthropic API key
2. **Select a Project**: Choose which Val Town project you want to work on
3. **Select Files**: Browse your project files and select which ones to include in the context window

### Prerequisites

- A Val Town account with API access
- An Anthropic API key (Claude 3.7 Sonnet)

### Setup

1. Visit the OpenTownie app
2. Enter your Val Town API token (with `projects:write` and `users:read` permissions)
3. Enter your Anthropic API key
4. Click "Login" to access your projects
OpenTownie is built with:

- React frontend with TypeScript
- Hono API server backend
- Tailwind CSS for styling
- Web Audio API for sound notifications
- AI SDK for Claude integration

The application proxies requests to the Anthropic API and Val Town API, allowing Claude to view and edit your project files directly.

## Privacy & Security

- Your Val Town API token and Anthropic API key are stored locally in your browser
- No data is stored on our servers
- All communication with the APIs is done directly from your browser
OpenTownie/Projects.tsx (1 match)
async function loader({ bearerToken }: { bearerToken: string }) {
  const data = await (await fetch("/api/projects-loader", {
    headers: {
      "Authorization": "Bearer " + bearerToken,