GLM-4-5-Omni/main.tsx (27 matches)

const INPUT_TOKEN_COST = 500;
const OUTPUT_TOKEN_COST = 500;
const INPUT_IMAGE_COST = 750;
const INPUT_VIDEO_COST = 1000;
const INPUT_FILE_COST = 750;
…
]);

const SUPPORTED_IMAGE_EXTENSIONS = new Set([
  ".jpg",
  ".jpeg",
…
interface ProcessedFile {
  name: string;
  type: "text" | "image" | "video" | "document" | "unknown";
  content: string;
  metadata: {
…
if (SUPPORTED_TEXT_EXTENSIONS.has(extension)) {
  return this.processTextFile(buffer, name, metadata);
} else if (SUPPORTED_IMAGE_EXTENSIONS.has(extension)) {
  return this.processImageFile(buffer, name, metadata);
} else if (SUPPORTED_VIDEO_EXTENSIONS.has(extension)) {
  return this.processVideoFile(buffer, name, metadata);
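The dispatch above routes a file by its extension, but FileProcessor.getExtension, which later matches call, is not among the listed lines. A minimal sketch of what such a helper could look like, assuming it returns the lowercased extension without the leading dot (callers below prepend "." before checking the sets):

// Hypothetical sketch only; the real FileProcessor.getExtension is not shown
// in the matches above.
class FileProcessorSketch {
  // Returns "png" for "photo.png", and "" when there is no extension.
  static getExtension(name: string): string {
    const dot = name.lastIndexOf(".");
    return dot === -1 ? "" : name.slice(dot + 1).toLowerCase();
  }
}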
…
}

static processImageFile(
  buffer: Uint8Array,
  name: string,
  …
  return {
    name,
    type: "image",
    content:
      `Image File: ${name}\nFormat: ${metadata.extension.toUpperCase()}\nSize: ${metadata.size} bytes\nBase64 Data Available: Yes`,
    metadata: { ...metadata, base64 },
  };
…
const responses = {
  "which model are you":
    "I'm an omni-capable AI assistant that can process text, images, videos, and documents using various state-of-the-art language models.",
  "which version are you":
    "I'm an omni-capable AI assistant powered by multiple advanced language models including DeepSeek, Llama-4, and others.",
  …
    "I'm functioning optimally and ready to help with your questions and file processing needs.",
  "what can you do":
    "I can analyze text, images, videos, and documents. I can also engage in conversations, answer questions, write code, and help with various tasks.",
};
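The canned-response map above presumably backs a simple lookup against incoming messages. One plausible way to do that lookup, sketched here as an assumption rather than the file's actual code, is a case-insensitive substring match over the keys:

// Sketch under the assumption of substring matching; `responses` is the map above.
function findCannedResponse(
  message: string,
  responses: Record<string, string>,
): string | undefined {
  const normalized = message.toLowerCase();
  for (const [trigger, reply] of Object.entries(responses)) {
    if (normalized.includes(trigger)) return reply;
  }
  return undefined;
}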
…
  attachmentContext += `\n\n--- ${processed.name} ---\n${processed.content}`;

  if (processed.type === "image" && processed.metadata.base64) {
    attachmentContext += `\n[Base64 image data available for vision models]`;
  }
}
…
for (const attachment of attachments) {
  const extension = FileProcessor.getExtension(attachment.name);
  if (SUPPORTED_IMAGE_EXTENSIONS.has(`.${extension}`)) {
    try {
      const response = await fetch(attachment.url);
      …
      messageContent.push({
        type: "image_url",
        _image_url: {
          url: `data:${attachment.content_type};base64,${base64}`,
        },
        get image_url() {
          return this._image_url;
        },
        set image_url(value) {
          this._image_url = value;
        },
      });
    } catch (error) {
      console.error(`Failed to process image ${attachment.name}:`, error);
    }
  }
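The loop above fetches each image attachment and pushes an image_url part onto messageContent, but the lines that turn the fetched response into base64 fall outside the matched region. A minimal sketch of one common way to do that conversion in a Deno or browser runtime (an assumption, not necessarily this file's code):

// Hypothetical helper; converts a fetched image response to a base64 string
// suitable for a data: URL.
async function responseToBase64(response: Response): Promise<string> {
  const bytes = new Uint8Array(await response.arrayBuffer());
  let binary = "";
  for (const byte of bytes) binary += String.fromCharCode(byte);
  return btoa(binary);
}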
…
  attachments: FileAttachment[] = [],
): Promise<Response> {
  const hasImages = attachments.some((att) =>
    SUPPORTED_IMAGE_EXTENSIONS.has(`.${FileProcessor.getExtension(att.name)}`)
  );

  let finalMessages = messages;

  if (hasImages && config.supportsVision && messages.length > 0) {
    const lastMessage = messages[messages.length - 1];
    if (lastMessage.role === "user") {
…
for (const attachment of msg.attachments) {
  const ext = `.${FileProcessor.getExtension(attachment.name)}`;
  if (SUPPORTED_IMAGE_EXTENSIONS.has(ext)) {
    inputCost += INPUT_IMAGE_COST;
  } else if (SUPPORTED_VIDEO_EXTENSIONS.has(ext)) {
    inputCost += INPUT_VIDEO_COST;
…
send("text", {
  text:
    "Hi! I'm an omni-capable AI assistant. I can help you with text, images, videos, and documents.",
});
send("done", {});
…
`| Input Text | ${INPUT_TOKEN_COST} points/1k tokens |\n` +
`| Output Text | ${OUTPUT_TOKEN_COST} points/1k tokens |\n` +
`| Input Image | ${INPUT_IMAGE_COST} points/file |\n` +
`| Input Video | ${INPUT_VIDEO_COST} points/file |\n` +
`| Input Document | ${INPUT_DOCUMENT_COST} points/file |\n` +
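Taken together, the constants at the top of the file, the per-attachment loop, and the pricing strings above imply a simple additive cost model. A hedged sketch of how a per-request estimate could be assembled from those numbers (the token counting and rounding are assumptions):

// Constant values copied from the top of the file; the estimator itself is a
// sketch, not the file's actual implementation.
const SKETCH_INPUT_TOKEN_COST = 500;   // points per 1k input tokens
const SKETCH_OUTPUT_TOKEN_COST = 500;  // points per 1k output tokens
const SKETCH_INPUT_IMAGE_COST = 750;   // points per image attachment
const SKETCH_INPUT_VIDEO_COST = 1000;  // points per video attachment

function estimateRequestCost(
  inputTokens: number,
  outputTokens: number,
  imageFiles: number,
  videoFiles: number,
): number {
  return Math.ceil(inputTokens / 1000) * SKETCH_INPUT_TOKEN_COST
    + Math.ceil(outputTokens / 1000) * SKETCH_OUTPUT_TOKEN_COST
    + imageFiles * SKETCH_INPUT_IMAGE_COST
    + videoFiles * SKETCH_INPUT_VIDEO_COST;
}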
// --- REFACTORED: HTML & MODERN UI ---
function generateHtml(sourceUrl: string) {
  return `<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width, initial-scale=1.0"><title>Dynamic Multi-Agent Analysis</title><link rel="preconnect" href="https://fonts.googleapis.com"><link rel="preconnect" href="https://fonts.gstatic.com" crossorigin><link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Source+Code+Pro:wght@400;600&display=swap" rel="stylesheet"><style>:root{--bg-dark:#0a0a0f;--bg-med:#101018;--bg-light:#1a1a23;--border-color:rgba(255,255,255,0.1);--text-light:#f0f0f5;--text-med:#a0a0b0;--text-dark:#6c6c7a;--accent-primary:#007bff;--accent-secondary:#8a2be2;--status-green:#00c853;--status-red:#ff3d00;--status-orange:#ffab00;--font-sans:'Inter',sans-serif;--font-mono:'Source Code Pro',monospace;--shadow-sm:0 4px 6px rgba(0,0,0,0.1);--shadow-md:0 10px 15px rgba(0,0,0,0.1)}*,*::before,*::after{box-sizing:border-box}body{font-family:var(--font-sans);background-color:var(--bg-dark);color:var(--text-light);margin:0;padding:2rem;display:grid;place-items:center;min-height:100vh;background-image:radial-gradient(circle at 10% 10%,rgba(0,123,255,0.1),transparent 30%),radial-gradient(circle at 90% 80%,rgba(138,43,226,0.1),transparent 30%)}main{width:100%;max-width:900px;display:flex;flex-direction:column;gap:2rem}header{text-align:center}h1{font-size:2.25rem;font-weight:700;margin:0;color:var(--text-light)}h1 span{font-family:var(--font-mono);font-size:1rem;color:var(--text-med);margin-left:0.5rem;font-weight:400}.card{border:1px solid var(--border-color);border-radius:12px;padding:1.5rem 2rem;background:rgba(16,16,24,0.6);backdrop-filter:blur(10px);-webkit-backdrop-filter:blur(10px);box-shadow:var(--shadow-sm)}.form-group{display:flex;flex-direction:column;gap:.5rem;margin-bottom:1rem}label{font-weight:600;font-size:.9rem;color:var(--text-med)}textarea,input{background-color:var(--bg-med);border:1px solid var(--border-color);border-radius:8px;padding:.75rem;color:var(--text-light);font-family:var(--font-sans);font-size:1rem;transition:border-color .2s,box-shadow .2s}textarea:focus,input:focus{outline:none;border-color:var(--accent-primary);box-shadow:0 0 0 3px rgba(0,123,255,0.2)}textarea{min-height:100px;font-family:var(--font-mono);resize:vertical}.form-grid{display:grid;grid-template-columns:1fr 1fr;gap:1rem}.full-width{grid-column:1/-1}button{grid-column:1/-1;background-image:linear-gradient(90deg,var(--accent-primary),var(--accent-secondary));color:white;border:none;padding:1rem;font-size:1.1rem;font-weight:600;border-radius:8px;cursor:pointer;transition:transform .2s,opacity .2s,box-shadow .2s;box-shadow:var(--shadow-sm)}button:hover{transform:translateY(-2px);box-shadow:var(--shadow-md)}button:disabled{opacity:.5;cursor:not-allowed;transform:none;box-shadow:none}#results{display:none}#round-status{text-align:center;color:var(--text-med);margin:0 0 1.5rem;font-weight:500;font-size:1.1rem}#workflow-container{display:flex;flex-direction:column;gap:1.5rem}.report-card{opacity:0;transform:translateY(20px);transition:opacity 0.5s ease,transform 0.5s ease;position:relative;padding-left:2.5rem}.report-card.visible{opacity:1;transform:translateY(0)}.report-card::before{content:'';position:absolute;left:1rem;top:1rem;width:2px;background:var(--border-color);height:calc(100% - 1rem)}.report-card:last-child::before{display:none}.report-card::after{content:'';position:absolute;left:calc(1rem - 9px);top:0.75rem;width:20px;height:20px;border-radius:50%;background-color:var(--bg-med);border:2px solid 
var(--border-color);z-index:1}.report-card-header{display:flex;align-items:center;gap:0.75rem;margin-bottom:0.75rem}.report-card-header h3{margin:0;font-size:1.2rem;color:var(--text-light)}.report-card-body p{margin:0;color:var(--text-med);line-height:1.6}.report-card-body strong{color:var(--text-light);font-weight:600}.report-card[data-status="pending"]::after{background-color:var(--bg-dark);border-color:var(--accent-primary)}.report-card[data-status="success"]::after{background-color:var(--status-green);border-color:var(--status-green)}.report-card[data-status="failure"]::after{background-color:var(--status-red);border-color:var(--status-red)}.report-card[data-agent-type="summary"]::after{border-color:var(--accent-secondary)}.report-card[data-agent-type="audit"]::after{border-color:var(--status-orange)}.status-icon{font-size:1.1rem}.boolean-true{color:var(--status-green)}.boolean-false{color:var(--status-red)}.spinner{width:16px;height:16px;border:2px solid var(--text-dark);border-top-color:var(--accent-primary);border-radius:50%;animation:spin 1s linear infinite}@keyframes spin{to{transform:rotate(360deg)}}</style></head><body><main><header><h1>Dynamic Agent Workflow <span>v5.0</span></h1></header><section class="card"><form id="analysis-form"><div class="form-grid"><div class="form-group full-width"><label for="prompt">Original Prompt</label><textarea id="prompt" name="prompt" required>Calculate the total distance a car travels if it moves at 60 mph for 2 hours and then 70 mph for 1 hour.</textarea></div><div class="form-group"><label for="supposed_correct_response">Supposed Correct Response</label><input id="supposed_correct_response" name="supposed_correct_response" value="190 miles" required></div><div class="form-group"><label for="final_answer">Final Answer to Check</label><input id="final_answer" name="final_answer" value="190 miles" required></div><div class="form-group full-width"><label for="reasoning_steps">Reasoning Steps</label><textarea id="reasoning_steps" name="reasoning_steps" required>1. First part of the trip: distance = speed × time = 60 mph × 2 hours = 120 miles.
2. Second part of the trip: distance = speed × time = 70 mph × 1 hour = 70 miles.
3. Total distance = 120 miles + 70 miles = 190 miles.</textarea></div><button type="submit" id="submit-btn">Analyze Response</button></div></form></section><section id="results" class="card"><h2 id="round-status"></h2><div id="workflow-container"></div></section></main><script>
HTML_Lesson1/Backup.html (1 match)

<!-- reference the webpage's favicon. note: currently only svg is supported in val town files -->
<link rel="icon" href="/favicon.svg" sizes="any" type="image/svg+xml">

<!-- import the webpage's javascript file -->
wa-hono-react/index.html (1 match)

<title>React Hono Val Town Starter</title>
<!-- <link rel="stylesheet" href="/frontend/style.css"> -->
<link rel="icon" href="/frontend/favicon.svg" type="image/svg+xml">
<link
  rel="stylesheet"
26"title": "Markdown Editor",
27"code":
28"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Markdown Editor</title>\n <link href=\"https://cdn.jsdelivr.net/npm/tailwindcss@2.2.19/dist/tailwind.min.css\" rel=\"stylesheet\">\n</head>\n<body class=\"bg-white\">\n <div class=\"max-w-full mx-auto p-4 pt-6 md:p-6 lg:p-8\">\n <h1 class=\"text-3xl text-center mb-4\">Markdown Editor</h1>\n <div class=\"flex flex-row\">\n <div class=\"editor p-4 rounded-lg border border-gray-200 w-full md:w-1/2\">\n <textarea id=\"editor\" class=\"w-full h-screen p-2 border border-gray-200 rounded-lg\" placeholder=\"Type your Markdown here...\"></textarea>\n </div>\n <div class=\"preview p-4 rounded-lg border border-gray-200 w-full md:w-1/2 ml-2 md:ml-4 lg:ml-8\">\n <div id=\"preview\"></div>\n </div>\n </div>\n <p class=\"text-center mt-4\">Built on <a href=\"https://cerebrascoder.com\">Cerebras Coder</a></p>\n </div>\n\n <script>\n const editor = document.getElementById('editor');\n const preview = document.getElementById('preview');\n\n // Initialize textarea with default markdown\n const defaultMarkdown = `\n# Introduction to Markdown\nMarkdown is a lightweight markup language that is easy to read and write. It is often used for formatting text in plain text editors, chat applications, and even web pages.\n\n## Headers\nHeaders are denoted by the # symbol followed by a space. The number of # symbols determines the level of the header:\n# Heading 1\n## Heading 2\n### Heading 3\n\n## Emphasis\nYou can use emphasis to make your text **bold** or *italic*:\n*Italics*\n**Bold**\n\n## Lists\nYou can use lists to organize your text:\n* Item 1\n* Item 2\n* Item 3\nOr\n1. Item 1\n2. Item 2\n3. Item 3\n\n## Links\nYou can use links to reference external resources:\n[Google](https://www.google.com)\n\n## Images\nYou can use images to add visual content:\n\n`;\n editor.value = defaultMarkdown;\n\n // Update preview on input\n editor.addEventListener('input', () => {\n const markdown = editor.value;\n const html = markdownToHtml(markdown);\n preview.innerHTML = html;\n });\n\n // Initialize preview with default markdown\n const defaultHtml = markdownToHtml(defaultMarkdown);\n preview.innerHTML = defaultHtml;\n\n // Function to convert Markdown to HTML\n function markdownToHtml(markdown) {\n // Bold\n markdown = markdown.replace(/\\*\\*(.*?)\\*\\*/g, '<b>$1</b>');\n\n // Italic\n markdown = markdown.replace(/\\*(.*?)\\*/g, '<i>$1</i>');\n\n // Links\n markdown = markdown.replace(/\\[(.*?)\\]\\((.*?)\\)/g, '<a href=\"$2\">$1</a>');\n\n // Images\n markdown = markdown.replace(/!\\[(.*?)\\]\\((.*?)\\)/g, '<img src=\"$2\" alt=\"$1\">');\n\n // Headings\n markdown = markdown.replace(/(^#{1,6} )(.*)/gm, (match, level, text) => {\n return `<h${level.length}>${text}</h${level.length}>`;\n });\n\n // Lists\n markdown = markdown.replace(/^(\\*|\\d+\\.) (.*)/gm, (match, marker, text) => {\n if (marker.startsWith('*')) {\n return `<li>${text}</li>`;\n } else {\n return `<li>${text}</li>`;\n }\n });\n\n // Line breaks\n markdown = markdown.replace(/\\n/g, '<br>');\n\n // Fix for nested lists\n markdown = markdown.replace(/<li><li>/g, '<li>');\n markdown = markdown.replace(/<\\/li><\\/li>/g, '</li>');\n\n // Wrap lists in ul\n markdown = markdown.replace(/(<li>.*<\\/li>)/g, '<ul>$1</ul>');\n\n return markdown;\n }\n </script>\n</body>\n</html>",
29"performance": {
30"tokensPerSecond": 4092.96,
cerebras_coder/index.html (1 match)

<meta property="og:description" content="Turn your ideas into fully functional apps in less than a second – powered by Llama3.3-70b on Cerebras's super-fast wafer chips. Code is 100% open-source, hosted on Val Town.">
<meta property="og:type" content="website">
<meta property="og:image" content="https://stevekrouse-blob_admin.web.val.run/api/public/CerebrasCoderOG.jpg">

…

<!-- Twitter Card Meta Tags -->
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:title" content="${title}" />
<meta name="twitter:description" content="${description}" />
randomMediaUrl/main.tsx (2 matches)

const files = {
  image: [
    // JPEGs
    "https://upload.wikimedia.org/wikipedia/commons/3/32/The_Employment_of_Women_in_Britain%2C_1914-1918_Q30962.jpg",
…
  return Response.json({
    error:
      `don't have files for ?format=${format}; try 'audio', 'video' or 'image'`,
  });
}
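The snippet above only shows the error branch for an unknown ?format; the happy path presumably picks a random URL from the matching array in `files`. A minimal sketch of that selection step, assuming a uniform random choice:

// Hypothetical sketch of the random-selection step; not shown in the matches.
function pickRandomUrl(urls: string[]): string {
  return urls[Math.floor(Math.random() * urls.length)];
}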
Parse-my-PDF-02/main.tsx (16 matches)

const basePrompt =
  `Generate an image generation model prompt (description) for the previous image, in the Simpsons cartoon style
Be extremely detailed in describing:
- Hair color
…
- Lighting and shadows
- Any props or accessories
The final result should be an image of 2D Simpsons figures that look like the original.
Output only the prompt and nothing else.`;

const settings = {
  text_bot: "gpt-4o",
  image_bot: "FLUX-pro-1.1",
};

…
  server_bot_dependencies: {
    [settings.text_bot]: 1,
    [settings.image_bot]: 1,
  },
  introduction_message: "Send me a picture and I will make it cartoon like.",
…
const imgUrl = lastMsg?.attachments?.at?.(0)?.url;
const url = new URL(httpReq.url);
const imageDescriptionPrompt = basePrompt;

if (!imgUrl) {
57yield "Please upload a image";
  yield events.done();
  return;
}

let baseStatus = "Analyzing image";
const msgToForward = { ...req.query.at(-1) };
msgToForward.content = "Describe the contents of this image";
msgToForward.content = imageDescriptionPrompt;

const query = {
  …
  query: [{
    ...lastMsg,
    content: imageDescriptionPrompt,
    role: "user",
    attachments: lastMsg?.attachments,
…
let text = "";
console.log("sending to gpt4o", query);
yield `Analyzing image...`;

for await (const msg of gpt4oQuery) {
…
}

const imageQuery = {
  ...req,
  query: [{
  …
};

const imageStream = forward({
  toBotName: settings.image_bot,
  query: imageQuery,
  accessKey: req.access_key,
});

for await (const msg of imageStream) {
  yield msg;
}
postherous/edit.html (61 matches)

</button>
<button
  onclick="switchTab('images')"
  id="images-tab"
  class="tab-button px-4 sm:px-6 py-3 font-medium text-sm sm:text-base"
  data-mobile-text="🖼️"
>
  <span>🖼️ Images</span>
</button>
</div>
…
<h4 class="font-medium text-blue-900 mb-2">💡 Editing Tips</h4>
<ul class="text-sm text-blue-800 space-y-1">
  <li>• Use the Images tab to view and manage images for this post</li>
  <li>• HTML content supports full HTML tags and styling</li>
  <li>• Use the Preview tab to see how your content will look</li>
…
</div>

<!-- Images Tab -->
<div id="images-content" class="p-4 sm:p-6 hidden">
  <div class="mb-4">
    <h3 class="text-lg font-medium text-gray-900 mb-2">Post Images</h3>
    <p class="text-sm text-gray-600 mb-4">Images associated with this post. You can copy URLs to use in your content.</p>
  </div>
  <div id="images-list" class="space-y-4">
    <p class="text-gray-500 italic">Load a post to view its images...</p>
  </div>
  <div class="mt-6 p-4 bg-gray-50 border border-gray-200 rounded-lg">
    <h4 class="font-medium text-gray-900 mb-2">Upload New Image</h4>
    <p class="text-sm text-gray-600 mb-3">Upload a new image to use in this post.</p>
    <div class="flex flex-col sm:flex-row gap-3">
      <input
        type="file"
        id="image-upload"
        accept="image/*"
        class="flex-1 px-3 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
      >
      <input
        type="text"
        id="image-alt-text"
        placeholder="Alt text (optional)"
        class="flex-1 px-3 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
      >
      <button
        onclick="uploadImage()"
        id="upload-image-button"
        class="px-4 py-2 bg-green-600 text-white rounded-lg hover:bg-green-700 focus:ring-2 focus:ring-green-500 whitespace-nowrap"
      >
…
const editTab = document.getElementById('edit-tab');
const previewTab = document.getElementById('preview-tab');
const imagesTab = document.getElementById('images-tab');
const editContent = document.getElementById('edit-content');
const previewContent = document.getElementById('preview-content');
const imagesContent = document.getElementById('images-content');

console.log('Tab elements found:', {
  editTab: !!editTab,
  previewTab: !!previewTab,
  imagesTab: !!imagesTab,
  editContent: !!editContent,
  previewContent: !!previewContent,
  imagesContent: !!imagesContent
});

// Reset all tabs - be defensive about missing elements
const tabs = [editTab, previewTab, imagesTab].filter(t => t);
const contents = [editContent, previewContent, imagesContent].filter(c => c);

tabs.forEach(t => t.classList.remove('active'));
…
    showMessage('Preview updated with errors', 'error');
  }
} else if (tab === 'images' && imagesTab && imagesContent) {
  imagesTab.classList.add('active');
  imagesContent.classList.remove('hidden');
  imagesContent.style.display = 'block';
  console.log('Images tab activated');
  try {
    loadPostImages();
  } catch (imagesError) {
    console.error('Error loading images:', imagesError);
    showMessage('Images tab loaded with errors', 'error');
  }
} else {
…
}

async function loadPostImages() {
  if (!currentPost) {
    document.getElementById('images-list').innerHTML = '<p class="text-gray-500 italic">Load a post first to view its images.</p>';
    return;
  }

  try {
    const response = await fetch(`/api/posts/${encodeURIComponent(currentPost.slug)}/images`);

    if (!response.ok) {
      throw new Error('Failed to load images');
    }

    const data = await response.json();
    const imagesList = document.getElementById('images-list');

    if (data.images.length === 0) {
      imagesList.innerHTML = '<p class="text-gray-500 italic">No images found for this post.</p>';
      return;
    }

    imagesList.innerHTML = data.images.map(image => `
      <div class="border border-gray-200 rounded-lg p-4">
        <div class="flex flex-col sm:flex-row sm:items-start gap-4">
          <img src="${image.url}" alt="${escapeHtml(image.altText || image.originalFilename)}" class="w-full sm:w-24 h-48 sm:h-24 object-cover rounded-lg">
          <div class="flex-1 min-w-0">
            <h4 class="font-medium text-gray-900 truncate">${escapeHtml(image.originalFilename)}</h4>
            <p class="text-sm text-gray-500 mt-1">${image.mimeType} • ${Math.round(image.fileSize / 1024)}KB</p>
            ${image.altText ? `<p class="text-sm text-gray-600 mt-1">${escapeHtml(image.altText)}</p>` : ''}
            <div class="mt-2">
              <input
                type="text"
                value="${image.url}"
                readonly
                class="w-full px-2 py-1 text-xs bg-gray-50 border border-gray-200 rounded font-mono"
…
            </div>
            <button
              onclick="copyImageUrl('${image.url}')"
              class="px-3 py-1 text-sm bg-blue-600 text-white rounded hover:bg-blue-700 whitespace-nowrap"
              title="Copy URL"
…
  } catch (error) {
    document.getElementById('images-list').innerHTML = `<p class="text-red-600">Error loading images: ${error.message}</p>`;
  }
}
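The template above calls escapeHtml on filenames and alt text, but that helper is not among the matched lines. A typical implementation, sketched here as an assumption, escapes the five HTML-significant characters:

// Hypothetical sketch; the file's actual escapeHtml may differ.
function escapeHtml(value: string): string {
  return value
    .replace(/&/g, "&amp;")
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/'/g, "&#39;");
}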

function copyImageUrl(url) {
  navigator.clipboard.writeText(url).then(() => {
    showMessage('Image URL copied to clipboard!', 'success');
  }).catch(() => {
    showMessage('Failed to copy URL', 'error');
  …
}

async function uploadImage() {
  if (!currentPost || !currentPassword) {
    showMessage('Please load a post first', 'error');
    …
  }

  const fileInput = document.getElementById('image-upload');
  const altTextInput = document.getElementById('image-alt-text');
  const file = fileInput.files[0];

  if (!file) {
    showMessage('Please select an image file', 'error');
    return;
  }

  const uploadButton = document.getElementById('upload-image-button');
  uploadButton.disabled = true;
  uploadButton.textContent = 'Uploading...';

  try {
    const formData = new FormData();
    formData.append('image', file);
    formData.append('password', currentPassword);
    formData.append('email', currentPost.author_email);
    …
    }

    const response = await fetch('/api/images/upload', {
      method: 'POST',
      body: formData
    …
    if (!response.ok) {
      const errorData = await response.json();
      throw new Error(errorData.error || 'Failed to upload image');
    }

    const data = await response.json();
    showMessage('Image uploaded successfully!', 'success');

    // Clear the form
    …
    altTextInput.value = '';

    // Reload images list
    loadPostImages();

  } catch (error) {
    showMessage(`Error uploading image: ${error.message}`, 'error');
  } finally {
    uploadButton.disabled = false;