registry-vals / file (3 matches)

```ts
});

// API endpoint to get specific file content
app.get("/:zon/:file/raw", async (c: Context) => {
  const { zon, file } = c.req.param();
  const client = new ValTown({ bearerToken: Deno.env.get("VAL_TOWN_API_KEY") });

  try {
// ...
  const { zon, file } = c.req.param();
  const { content } = await c.req.json();
  const client = new ValTown({ bearerToken: Deno.env.get("VAL_TOWN_API_KEY") });

  try {
```
FileDumpThing / upload.ts (2 matches)

```ts
import { readStdinToBuffer } from "./utils.ts";

const API_URL = `https://filedumpthing.val.run/api/upload`;

async function main() {
// ...
  }

  const response = await fetch(API_URL, {
    method: "POST",
    body: formData,
```
FileDumpThing / README.md (4 matches)

- `index.html`: HTML template

- **Backend**: Hono server for handling API requests
  - `index.ts`: API routes for file uploads and serving stored content
  - `api.ts`: API endpoints for file uploads

- **Shared**: Code shared between frontend and backend

- Built with TypeScript for type safety
- React for the frontend UI
- Hono for the backend API
- Supports various file types with appropriate MIME type detection
- Smart content detection (checks for null bytes and UTF-8 validity)
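The last bullet describes a simple classifier: content is treated as text only if it contains no null bytes and decodes as valid UTF-8. A minimal sketch of such a check (the project's actual `isTextContent` in `shared/utils.ts` may differ) could look like:

```ts
// Hedged sketch of null-byte plus UTF-8 validity detection, per the README.
export function isTextContent(buffer: Uint8Array): boolean {
  // A null byte is a strong signal of binary data.
  if (buffer.includes(0)) return false;
  try {
    // With fatal: true, TextDecoder throws on malformed UTF-8 sequences.
    new TextDecoder("utf-8", { fatal: true }).decode(buffer);
    return true;
  } catch {
    return false;
  }
}
```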
FileDumpThing / README.md (1 match)

1. Reads content from stdin
2. Automatically detects if it's text or binary
3. Uploads it to the API at `filedumpthing.val.run`
4. Returns a shareable URL
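Combined with the `upload.ts` excerpts above, a minimal end-to-end sketch of that flow might look like the following; the form field name, filename, and `{ url }` response shape are assumptions, not confirmed by the excerpts:

```ts
import { readStdinToBuffer } from "./utils.ts";
import { isTextContent } from "../shared/utils.ts";

const API_URL = `https://filedumpthing.val.run/api/upload`;

async function main() {
  // Step 1: read all of stdin into a byte buffer.
  const buffer = await readStdinToBuffer();

  // Step 2: detect text vs. binary to pick a sensible MIME type.
  const type = isTextContent(buffer) ? "text/plain" : "application/octet-stream";

  const formData = new FormData();
  formData.append("file", new Blob([buffer], { type }), "stdin"); // hypothetical field/file names

  // Step 3: upload to the API.
  const response = await fetch(API_URL, { method: "POST", body: formData });

  // Step 4: print the shareable URL (assumes the API returns { url }).
  const { url } = await response.json();
  console.log(url);
}

if (import.meta.main) await main();
```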
FileDumpThing / index.ts (2 matches)

```ts
import { Hono } from "npm:hono";
import { getContentTypeHeaders } from "../shared/mimetype.ts";
import api from "./api.ts";

const app = new Hono();
// ...
  .get("/frontend/**/*", c => serveFile(c.req.path, import.meta.url))
  .get("/shared/**/*", c => serveFile(c.req.path, import.meta.url))
  .route("/api", api)
  .get("/blob", async c => {
    const key = c.req.query("key");
```
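The `/blob` route is cut off at the key lookup. A speculative completion, assuming Val Town's `std/blob` API (`blob.get(key)` returns a `Response`) and that `getContentTypeHeaders` maps a key to response headers:

```ts
.get("/blob", async c => {
  const key = c.req.query("key");
  if (!key) return c.text("Missing key parameter", 400);
  const stored = await blob.get(key);    // std/blob lookup; import not shown in the excerpt
  const data = await stored.arrayBuffer();
  return c.body(data, 200, getContentTypeHeaders(key)); // assumed helper signature
});
```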
FileDumpThing / FileDumper.tsx (2 matches)

The same upload call appears twice in the component:

```tsx
try {
  const response = await fetch("/api/upload", {
    method: "POST",
    body: formData,
```
FileDumpThing / api.ts (2 matches)

```ts
import { isTextContent } from "../shared/utils.ts";

const api = new Hono()
  .post("/upload", async c => {
    try {
// ...
});

export default api;
```
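The handler body between these two excerpts is elided. A hedged sketch of what a `/upload` handler along these lines plausibly does, reusing `isTextContent` and the `std/blob` storage seen elsewhere in this project (the field name, key scheme, and response shape are assumptions):

```ts
.post("/upload", async c => {
  try {
    const form = await c.req.formData();
    const file = form.get("file") as File;         // hypothetical field name
    const bytes = new Uint8Array(await file.arrayBuffer());
    const kind = isTextContent(bytes) ? "text" : "binary";
    const key = `${kind}-${crypto.randomUUID()}`;  // hypothetical key scheme
    await blob.set(key, bytes);                    // std/blob write
    return c.json({ url: `/blob?key=${key}` });    // served by the /blob route above
  } catch (error) {
    return c.json({ error: (error as Error).message }, 500);
  }
})
```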
tsjScraper / exampleDiv.html (1 match)

```html
<a href="https://timothydereuse.github.io/" target="_blank"><strong>Tim
de Reuse:</strong></a> There’s been a zillion cultural artifacts
produced in the last decade about capital-T Trauma; it’s so zeitgeisty
you can’t even make an animated Disney feature without fitting the word
“intergenerational” somewhere on the back of
```
xxxclearinghouse_urlscraper / index.ts (11 matches)

```ts
import { blob } from "https://esm.town/v/std/blob";

interface ScrapingResult {
  products: any[];
  metadata: {
// ...
};

// Advanced scraping function with error handling and rate limiting
async function scrapeWebsite(query: string, websiteUrl: string): Promise<ScrapingResult> {
  const startTime = Date.now();
  const results: ScrapingResult = {
    products: [],
    metadata: {
// ...
  );

  console.log(`Scraping ${urlsToScrape.length} URLs for ${hostname}`);

  for (const url of urlsToScrape) {
// ...
    } catch (error) {
      results.errors?.push(`Error scraping ${url}: ${error.message}`);
      console.error(`Error scraping ${url}:`, error);
    }
  }
// ...
  } catch (error) {
    results.errors?.push(`General scraping error: ${error.message}`);
    results.metadata.duration = Date.now() - startTime;
    console.error('Scraping failed:', error);
    return results;
  }
}
// ...
// Main export function
export default async function urlScrapeTemplate(query: string, website: string): Promise<ScrapingResult> {
  try {
    // Input validation
// ...
    }

    // Perform scraping
    const result = await scrapeWebsite(query, normalizedWebsite);
```
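The comment on `scrapeWebsite` promises rate limiting, but none of the matched lines show it. One common shape for that loop, offered purely as an assumed sketch, is a fixed delay between sequential requests:

```ts
// Assumed sketch: sequential fetches with a fixed pause between them.
async function scrapeUrls(urlsToScrape: string[], results: { products: any[]; errors?: string[] }) {
  const RATE_LIMIT_MS = 1000; // hypothetical delay between requests

  for (const url of urlsToScrape) {
    try {
      const response = await fetch(url);
      const html = await response.text();
      // ...parse products out of html and push them into results.products...
    } catch (error) {
      results.errors?.push(`Error scraping ${url}: ${(error as Error).message}`);
      console.error(`Error scraping ${url}:`, error);
    }
    // Pause so the target site isn't hammered.
    await new Promise(resolve => setTimeout(resolve, RATE_LIMIT_MS));
  }
}
```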
xxxclearinghouse_orchestrator / index.ts (13 matches)

```ts
}

interface ScrapingResult {
  website: string;
  status: 'success' | 'error';
// ...
  query: string;
  timestamp: string;
  results: ScrapingResult[];
  consolidatedProducts: any[];
  summary: {
// ...
  console.log(`Orchestrating scrape for "${query}" across ${websites.length} sites`);

  // Parallel scraping with proper error handling
  const scrapingPromises = websites.map(async (website: string): Promise<ScrapingResult> => {
    try {
      const startTime = Date.now();

      // Call the URL scrape template
      const scrapingResult = await scrapeWebsite(query, website);
      const endTime = Date.now();
// ...
        website,
        status: 'success',
        data: scrapingResult,
        metadata: {
          scrapedAt: new Date().toISOString(),
// ...
      };
    } catch (error) {
      console.error(`Error scraping ${website}:`, error);
      return {
        website,
// ...
  });

  const scrapingResults = await Promise.all(scrapingPromises);

  // Consolidate all successful results
  const allProducts: any[] = [];
  scrapingResults.forEach(result => {
    if (result.status === 'success' && result.data?.products) {
      allProducts.push(...result.data.products);
// ...
  // Calculate summary statistics
  const summary = calculateSummary(scrapingResults, consolidatedProducts);

  // Prepare final response
// ...
    query,
    timestamp: new Date().toISOString(),
    results: scrapingResults,
    consolidatedProducts,
    summary
// ...
}

// Simplified scraping function that calls our URL scrape template
async function scrapeWebsite(query: string, website: string): Promise<any> {
  // This would normally import and call the actual URL scrape template
// ...

// Calculate summary statistics
function calculateSummary(results: ScrapingResult[], products: any[]): any {
  const successfulSites = results.filter(r => r.status === 'success').length;
  const totalProducts = products.length;
```
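The excerpt ends mid-function. Judging only from the two statistics it computes, one plausible completion of `calculateSummary` (assumed, not taken from the source) is:

```ts
function calculateSummary(results: ScrapingResult[], products: any[]): any {
  const successfulSites = results.filter(r => r.status === 'success').length;
  const totalProducts = products.length;
  // The fields below are a guess at what the orchestrator's summary might report.
  return {
    totalSites: results.length,
    successfulSites,
    failedSites: results.length - successfulSites,
    totalProducts,
  };
}
```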