Val Town Code Search — Return to Val Town

API Access

You can access search results via JSON API by adding format=json to your query:

https://codesearch.val.run/?q=fetch&page=133&format=json

For typeahead suggestions, use the /typeahead endpoint:

https://codesearch.val.run/typeahead?q=fetch

Returns an array of strings in format "username" or "username/projectName"

Found 9764 results for "fetch" (609ms)

lightweight — index.ts — 2 matches

@bradnoble•Updated 1 week ago
99});
100
101// Export the app's fetch handler as the default export for Val Town's HTTP trigger
102export default app.fetch;

MaxCareViz2 — index.ts — 2 matches

@kvey•Updated 1 week ago
21});
22
23// HTTP vals expect an exported "fetch handler"
24// This is how you "run the server" in Val Town with Hono
25export default app.fetch;

sqliteAdminDashboard — main.tsx — 13 matches

@zylanera•Updated 1 week ago
20
21 useEffect(() => {
22 fetchTables();
23 }, []);
24
25 const fetchTables = async () => {
26 try {
27 const response = await fetch("/tables");
28 const data = await response.json();
29 setTables(data);
30 } catch (err) {
31 setError("Failed to fetch tables");
32 }
33 };
34
35 const fetchTableData = async (tableName) => {
36 try {
37 const dataResponse = await fetch(`/table/${tableName}`);
38 const data = await dataResponse.json();
39 setTableData(data);
40
41 const schemaResponse = await fetch(`/schema/${tableName}`);
42 const schema = await schemaResponse.json();
43 setTableSchema(schema);
51 setNewRow(emptyRow);
52 } catch (err) {
53 setError(`Failed to fetch data for table ${tableName}`);
54 }
55 };
61 const handleSave = async (index) => {
62 try {
63 const response = await fetch(`/update/${selectedTable}`, {
64 method: "POST",
65 headers: {
72 }
73 setEditingRow(null);
74 await fetchTableData(selectedTable);
75 } catch (err) {
76 setError(`Failed to update row: ${err.message}`);
90 const handleAddRow = async () => {
91 try {
92 const response = await fetch(`/insert/${selectedTable}`, {
93 method: "POST",
94 headers: {
100 throw new Error("Failed to add new row");
101 }
102 await fetchTableData(selectedTable);
103 // Reset newRow to empty values
104 const emptyRow = Object.keys(newRow).reduce((acc, key) => {
120 <li
121 key={table}
122 onClick={() => fetchTableData(table)}
123 className={selectedTable === table ? "active" : ""}
124 >

templateTwitterAlert — main.tsx — 1 match

@poarox•Updated 1 week ago
19 : Math.floor((Date.now() - 2 * 24 * 60 * 60 * 1000) / 1000);
20
21 // Fetch and log tweets
22 const response = await socialDataSearch(`${query} since_time:${timeFrame}`);
23 console.log("Response from socialDataSearch:", response);

MaxCareFeature — index.ts — 2 matches

@kvey•Updated 1 week ago
21});
22
23// HTTP vals expect an exported "fetch handler"
24// This is how you "run the server" in Val Town with Hono
25export default app.fetch;

MaxCareViz — index.ts — 2 matches

@kvey•Updated 1 week ago
21});
22
23// HTTP vals expect an exported "fetch handler"
24// This is how you "run the server" in Val Town with Hono
25export default app.fetch;

blob_admin — main.tsx — 1 match

@robsalasco•Updated 1 week ago
199});
200
201export default lastlogin((request: Request) => app.fetch(request));

blob_admin — app.tsx — 22 matches

@robsalasco•Updated 1 week ago
231 const [isDragging, setIsDragging] = useState(false);
232
233 const fetchBlobs = useCallback(async () => {
234 setLoading(true);
235 try {
236 const response = await fetch(`/api/blobs?prefix=${encodeKey(searchPrefix)}&limit=${limit}`);
237 const data = await response.json();
238 setBlobs(data);
239 } catch (error) {
240 console.error("Error fetching blobs:", error);
241 } finally {
242 setLoading(false);
245
246 useEffect(() => {
247 fetchBlobs();
248 }, [fetchBlobs]);
249
250 const handleSearch = (e) => {
261 setBlobContentLoading(true);
262 try {
263 const response = await fetch(`/api/blob?key=${encodeKey(clickedBlob.key)}`);
264 const content = await response.text();
265 setSelectedBlob({ ...clickedBlob, key: decodeKey(clickedBlob.key) });
266 setEditContent(content);
267 } catch (error) {
268 console.error("Error fetching blob content:", error);
269 } finally {
270 setBlobContentLoading(false);
275 const handleSave = async () => {
276 try {
277 await fetch(`/api/blob?key=${encodeKey(selectedBlob.key)}`, {
278 method: "PUT",
279 body: editContent,
287 const handleDelete = async (key) => {
288 try {
289 await fetch(`/api/blob?key=${encodeKey(key)}`, { method: "DELETE" });
290 setBlobs(blobs.filter(b => b.key !== key));
291 if (selectedBlob && selectedBlob.key === key) {
304 const key = `${searchPrefix}${file.name}`;
305 formData.append("key", encodeKey(key));
306 await fetch("/api/blob", { method: "POST", body: formData });
307 const newBlob = { key, size: file.size, lastModified: new Date().toISOString() };
308 setBlobs([newBlob, ...blobs]);
312 }
313 }
314 fetchBlobs();
315 };
316
326 try {
327 const fullKey = `${searchPrefix}${key}`;
328 await fetch(`/api/blob?key=${encodeKey(fullKey)}`, {
329 method: "PUT",
330 body: "",
341 const handleDownload = async (key) => {
342 try {
343 const response = await fetch(`/api/blob?key=${encodeKey(key)}`);
344 const blob = await response.blob();
345 const url = window.URL.createObjectURL(blob);
360 if (newKey && newKey !== oldKey) {
361 try {
362 const response = await fetch(`/api/blob?key=${encodeKey(oldKey)}`);
363 const content = await response.blob();
364 await fetch(`/api/blob?key=${encodeKey(newKey)}`, {
365 method: "PUT",
366 body: content,
367 });
368 await fetch(`/api/blob?key=${encodeKey(oldKey)}`, { method: "DELETE" });
369 setBlobs(blobs.map(b => b.key === oldKey ? { ...b, key: newKey } : b));
370 if (selectedBlob && selectedBlob.key === oldKey) {
380 const newKey = `__public/${key}`;
381 try {
382 const response = await fetch(`/api/blob?key=${encodeKey(key)}`);
383 const content = await response.blob();
384 await fetch(`/api/blob?key=${encodeKey(newKey)}`, {
385 method: "PUT",
386 body: content,
387 });
388 await fetch(`/api/blob?key=${encodeKey(key)}`, { method: "DELETE" });
389 setBlobs(blobs.map(b => b.key === key ? { ...b, key: newKey } : b));
390 if (selectedBlob && selectedBlob.key === key) {
399 const newKey = key.slice(9); // Remove "__public/" prefix
400 try {
401 const response = await fetch(`/api/blob?key=${encodeKey(key)}`);
402 const content = await response.blob();
403 await fetch(`/api/blob?key=${encodeKey(newKey)}`, {
404 method: "PUT",
405 body: content,
406 });
407 await fetch(`/api/blob?key=${encodeKey(key)}`, { method: "DELETE" });
408 setBlobs(blobs.map(b => b.key === key ? { ...b, key: newKey } : b));
409 if (selectedBlob && selectedBlob.key === key) {

mandate — ui.ts — 7 matches

@salon•Updated 1 week ago
23<body>
24<h1>Superpowered Agent Platform Demo V2 (Parallel)</h1>
25<p>This demo runs the <code>enhancedAnalysis</code> workflow. Steps 1 (fetch) and 2 (summarize) can run in parallel. Step 3 (combine) waits for both.</p>
26<form id="demoForm">
27 <label for="userText">Enter text for summarization (Required):</label>
28 <textarea id="userText" name="userText" required>The quick brown fox jumps over the lazy dog. This is a longer sentence to ensure the summarizer has enough text to work with and demonstrate its functionality properly. This workflow will summarize this text and optionally fetch data from jsonplaceholder based on the checkbox below. The final step combines the results.</textarea>
29 <label class="checkbox-label">
30 <input type="checkbox" id="enableFetch" name="enableExternalFetch" value="true" checked>
31 Enable External Data Fetch (Conditional Step 1)?
32 </label><br/>
33 <button type="submit" id="submitButton">Run Enhanced Workflow</button>
39 const resultBox = document.getElementById('resultBox');
40 const logBox = document.getElementById('logBox');
41 const enableFetchCheckbox = document.getElementById('enableFetch');
42 const submitButton = document.getElementById('submitButton');
43
50 const initialPayload = {
51 userText: form.userText.value,
52 enableExternalFetch: enableFetchCheckbox.checked
53 };
54 console.log("Sending payload:", initialPayload);
55
56 try {
57 const res = await fetch(window.location.pathname, {
58 method: 'POST',
59 headers: { 'Content-Type': 'application/json' },

mandate — agent_ecosystem.ts — 72 matches

@salon•Updated 1 week ago
1// agent_ecosystem.ts
2import { XMLParser } from "https://esm.sh/fast-xml-parser@4.2.5"; // For parsing RSS/Atom feeds
3// import { combinerAgent, fetchAgent, summarizerAgent } from "https://esm.town/v/salon/mandate/agents.ts";
4import { AgentContext, AgentInput, AgentOutput } from "https://esm.town/v/salon/mandate/interfaces.ts";
5import { fetch } from "https://esm.town/v/std/fetch";
6import { OpenAI } from "https://esm.town/v/std/openai";
7// If Deno.env is used, ensure it's for Val Town secrets if possible or clearly noted.
56}
57
58// Your provided fetchAgent (Note: "Workspaceing" seems like a typo, changed to "Fetching")
59export async function fetchAgent(
60 input: AgentInput<{ url_from_input?: string; maxHeadlines?: number }>,
61 context: AgentContext,
76
77 const DEFAULT_RSS_URL = "https://feeds.npr.org/1001/rss.xml";
78 const urlToFetch = payload?.url_from_input ?? config?.rssFeedUrl ?? DEFAULT_RSS_URL;
79 const maxHeadlines = Number(payload?.maxHeadlines ?? config?.maxHeadlines ?? 5);
80
81 log("INFO", "FetchAgent", `Workspaceing headlines from ${urlToFetch}, max: ${maxHeadlines}`);
82
83 try {
84 const resp = await fetch(urlToFetch, { headers: { "User-Agent": "NewsAnalysisBot/1.0 (ValTown)" } }); // Added User-Agent
85 const fetchedContentType = resp.headers.get("content-type")?.toLowerCase() || "";
86 const rawResponseText = await resp.text();
87
89 let errorBody = rawResponseText;
90 if (errorBody.length > 500) errorBody = errorBody.substring(0, 500) + "...";
91 throw new Error(`Workspace failed: ${resp.status} ${resp.statusText}. URL: ${urlToFetch}. Body: ${errorBody}`);
92 }
93
98
99 if (
100 fetchedContentType.includes("xml") || fetchedContentType.includes("rss") || urlToFetch.endsWith(".xml")
101 || urlToFetch.endsWith(".rss") || urlToFetch === DEFAULT_RSS_URL
102 ) {
103 try {
153 }
154 if (parsedSuccessfully)
155 log("INFO", "FetchAgent", `Successfully parsed ${headlines.length} headlines from "${feedTitle}".`);
156 else {
157 parsingMessage = "RSS/Atom structure not as expected or no items found.";
158 log("WARN", "FetchAgent", parsingMessage);
159 }
160 } catch (parseError: any) {
161 parsingMessage = `Failed to parse XML/RSS content. Error: ${parseError.message}`;
162 log("WARN", "FetchAgent", `${parsingMessage} from URL: ${urlToFetch}`);
163 }
164 } else {
165 parsingMessage =
166 `Content type "${fetchedContentType}" is not XML/RSS. Not attempting RSS parse. Raw text will be available.`;
167 log("INFO", "FetchAgent", parsingMessage);
168 }
169 const outputData = {
170 headlines,
171 feedTitle,
172 sourceUrl: urlToFetch,
173 ...(parsingMessage && !parsedSuccessfully ? { message: parsingMessage } : {}),
174 };
176 mandateId,
177 correlationId: taskId,
178 payload: { data: outputData, rawText: rawResponseText, contentType: fetchedContentType },
179 };
180 } catch (e: any) {
181 log("ERROR", "FetchAgent", `Workspace or processing failed: ${e.message}`, e);
182 return {
183 mandateId,
187 headlines: [],
188 feedTitle: "Error",
189 sourceUrl: urlToFetch,
190 message: `Workspace or processing failed: ${e.message}`,
191 },
219 try {
220 const summaryText = payload.summary ?? "N/A";
221 let externalDataDescription = "External Data: Not Fetched/Available or not in expected format.";
222 // ... (rest of your description logic)
223 if (
292}
293
294interface FetchedArticle {
295 id: string;
296 title: string;
300 sourceFeedName: string;
301 sourceFeedUrl: string;
302 rawContent?: string; // Full article text, if fetched
303 cleanedSummary?: string; // HTML-stripped summary for processing
304 isRelevant?: boolean;
314 config?: AnalysisConfig;
315 discoveredFeeds?: { name: string; url: string }[]; // Feeds to be used
316 fetchedArticles?: FetchedArticle[]; // Raw from fetch (multiple feeds)
317 processedArticles?: FetchedArticle[]; // Cleaned articles
318 relevantArticles?: FetchedArticle[]; // Filtered by relevance
319 articlesWithSentiment?: FetchedArticle[];
320 articlesWithThemes?: FetchedArticle[];
321 trendReport?: any;
322 anomalyReport?: any;
406}
407
408// 3. ParallelFetchAgent (This agent will call the user-provided fetchAgent for each feed)
409export async function parallelFetchAgent(
410 input: AgentInput<{ feedsToFetch: { name: string; url: string }[]; maxHeadlinesPerFeed: number }>,
411 context: AgentContext, // This context will be passed to the individual fetchAgent calls
412): Promise<AgentOutput<{ fetchedArticles: FetchedArticle[] }>> {
413 const { log } = context; // Main orchestrator log
414 const { feedsToFetch, maxHeadlinesPerFeed } = input.payload;
415 let allArticles: FetchedArticle[] = [];
416
417 log("INFO", "ParallelFetchAgent", `Starting parallel fetch from ${feedsToFetch.length} feeds.`);
418
419 for (const feed of feedsToFetch) {
420 log("INFO", "ParallelFetchAgent", `Dispatching fetch for ${feed.name} (${feed.url})`);
421 // Call the user-provided fetchAgent
422 const singleFetchOutput = await fetchAgent(
423 {
424 mandateId: input.mandateId,
433 );
434
435 if (singleFetchOutput.payload?.data?.headlines) {
436 const transformedArticles = singleFetchOutput.payload.data.headlines.map(h => ({
437 id: h.link || `${feed.url}-${h.title}-${Date.now()}`, // Ensure an ID
438 title: h.title,
444 }));
445 allArticles.push(...transformedArticles);
446 log("INFO", "ParallelFetchAgent", `Workspaceed ${transformedArticles.length} articles from ${feed.name}.`);
447 } else {
448 log(
449 "WARN",
450 "ParallelFetchAgent",
451 `No headlines fetched or error from ${feed.name}. Error: ${
452 singleFetchOutput.error || singleFetchOutput.payload?.data?.message
453 }`,
454 );
455 }
456 }
457 log("SUCCESS", "ParallelFetchAgent", `Total articles fetched from all feeds: ${allArticles.length}.`);
458 return { mandateId: input.mandateId, correlationId: input.taskId, payload: { fetchedArticles: allArticles } };
459}
460
461// 4. ArticleCleaningAgent
462export async function articleCleaningAgent(
463 input: AgentInput<{ articles: FetchedArticle[] }>,
464 context: AgentContext,
465): Promise<AgentOutput<{ processedArticles: FetchedArticle[] }>> {
466 const { log } = context;
467 log("INFO", "ArticleCleaningAgent", `Cleaning ${input.payload.articles.length} articles.`);
478// 5. RelevanceAssessmentAgent
479export async function relevanceAssessmentAgent(
480 input: AgentInput<{ articles: FetchedArticle[]; topic: string; keywords: string[] }>,
481 context: AgentContext,
482): Promise<AgentOutput<{ relevantArticles: FetchedArticle[] }>> {
483 const { log } = context;
484 const { articles, topic, keywords } = input.payload;
485 const openai = new OpenAI();
486 let relevantArticles: FetchedArticle[] = [];
487 log("INFO", "RelevanceAssessmentAgent", `Assessing relevance for ${articles.length} articles on topic: ${topic}`);
488
536// 6. ContentExtractionAgent (Simplified: mainly uses cleaned summary, conceptual for full text)
537export async function contentExtractionAgent(
538 input: AgentInput<{ articles: FetchedArticle[]; analysisDepth: "cursory" | "standard" | "deep" }>,
539 context: AgentContext,
540): Promise<AgentOutput<{ articlesWithContent: FetchedArticle[] }>> {
541 const { log } = context;
542 const { articles, analysisDepth } = input.payload;
543 log("INFO", "ContentExtractionAgent", `Content extraction. Depth: ${analysisDepth}. Articles: ${articles.length}`);
544 // Simulating full content: For "deep" analysis, one might try to fetch article.link.
545 // Here, we'll just ensure `rawContent` is populated from `cleanedSummary` for LLM processing consistency.
546 const updatedArticles = articles.map(article => ({
555// 7. SentimentAnalysisAgent
556export async function sentimentAnalysisAgent(
557 input: AgentInput<{ articles: FetchedArticle[] }>,
558 context: AgentContext,
559): Promise<AgentOutput<{ articlesWithSentiment: FetchedArticle[] }>> {
560 const { log } = context;
561 const openai = new OpenAI();
562 let articlesWithSentiment: FetchedArticle[] = [];
563 if (!input.payload.articles)
564 return { mandateId: input.mandateId, correlationId: input.taskId, payload: { articlesWithSentiment } };
569 const prompt =
570 `Analyze the sentiment of the following text. Respond with a JSON object: {"sentiment": "positive" | "negative" | "neutral" | "mixed", "score": float_between_-1_and_1 (optional, e.g. 0.7 for positive)}.\n\nText:\n${textToAnalyze}`;
571 let sentimentResult: FetchedArticle["sentiment"] = "neutral";
572 let sentimentScore: FetchedArticle["sentimentScore"] = 0.0;
573 try {
574 const completion = await openai.chat.completions.create({
603// 8. KeyThemeExtractionAgent
604export async function keyThemeExtractionAgent(
605 input: AgentInput<{ articles: FetchedArticle[]; topic: string }>,
606 context: AgentContext,
607): Promise<AgentOutput<{ articlesWithThemes: FetchedArticle[] }>> {
608 const { log } = context;
609 const openai = new OpenAI();
610 let articlesWithThemes: FetchedArticle[] = [];
611 log(
612 "INFO",
619 const prompt =
620 `For the text (topic: "${input.payload.topic}"), identify 2-4 key themes and prominent named entities (people, orgs). JSON: {"themes": ["theme1", ...], "entities": [{"text": "EntityName", "type": "PERSON"|"ORG"|"LOC"|"MISC"}]}.\nText:\n${textToAnalyze}`;
621 let themeResult: { themes: string[]; entities: FetchedArticle["entities"] } = { themes: [], entities: [] };
622 try {
623 const completion = await openai.chat.completions.create({
648// 9. TrendAndAnomalyDetectionAgent
649export async function trendAndAnomalyDetectionAgent(
650 input: AgentInput<{ articlesWithThemes: FetchedArticle[]; topic: string; historicalContextSummary?: string }>, // historicalContext is optional
651 context: AgentContext,
652): Promise<AgentOutput<{ trendReport: any; anomalyReport: any }>> {
980 if (sourcesOut.error) throw new Error(`SourceValidationAgent Error: ${sourcesOut.error}`);
981
982 const pFetchOut = await parallelFetchAgent({
983 mandateId,
984 taskId: "pfetch-3",
985 payload: {
986 feedsToFetch: workflowData.discoveredFeeds!,
987 maxHeadlinesPerFeed: workflowData.config!.maxHeadlinesPerFeed,
988 },
989 }, context);
990 workflowData.fetchedArticles = pFetchOut.payload.fetchedArticles;
991 if (pFetchOut.error) throw new Error(`ParallelFetchAgent Error: ${pFetchOut.error}`);
992
993 const cleanOut = await articleCleaningAgent({
994 mandateId,
995 taskId: "clean-4",
996 payload: { articles: workflowData.fetchedArticles! },
997 }, context);
998 workflowData.processedArticles = cleanOut.payload.processedArticles;

agentplex-deal-flow-email-fetch — 1 file match

@anandvc•Updated 1 day ago

proxyFetch — 2 file matches

@vidar•Updated 3 days ago