13async function fetchRandomWikipediaArticle(): Promise<{ title: string; extract: string; pageId: number } | null> {
14 try {
15 const response = await fetch('https://en.wikipedia.org/w/api.php?action=query&format=json&prop=extracts&meta=&generator=random&formatversion=2&exsentences=1&explaintext=1&grnnamespace=0');
16
17 if (!response.ok) {
48 <meta name="viewport" content="width=device-width, initial-scale=1.0">
49 <title>Random Wikipedia Article - Error</title>
50 <link rel="preconnect" href="https://fonts.googleapis.com">
51 <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
52 <link href="https://fonts.googleapis.com/css2?family=Inconsolata:wght@400;700&display=swap" rel="stylesheet">
53 <style>
54 body {
121 <meta name="viewport" content="width=device-width, initial-scale=1.0">
122 <title>${article.title} - Random Wikipedia Article</title>
123 <link rel="preconnect" href="https://fonts.googleapis.com">
124 <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
125 <link href="https://fonts.googleapis.com/css2?family=Inconsolata:wght@400;700&display=swap" rel="stylesheet">
126 <style>
127 body {
6
- **Server-Side Rendering**: All content is fetched and rendered on the server — no JavaScript required
- **Random Article Fetching**: Uses the Wikipedia API to fetch random articles on each page load
- **Beautiful Design**: Dark gray background with a light gray card featuring sharp corners and a yellow drop shadow
- **Smooth Animations**: CSS-only animations for the card entrance effect
- **Responsive**: Works well on desktop and mobile devices
- **Error Handling**: Graceful error handling with retry functionality
- **Fast Loading**: No client-side API calls, so content displays instantly
15
16## How it Works
17
1. When a user visits the page, the server fetches a random Wikipedia article using the Wikipedia API
2. The server renders the complete HTML with the article title and first-sentence extract embedded
3. The page displays immediately, with an SVG world icon that links to the full article
4. Users can click "Get Another Article" to reload the page and get new random content
22
23## API Used
24
25The app calls the Wikipedia API with these parameters:
26- `action=query` - Query action
27- `format=json` - JSON response format
- **Server-Side Rendering**: Built as a TypeScript HTTP val that fetches Wikipedia data server-side
- **No JavaScript**: Completely static HTML with CSS-only animations
- **Wikipedia API Integration**: Fetches random articles using the Wikipedia API on the server
- **Error Handling**: Proper error states with retry functionality
- **Clean Architecture**: Separation of data fetching and HTML generation
- **Performance**: Fast loading since no client-side API calls are needed
44
45## Styling
8
1. Sign up for [Cerebras](https://cloud.cerebras.ai/)
2. Get a Cerebras API key
3. Save it in a project environment variable named `CEREBRAS_API_KEY`
179 } catch (error) {
180 Toastify({
181 text: "We may have hit our Cerebras Usage limits. Try again later or fork this and use your own API key.",
182 position: "center",
183 duration: 3000,
16 };
17 } else {
18 const client = new Cerebras({ apiKey: Deno.env.get("CEREBRAS_API_KEY") });
19 const completion = await client.chat.completions.create({
20 messages: [
226 }
227 </style>
228 <link href="https://fonts.googleapis.com/css2?family=Amiri:wght@400;700&display=swap" rel="stylesheet">
229</head>
230<body>
320
321 <div class="highlight">
322 <p>Cultural identity plays a pivotal role in shaping attitudes toward English language learning among Saudi students. A mixed-methods study conducted at Majmaah University revealed that participants viewed English as instrumental for career advancement and financial success but did not perceive it as a threat to their core cultural or religious identities.</p>
323 </div>
324 </section>
193 }
194 </style>
195 <link href="https://fonts.googleapis.com/css2?family=Amiri:wght@400;700&display=swap" rel="stylesheet">
196</head>
197<body>
280
281 <div class="highlight">
282 <p>Cultural identity plays a pivotal role in shaping attitudes toward English language learning among Saudi students. A mixed-methods study conducted at Majmaah University revealed that participants viewed English as instrumental for career advancement and financial success but did not perceive it as a threat to their core cultural or religious identities.</p>
283 </div>
284 </section>
1// This file would contain the actual Google Sheets import functionality
2// For a complete implementation, we would need to:
3// 1. Use the Google Sheets API to fetch sheet data
4// 2. Convert the data to a format compatible with x-spreadsheet
5// 3. Return the formatted data
6
7// For now, this is a placeholder that would be expanded with actual implementation
8// using the Google Sheets API and SheetJS for data conversion
9
10export async function importFromGoogleSheets(sheetUrl: string) {
18
19 // In a real implementation, we would:
20 // 1. Call the Google Sheets API using the sheet ID
21 // 2. Process the response with SheetJS
22 // 3. Convert to x-spreadsheet format
1# Amazon Reviews Scraper API
2
3A simple API that scrapes Amazon product reviews and returns them in CSV format.
4
5## Features
- Support for pagination (multiple pages of reviews)
- Includes review title, rating, author, date, content, and verification status
- Respectful scraping with delays between requests
12
13## Usage
31
32- `GET /health` - Health check
33- `GET /usage` - API usage information
34
35## Supported Amazon URL Formats
36
37The API supports various Amazon URL formats:
38- `https://www.amazon.com/dp/PRODUCT_ID`
39- `https://www.amazon.com/product/PRODUCT_ID`
52## Rate Limiting
53
54The API includes a 1-second delay between page requests to be respectful to Amazon's servers.
55
56## Error Handling
57
The API returns appropriate HTTP status codes:
- `400`: Invalid or missing URL parameter
- `404`: No reviews found for the product
- `500`: Server error during scraping
62
63## Important Notes
70## Example Response
71
72When successful, the API returns a CSV file with headers:
73```csv
74Title,Rating,Author,Date,Content,Verified Purchase
139}
140
141// Main API endpoint
142app.get('/', async (c) => {
143 const url = c.req.query('url');
159
160 try {
161 console.log(`Scraping reviews for product ID: ${productId}`);
162 const reviews = await scrapeAmazonReviews(productId, Math.min(maxPages, 10)); // Limit to 10 pages max
163
178
179 } catch (error) {
180 console.error('Error scraping reviews:', error);
181 return c.json({
182 error: 'Failed to scrape reviews. The product may not be accessible or Amazon may be blocking requests.'
187// Health check endpoint
188app.get('/health', (c) => {
189 return c.json({ status: 'ok', message: 'Amazon Reviews Scraper API is running' });
190});
191
193app.get('/usage', (c) => {
194 return c.json({
195 description: 'Amazon Product Reviews Scraper API',
196 endpoints: {
197 'GET /': {