shirayuki-anime-scraper-api 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/Dockerfile +14 -0
  2. package/LICENSE +24 -0
  3. package/README.md +539 -0
  4. package/config/database.js +37 -0
  5. package/index.js +63 -0
  6. package/models/Episode.js +49 -0
  7. package/models/Schedule.js +50 -0
  8. package/package.json +46 -0
  9. package/routes/anime-list.js +67 -0
  10. package/routes/episodeStream.js +64 -0
  11. package/routes/genre.js +67 -0
  12. package/routes/home.js +30 -0
  13. package/routes/monthly.js +37 -0
  14. package/routes/schedule.js +174 -0
  15. package/routes/search.js +79 -0
  16. package/routes/top10.js +37 -0
  17. package/routes/weekly.js +37 -0
  18. package/save.txt +431 -0
  19. package/scrapeanime/A-Z/AnimeList/filter.js +43 -0
  20. package/scrapeanime/A-Z/Genre/genre.js +42 -0
  21. package/scrapeanime/AnimeDetails/animedetails.js +73 -0
  22. package/scrapeanime/Browse/Search/search.js +119 -0
  23. package/scrapeanime/Browse/Suggestion/suggestion.js +50 -0
  24. package/scrapeanime/Leaderboard/Monthly/scrapeHiAnimeMonthlyTop10.js +137 -0
  25. package/scrapeanime/Leaderboard/Top/scrapeHiAnimeTop10.js +125 -0
  26. package/scrapeanime/Leaderboard/Weekly/scrapeHiAnimeWeeklyTop10.js +188 -0
  27. package/scrapeanime/Schedule/schedule.js +174 -0
  28. package/scrapeanime/SingleEpisode/scrapeSingleEpisode.js +496 -0
  29. package/scrapeanime/homepage/latest/latest.js +118 -0
  30. package/scrapeanime/homepage/most_favorite/mostFavorite.js +55 -0
  31. package/scrapeanime/homepage/most_popular/mostPopular.js +55 -0
  32. package/scrapeanime/homepage/recently_updated/recentlyUpdated.js +56 -0
  33. package/scrapeanime/homepage/scrapeAnimeDetails.js +128 -0
  34. package/scrapeanime/homepage/scrapehomepage.js +2 -0
  35. package/scrapeanime/homepage/scrapeservice.js +158 -0
  36. package/scrapeanime/homepage/slider/slider.js +151 -0
  37. package/scrapeanime/homepage/top_airing/topAiring.js +55 -0
  38. package/scrapeanime/homepage/trending/trending.js +59 -0
  39. package/service/scraperService.js +38 -0
package/models/Episode.js ADDED
@@ -0,0 +1,49 @@
+ import mongoose from 'mongoose';
+
+ const episodeSchema = new mongoose.Schema({
+   anime_id: {
+     type: String,
+     required: true,
+     index: true
+   },
+   episode_number: {
+     type: String,
+     required: true
+   },
+   episode_url: {
+     type: String,
+     required: true,
+     unique: true
+   },
+   streaming_data: {
+     title: String,
+     episode_number: String,
+     streaming_link: String,
+     range_id: String,
+     all_ranges: [String]
+   },
+   extraction_time_seconds: {
+     type: Number,
+     required: true
+   },
+   last_updated: {
+     type: Date,
+     default: Date.now
+   },
+   cache_expires_at: {
+     type: Date,
+     required: true
+   }
+ }, {
+   timestamps: true
+ });
+
+ // Indexes for faster queries (the first is a compound index)
+ episodeSchema.index({ anime_id: 1, episode_number: 1 });
+ episodeSchema.index({ episode_url: 1 });
+ episodeSchema.index({ cache_expires_at: 1 });
+ episodeSchema.index({ last_updated: -1 });
+
+ const Episode = mongoose.model('Episode', episodeSchema);
+
+ export default Episode;
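The cache_expires_at field is what makes this model usable as a scrape cache: a route can serve a stored document while it is still fresh and fall back to scraping otherwise. A minimal sketch of that read-through lookup, assuming a hypothetical helper name (this function is not part of the package):

import Episode from './models/Episode.js';

// Hypothetical helper: return cached streaming data while it is still fresh.
async function getCachedEpisode(animeId, episodeNumber) {
  const cached = await Episode.findOne({
    anime_id: animeId,
    episode_number: episodeNumber,
    cache_expires_at: { $gt: new Date() } // only entries that have not expired
  });
  return cached ? cached.streaming_data : null;
}

Since cache_expires_at is already indexed, an alternative would be a MongoDB TTL index (passing { expireAfterSeconds: 0 } as index options) so expired documents are deleted automatically instead of being filtered out at query time.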
package/models/Schedule.js ADDED
@@ -0,0 +1,50 @@
+ import mongoose from 'mongoose';
+
+ const scheduleItemSchema = new mongoose.Schema({
+   day: {
+     type: String,
+     required: true,
+     enum: ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Error']
+   },
+   anime: {
+     type: String,
+     required: true
+   },
+   time: {
+     type: String,
+     required: true
+   }
+ }, {
+   timestamps: true
+ });
+
+ const scheduleSchema = new mongoose.Schema({
+   week_id: {
+     type: String,
+     required: true,
+     unique: true
+   },
+   schedule_data: [scheduleItemSchema],
+   extraction_time_seconds: {
+     type: Number,
+     required: true
+   },
+   total_episodes: {
+     type: Number,
+     default: 0
+   },
+   last_updated: {
+     type: Date,
+     default: Date.now
+   }
+ }, {
+   timestamps: true
+ });
+
+ // Indexes for faster queries
+ scheduleSchema.index({ week_id: 1 });
+ scheduleSchema.index({ last_updated: -1 });
+
+ const Schedule = mongoose.model('Schedule', scheduleSchema);
+
+ export default Schedule;
package/package.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "name": "shirayuki-anime-scraper-api",
+   "version": "1.0.0",
+   "description": "A comprehensive anime scraping API that provides anime information, streaming links, and search functionality from HiAnime and other popular anime streaming platforms",
+   "keywords": [
+     "anime",
+     "scraping",
+     "streaming",
+     "api",
+     "hianime",
+     "shirayuki",
+     "anime-api",
+     "scraper"
+   ],
+   "homepage": "https://github.com/Anandadevnath/Shirayuki-Scrapper-API#readme",
+   "bugs": {
+     "url": "https://github.com/Anandadevnath/Shirayuki-Scrapper-API/issues"
+   },
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/Anandadevnath/Shirayuki-Scrapper-API.git"
+   },
+   "license": "ISC",
+   "author": "Anandadevnath",
+   "type": "module",
+   "main": "index.js",
+   "scripts": {
+     "start": "node index.js",
+     "dev": "node index.js",
+     "test": "echo \"Warning: no test specified\" && exit 0"
+   },
+   "dependencies": {
+     "axios": "^1.11.0",
+     "cheerio": "^1.1.2",
+     "dotenv": "^17.0.1",
+     "express": "^5.1.0",
+     "mongodb": "^6.17.0",
+     "mongoose": "^8.16.1",
+     "puppeteer": "^24.20.0",
+     "puppeteer-extra": "^3.3.6",
+     "puppeteer-extra-plugin-stealth": "^2.11.2"
+   },
+   "engines": {
+     "node": ">=18.0.0"
+   }
+ }
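With "type": "module" the codebase is ESM throughout, and the engines field pins Node 18+, which is also what provides the global fetch used in the client sketches further down. A minimal local run (config/database.js presumably reads a MongoDB connection string via dotenv, but the variable name is not shown in this diff):

npm install
npm start   # runs "node index.js"; "npm run dev" is currently identical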
package/routes/anime-list.js ADDED
@@ -0,0 +1,67 @@
+ import express from 'express';
+ import { scrapeAnimeByLetter } from '../scrapeanime/A-Z/AnimeList/filter.js';
+
+ const router = express.Router();
+
+ router.get('/:letter', async (req, res) => {
+   const start = Date.now(); // declared outside try so the catch block can compute duration
+   try {
+     const letter = req.params.letter;
+     const page = parseInt(req.query.page) || 1;
+     const axios = (await import('axios')).default;
+     const cheerio = await import('cheerio');
+     const url = `https://123animehub.cc/az-all-anime/${letter}/?page=${page}`;
+     let total_counts = null;
+     try {
+       const { data: html } = await axios.get(url);
+       const $ = cheerio.load(html);
+       const totalText = $('.paging-wrapper .total').first().text().replace(/[^\d]/g, '');
+       if (totalText) total_counts = parseInt(totalText, 10);
+     } catch (e) {
+       total_counts = null;
+     }
+
+     const result = await scrapeAnimeByLetter(letter, page);
+     const duration = (Date.now() - start) / 1000;
+     const indexedResult = result.map((anime, idx) => ({
+       index: idx + 1,
+       ...anime
+     }));
+     res.json({
+       success: true,
+       data: indexedResult,
+       pagination: {
+         current_page: page,
+         total_found: indexedResult.length,
+         total_counts: total_counts,
+         has_next_page: indexedResult.length > 0,
+         has_previous_page: page > 1,
+         next_page: indexedResult.length > 0 ? page + 1 : null,
+         previous_page: page > 1 ? page - 1 : null
+       },
+       extraction_time_seconds: duration,
+       message: `Anime list for letter '${letter}' - Page ${page}`,
+       timestamp: new Date().toISOString(),
+       source_url: url
+     });
+   } catch (error) {
+     const duration = (Date.now() - start) / 1000;
+     res.status(500).json({
+       success: false,
+       error: error.message,
+       extraction_time_seconds: duration,
+       timestamp: new Date().toISOString(),
+       pagination: {
+         current_page: parseInt(req.query.page) || 1,
+         total_found: 0,
+         has_next_page: false,
+         has_previous_page: false,
+         next_page: null,
+         previous_page: null
+       }
+     });
+   }
+ });
+
+
+ export default router;
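A quick client-side look at the response shape, as a sketch only: the mount path /anime-list and port 5000 are assumptions (index.js is not shown in this section, though port 5000 matches the example URLs in routes/episodeStream.js):

// Assumed mount point and port; adjust to however index.js wires the router.
const res = await fetch('http://localhost:5000/anime-list/a?page=2');
const { data, pagination } = await res.json();
console.log(pagination.total_counts, pagination.next_page, data[0]);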
package/routes/episodeStream.js ADDED
@@ -0,0 +1,64 @@
+ import express from 'express';
+ import { scrapeSingleEpisode } from '../scrapeanime/SingleEpisode/scrapeSingleEpisode.js';
+
+ const router = express.Router();
+
+ router.get('/episode-stream', async (req, res) => {
+   try {
+     const animeId = req.query.id;
+     const episodeNumber = req.query.ep;
+
+     if (!animeId || !episodeNumber) {
+       return res.status(400).json({
+         error: 'Both id and ep parameters are required',
+         example: 'http://localhost:5000/episode-stream?id=sentai-daishikkaku-2nd-season-dub&ep=1'
+       });
+     }
+
+     // Validate episode number is numeric
+     if (isNaN(episodeNumber) || episodeNumber < 1) {
+       return res.status(400).json({
+         error: 'Episode number must be a positive integer',
+         example: 'http://localhost:5000/episode-stream?id=anime-name&ep=1'
+       });
+     }
+
+     const episodeUrl = `https://w1.123animes.ru/anime/${animeId}/episode/${episodeNumber}`;
+
+     console.log(`🎯 Fetching streaming link for: ${animeId} Episode ${episodeNumber}`);
+
+     const startTime = Date.now();
+     const result = await scrapeSingleEpisode(episodeUrl);
+     const endTime = Date.now();
+     const duration = (endTime - startTime) / 1000;
+
+     if (result.success) {
+       console.log(`✅ Found streaming link in ${duration.toFixed(2)} seconds`);
+       res.json({
+         success: true,
+         anime_id: animeId,
+         episode: episodeNumber,
+         data: result.data,
+         extraction_time_seconds: duration
+       });
+     } else {
+       console.log(`❌ Failed to find streaming link: ${result.error}`);
+       res.status(404).json({
+         success: false,
+         error: result.error,
+         anime_id: animeId,
+         episode: episodeNumber,
+         extraction_time_seconds: duration
+       });
+     }
+
+   } catch (error) {
+     console.error('❌ Error fetching episode stream:', error.message);
+     res.status(500).json({
+       error: error.message,
+       timestamp: new Date().toISOString()
+     });
+   }
+ });
+
+ export default router;
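The route documents its own URL shape in the 400 responses, so a client call can follow that example directly. A sketch; the shape of result.data is not shown in this diff, so the streaming_link field name below is assumed from the streaming_data subdocument in models/Episode.js:

const res = await fetch('http://localhost:5000/episode-stream?id=sentai-daishikkaku-2nd-season-dub&ep=1');
const body = await res.json();
if (body.success) {
  console.log(body.data.streaming_link); // assumed field name, per streaming_data in models/Episode.js
} else {
  console.error(body.error); // 404 with the scraper's error message
}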
package/routes/genre.js ADDED
@@ -0,0 +1,67 @@
+ import express from 'express';
+ import { scrapeAnimeByGenre } from '../scrapeanime/A-Z/Genre/genre.js';
+
+ const router = express.Router();
+
+ router.get('/:genre', async (req, res) => {
+   const start = Date.now(); // declared outside try so the catch block can compute duration
+   try {
+     const genre = req.params.genre;
+     const page = parseInt(req.query.page) || 1;
+     const axios = (await import('axios')).default;
+     const cheerio = await import('cheerio');
+     const url = `https://123animehub.cc/genere/${genre}?page=${page}`;
+     let total_counts = null;
+     try {
+       const { data: html } = await axios.get(url);
+       const $ = cheerio.load(html);
+       const totalText = $('.paging-wrapper .total').first().text().replace(/[^\d]/g, '');
+       if (totalText) total_counts = parseInt(totalText, 10);
+     } catch (e) {
+       total_counts = null;
+     }
+
+     const result = await scrapeAnimeByGenre(genre, page);
+     const duration = (Date.now() - start) / 1000;
+     const indexedResult = result.map((anime, idx) => ({
+       index: idx + 1,
+       ...anime
+     }));
+     res.json({
+       success: true,
+       data: indexedResult,
+       pagination: {
+         current_page: page,
+         total_found: indexedResult.length,
+         total_counts: total_counts,
+         has_next_page: indexedResult.length > 0,
+         has_previous_page: page > 1,
+         next_page: indexedResult.length > 0 ? page + 1 : null,
+         previous_page: page > 1 ? page - 1 : null
+       },
+       extraction_time_seconds: duration,
+       message: `Anime list for genre '${genre}' - Page ${page}`,
+       timestamp: new Date().toISOString(),
+       source_url: url
+     });
+   } catch (error) {
+     const duration = (Date.now() - start) / 1000;
+     res.status(500).json({
+       success: false,
+       error: error.message,
+       extraction_time_seconds: duration,
+       timestamp: new Date().toISOString(),
+       pagination: {
+         current_page: parseInt(req.query.page) || 1,
+         total_found: 0,
+         total_counts: 0,
+         has_next_page: false,
+         has_previous_page: false,
+         next_page: null,
+         previous_page: null
+       }
+     });
+   }
+ });
+
+ export default router;
package/routes/home.js ADDED
@@ -0,0 +1,30 @@
+ import express from 'express';
+ import scrapeHomepage from '../scrapeanime/homepage/scrapehomepage.js';
+
+ const router = express.Router();
+
+ router.get('/', async (req, res) => {
+   try {
+     const start = Date.now();
+     const result = await scrapeHomepage(true); // Always include details
+     const duration = (Date.now() - start) / 1000;
+
+     if (result.success) {
+       res.json({
+         success: true,
+         data: result.data,
+         extraction_time_seconds: duration,
+       });
+     } else {
+       res.status(502).json({
+         success: false,
+         error: result.error || 'Unknown error',
+         extraction_time_seconds: duration,
+       });
+     }
+   } catch (err) {
+     res.status(500).json({ success: false, error: err.message });
+   }
+ });
+
+ export default router;
package/routes/monthly.js ADDED
@@ -0,0 +1,37 @@
+ import express from 'express';
+ import { scrapeHiAnimeMonthlyTop10 } from '../scrapeanime/Leaderboard/Monthly/scrapeHiAnimeMonthlyTop10.js';
+
+ const router = express.Router();
+
+ router.get('/', async (req, res) => {
+   const start = Date.now(); // declared outside try so the catch block can compute duration
+   try {
+     console.log('📅 Starting HiAnime Monthly Top 10 scraping...');
+
+     const result = await scrapeHiAnimeMonthlyTop10();
+     const duration = (Date.now() - start) / 1000;
+
+     console.log(`✅ Monthly Top 10 scraping completed in ${duration}s`);
+
+     res.json({
+       success: true,
+       data: result,
+       extraction_time_seconds: duration,
+       message: "Top 10 monthly viewed anime from HiAnime",
+       timestamp: new Date().toISOString()
+     });
+
+   } catch (error) {
+     const duration = (Date.now() - start) / 1000;
+     console.error('❌ Error scraping monthly top 10:', error.message);
+
+     res.status(500).json({
+       success: false,
+       error: error.message,
+       extraction_time_seconds: duration,
+       timestamp: new Date().toISOString()
+     });
+   }
+ });
+
+ export default router;
package/routes/schedule.js ADDED
@@ -0,0 +1,174 @@
+ import express from 'express';
+ import scrapeSchedule from '../scrapeanime/Schedule/schedule.js';
+ import connectDB from '../config/database.js';
+ import Schedule from '../models/Schedule.js';
+
+ const router = express.Router();
+
+ const getCurrentWeekId = () => {
+   const now = new Date();
+   const year = now.getFullYear();
+   const weekNumber = getWeekNumber(now);
+   return `${year}-W${weekNumber.toString().padStart(2, '0')}`;
+ };
+
+ const getWeekNumber = (date) => { // simple week-of-year calculation (not ISO-8601)
+   const firstDayOfYear = new Date(date.getFullYear(), 0, 1);
+   const pastDaysOfYear = (date - firstDayOfYear) / 86400000;
+   return Math.ceil((pastDaysOfYear + firstDayOfYear.getDay() + 1) / 7);
+ };
+
+ router.get('/', async (req, res) => {
+   const start = Date.now();
+
+   try {
+     // Connect to MongoDB
+     await connectDB();
+
+     const currentWeekId = getCurrentWeekId();
+
+     // Check if we have recent data (less than 6 hours old)
+     const existingSchedule = await Schedule.findOne({
+       week_id: currentWeekId,
+       last_updated: { $gte: new Date(Date.now() - 6 * 60 * 60 * 1000) } // 6 hours ago
+     }).sort({ last_updated: -1 });
+
+     if (existingSchedule) {
+       console.log(`📋 Returning cached schedule data for ${currentWeekId}`);
+       const cleanData = existingSchedule.schedule_data.map(item => ({
+         day: item.day,
+         anime: item.anime,
+         time: item.time
+       }));
+
+       return res.json({
+         success: true,
+         data: cleanData,
+         extraction_time_seconds: 0.001,
+         cached: true,
+         week_id: currentWeekId,
+         last_updated: existingSchedule.last_updated,
+         total_episodes: existingSchedule.total_episodes
+       });
+     }
+
+     // Scrape fresh data
+     console.log(`🔄 Scraping fresh schedule data for ${currentWeekId}`);
+     const scheduleData = await scrapeSchedule();
+     const duration = (Date.now() - start) / 1000;
+
+     // Store in MongoDB
+     const savedSchedule = await Schedule.findOneAndUpdate(
+       { week_id: currentWeekId },
+       {
+         schedule_data: scheduleData,
+         extraction_time_seconds: duration,
+         total_episodes: scheduleData.length,
+         last_updated: new Date()
+       },
+       { upsert: true, new: true }
+     );
+
+     console.log(`💾 Saved schedule data to MongoDB: ${scheduleData.length} episodes`);
+
+     // Clean up old data (keep only last 4 weeks)
+     const fourWeeksAgo = new Date();
+     fourWeeksAgo.setDate(fourWeeksAgo.getDate() - 28);
+
+     const deleteResult = await Schedule.deleteMany({
+       last_updated: { $lt: fourWeeksAgo }
+     });
+
+     if (deleteResult.deletedCount > 0) {
+       console.log(`🧹 Cleaned up ${deleteResult.deletedCount} old schedule records`);
+     }
+
+     res.json({
+       success: true,
+       data: scheduleData,
+       extraction_time_seconds: duration,
+       cached: false,
+       week_id: currentWeekId,
+       total_episodes: scheduleData.length,
+       saved_to_db: true
+     });
+
+   } catch (err) {
+     console.error('❌ Schedule route error:', err);
+     res.status(500).json({
+       success: false,
+       error: err.message,
+       extraction_time_seconds: (Date.now() - start) / 1000
+     });
+   }
+ });
+
+ // Additional endpoint to get schedule history
+ router.get('/history', async (req, res) => {
+   try {
+     await connectDB();
+
+     const limit = parseInt(req.query.limit) || 10;
+     const scheduleHistory = await Schedule.find({})
+       .select('week_id total_episodes last_updated extraction_time_seconds')
+       .sort({ last_updated: -1 })
+       .limit(limit);
+
+     res.json({
+       success: true,
+       data: scheduleHistory,
+       total_records: scheduleHistory.length
+     });
+   } catch (err) {
+     res.status(500).json({
+       success: false,
+       error: err.message
+     });
+   }
+ });
+
+ // Endpoint to force refresh schedule data
+ router.post('/refresh', async (req, res) => {
+   const start = Date.now();
+
+   try {
+     await connectDB();
+
+     console.log('🔄 Force refreshing schedule data...');
+     const scheduleData = await scrapeSchedule();
+     const duration = (Date.now() - start) / 1000;
+
+     const currentWeekId = getCurrentWeekId();
+
+     // Update or create new schedule
+     const updatedSchedule = await Schedule.findOneAndUpdate(
+       { week_id: currentWeekId },
+       {
+         schedule_data: scheduleData,
+         extraction_time_seconds: duration,
+         total_episodes: scheduleData.length,
+         last_updated: new Date()
+       },
+       { upsert: true, new: true }
+     );
+
+     console.log(`💾 Force updated schedule data: ${scheduleData.length} episodes`);
+
+     res.json({
+       success: true,
+       data: scheduleData,
+       extraction_time_seconds: duration,
+       week_id: currentWeekId,
+       total_episodes: scheduleData.length,
+       force_refreshed: true
+     });
+   } catch (err) {
+     res.status(500).json({
+       success: false,
+       error: err.message,
+       extraction_time_seconds: (Date.now() - start) / 1000
+     });
+   }
+ });
+
+ export default router;
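Cache behavior in a nutshell: GET serves the stored week if it is under six hours old (cached: true, extraction_time_seconds: 0.001), otherwise it scrapes, upserts by week_id, and prunes records older than four weeks; POST /refresh always rescrapes. A client sketch, assuming the router is mounted at /schedule on port 5000 (index.js is not shown here):

// Assumed mount point and port.
const schedule = await (await fetch('http://localhost:5000/schedule')).json();
console.log(schedule.cached, schedule.week_id, schedule.total_episodes);

// Force a rescrape, bypassing the 6-hour freshness window:
await fetch('http://localhost:5000/schedule/refresh', { method: 'POST' });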
package/routes/search.js ADDED
@@ -0,0 +1,79 @@
+ import express from 'express';
+ import { scrapeSearchSuggestions } from '../scrapeanime/Browse/Suggestion/suggestion.js';
+ import { scrapeAnimeSearch } from '../scrapeanime/Browse/Search/search.js';
+
+ const router = express.Router();
+
+ // GET /search?keyword=one%20piece
+ router.get('/', async (req, res) => {
+   const start = Date.now();
+   try {
+     const keyword = req.query.keyword || '';
+
+     if (!keyword) {
+       return res.status(400).json({
+         success: false,
+         error: 'Query parameter "keyword" is required'
+       });
+     }
+
+     const result = await scrapeAnimeSearch(keyword);
+     const duration = (Date.now() - start) / 1000;
+
+     res.json({
+       success: true,
+       total_results: result.length,
+       data: result,
+       extraction_time_seconds: duration,
+       message: `Search results for "${keyword}"`,
+       timestamp: new Date().toISOString(),
+       source_url: `https://123animehub.cc/search?keyword=${encodeURIComponent(keyword)}`
+     });
+   } catch (error) {
+     const duration = (Date.now() - start) / 1000;
+     res.status(500).json({
+       success: false,
+       error: error.message,
+       extraction_time_seconds: duration,
+       timestamp: new Date().toISOString()
+     });
+   }
+ });
+
+ // GET /search/suggestions?q=demon%20slayer
+ router.get('/suggestions', async (req, res) => {
+   const start = Date.now();
+   try {
+     const query = req.query.q || req.query.query || '';
+
+     if (!query) {
+       return res.status(400).json({
+         success: false,
+         error: 'Query parameter "q" is required'
+       });
+     }
+
+     const result = await scrapeSearchSuggestions(query);
+     const duration = (Date.now() - start) / 1000;
+
+     res.json({
+       success: true,
+       total_suggestions: result.length,
+       data: result,
+       extraction_time_seconds: duration,
+       message: `Search suggestions for "${query}"`,
+       timestamp: new Date().toISOString(),
+       source_url: `https://123animehub.cc/search?keyword=${encodeURIComponent(query)}`
+     });
+   } catch (error) {
+     const duration = (Date.now() - start) / 1000;
+     res.status(500).json({
+       success: false,
+       error: error.message,
+       extraction_time_seconds: duration,
+       timestamp: new Date().toISOString()
+     });
+   }
+ });
+
+ export default router;
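The inline comments above give the intended paths (/search and /search/suggestions), so only the host and port are assumed in this sketch:

// Assumed host/port; paths follow the route comments above.
const results = await (await fetch(
  'http://localhost:5000/search?keyword=' + encodeURIComponent('one piece')
)).json();
console.log(results.total_results);

const suggestions = await (await fetch(
  'http://localhost:5000/search/suggestions?q=' + encodeURIComponent('demon slayer')
)).json();
console.log(suggestions.total_suggestions);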