rsshub 1.0.0-master.f6cb490 → 1.0.0-master.f6f0273

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/lib/api/index.ts +1 -6
  2. package/lib/routes/2048/index.ts +24 -23
  3. package/lib/routes/anthropic/news.ts +27 -13
  4. package/lib/routes/asianfanfics/namespace.ts +7 -0
  5. package/lib/routes/asianfanfics/tag.ts +89 -0
  6. package/lib/routes/asianfanfics/text-search.ts +68 -0
  7. package/lib/routes/blockworks/index.ts +128 -0
  8. package/lib/routes/blockworks/namespace.ts +7 -0
  9. package/lib/routes/cmu/andypavlo/blog.ts +55 -0
  10. package/lib/routes/cmu/namespace.ts +7 -0
  11. package/lib/routes/coindesk/{index.ts → consensus-magazine.ts} +17 -21
  12. package/lib/routes/coindesk/namespace.ts +2 -1
  13. package/lib/routes/coindesk/news.ts +47 -0
  14. package/lib/routes/coindesk/utils.ts +26 -0
  15. package/lib/routes/cointelegraph/index.ts +106 -0
  16. package/lib/routes/cointelegraph/namespace.ts +7 -0
  17. package/lib/routes/collabo-cafe/category.ts +37 -0
  18. package/lib/routes/collabo-cafe/index.ts +35 -0
  19. package/lib/routes/collabo-cafe/namespace.ts +9 -0
  20. package/lib/routes/collabo-cafe/parser.ts +29 -0
  21. package/lib/routes/collabo-cafe/tag.ts +37 -0
  22. package/lib/routes/cryptoslate/index.ts +98 -0
  23. package/lib/routes/cryptoslate/namespace.ts +7 -0
  24. package/lib/routes/decrypt/index.ts +115 -0
  25. package/lib/routes/decrypt/namespace.ts +7 -0
  26. package/lib/routes/discuz/discuz.ts +7 -9
  27. package/lib/routes/fangchan/list.ts +224 -0
  28. package/lib/routes/fangchan/namespace.ts +9 -0
  29. package/lib/routes/fangchan/templates/description.art +7 -0
  30. package/lib/routes/foreignaffairs/namespace.ts +7 -0
  31. package/lib/routes/foreignaffairs/rss.ts +55 -0
  32. package/lib/routes/forklog/index.ts +72 -0
  33. package/lib/routes/forklog/namespace.ts +7 -0
  34. package/lib/routes/gcores/categories.ts +129 -0
  35. package/lib/routes/gcores/collections.ts +129 -0
  36. package/lib/routes/gcores/topics.ts +63 -0
  37. package/lib/routes/gov/moa/gjs.ts +210 -0
  38. package/lib/routes/gov/tianjin/tjftz.ts +53 -0
  39. package/lib/routes/gov/tianjin/tjrcgzw.ts +51 -0
  40. package/lib/routes/grainoil/category.ts +207 -0
  41. package/lib/routes/grainoil/namespace.ts +9 -0
  42. package/lib/routes/huxiu/util.ts +11 -9
  43. package/lib/routes/ifanr/category.ts +7 -2
  44. package/lib/routes/ifanr/digest.ts +1 -1
  45. package/lib/routes/ifanr/index.ts +1 -1
  46. package/lib/routes/instructables/projects.ts +20 -15
  47. package/lib/routes/juejin/collections.ts +1 -1
  48. package/lib/routes/komiic/comic.ts +88 -0
  49. package/lib/routes/komiic/namespace.ts +7 -0
  50. package/lib/routes/leagueoflegends/namespace.ts +8 -0
  51. package/lib/routes/leagueoflegends/patch-notes.ts +76 -0
  52. package/lib/routes/likeshop/index.ts +43 -0
  53. package/lib/routes/likeshop/namespace.ts +7 -0
  54. package/lib/routes/ltaaa/article.ts +180 -0
  55. package/lib/routes/ltaaa/namespace.ts +9 -0
  56. package/lib/routes/ltaaa/templates/description.art +7 -0
  57. package/lib/routes/mashiro/index.ts +1 -0
  58. package/lib/routes/nhentai/util.ts +4 -1
  59. package/lib/routes/pinterest/user.ts +9 -0
  60. package/lib/routes/sohu/mp.ts +3 -2
  61. package/lib/routes/spotify/show.ts +1 -1
  62. package/lib/routes/stcn/index.ts +241 -136
  63. package/lib/routes/stcn/kx.ts +144 -0
  64. package/lib/routes/swjtu/namespace.ts +1 -1
  65. package/lib/routes/swjtu/{scai/bks.ts → scai.ts} +34 -20
  66. package/lib/routes/swjtu/sports.ts +77 -0
  67. package/lib/routes/theblock/index.ts +142 -0
  68. package/lib/routes/theblock/namespace.ts +7 -0
  69. package/lib/routes/theverge/index.ts +73 -62
  70. package/lib/routes/theverge/templates/header.art +19 -0
  71. package/lib/routes/threads/index.ts +73 -54
  72. package/lib/routes/threads/utils.ts +60 -78
  73. package/lib/routes/tmtpost/column.ts +298 -0
  74. package/lib/routes/tmtpost/new.ts +4 -199
  75. package/lib/routes/tmtpost/util.ts +207 -0
  76. package/lib/routes/toranoana/namespace.ts +7 -0
  77. package/lib/routes/toranoana/news.ts +110 -0
  78. package/lib/routes/wainao/templates/description.art +9 -0
  79. package/lib/routes/wainao/topics.ts +214 -0
  80. package/lib/routes/xiaoyuzhou/podcast.ts +27 -27
  81. package/lib/routes/xjtu/yz.ts +74 -0
  82. package/lib/routes/youmemark/index.ts +6 -6
  83. package/lib/routes/zaobao/util.ts +11 -3
  84. package/lib/routes/zhihu/answers.ts +26 -54
  85. package/package.json +36 -35
  86. package/lib/routes/gcores/category.ts +0 -171
  87. package/lib/routes/gcores/collection.ts +0 -161
  88. package/lib/routes-deprecated/ltaaa/index.js +0 -69
@@ -0,0 +1,106 @@
1
+ import { Route, Data, DataItem } from '@/types';
2
+ import cache from '@/utils/cache';
3
+ import ofetch from '@/utils/ofetch';
4
+ import { parseDate } from '@/utils/parse-date';
5
+ import { load } from 'cheerio';
6
+ import logger from '@/utils/logger';
7
+ import parser from '@/utils/rss-parser';
8
+
9
+ export const route: Route = {
10
+ path: '/',
11
+ categories: ['finance'],
12
+ example: '/cointelegraph',
13
+ parameters: {},
14
+ features: {
15
+ requireConfig: false,
16
+ requirePuppeteer: false,
17
+ antiCrawler: false,
18
+ supportBT: false,
19
+ supportPodcast: false,
20
+ supportScihub: false,
21
+ },
22
+ name: 'News',
23
+ maintainers: ['pseudoyu'],
24
+ handler,
25
+ radar: [
26
+ {
27
+ source: ['cointelegraph.com/'],
28
+ target: '/',
29
+ },
30
+ ],
31
+ description: 'Get latest news from Cointelegraph with full text.',
32
+ };
33
+
34
+ async function handler(): Promise<Data> {
35
+ const rssUrl = 'https://cointelegraph.com/rss';
36
+ const feed = await parser.parseURL(rssUrl);
37
+
38
+ const items = await Promise.all(
39
+ feed.items
40
+ .filter((item) => item.link && /\/news|\/explained|\/innovation-circle/.test(item.link))
41
+ .map((item) => ({
42
+ ...item,
43
+ link: item.link?.split('?')[0],
44
+ }))
45
+ .map((item) =>
46
+ cache.tryGet(item.link!, async () => {
47
+ const link = item.link!;
48
+
49
+ // Extract full text
50
+ const fullText = await extractFullText(link);
51
+
52
+ if (!fullText) {
53
+ logger.warn(`Failed to extract content from ${link}`);
54
+ }
55
+
56
+ // Create article item
57
+ return {
58
+ title: item.title || 'Untitled',
59
+ description: fullText || item.content,
60
+ pubDate: item.pubDate ? parseDate(item.pubDate) : undefined,
61
+ link,
62
+ author: item.creator || 'CoinTelegraph',
63
+ category: item.categories?.map((c) => c.trim()) || [],
64
+ image: item.enclosure?.url,
65
+ } as DataItem;
66
+ })
67
+ )
68
+ );
69
+
70
+ // Filter out null items
71
+ const validItems = items.filter((item): item is DataItem => item !== null);
72
+
73
+ return {
74
+ title: feed.title || 'CoinTelegraph News',
75
+ link: feed.link || 'https://cointelegraph.com',
76
+ description: feed.description || 'Latest news from CoinTelegraph',
77
+ language: feed.language || 'en',
78
+ item: validItems,
79
+ };
80
+ }
81
+
82
+ async function extractFullText(url: string): Promise<string | null> {
83
+ try {
84
+ const response = await ofetch(url);
85
+ const $ = load(response);
86
+ const nuxtData = $('script:contains("window.__NUXT__")').text();
87
+ const fullText = JSON.parse(nuxtData.match(/\.fullText=(".*?");/)?.[1] || '{}');
88
+ const cover = $('.post-cover__image');
89
+
90
+ // Remove unwanted elements
91
+ cover.find('source').remove();
92
+ cover.find('img').removeAttr('srcset');
93
+ cover.find('img').attr(
94
+ 'src',
95
+ cover
96
+ .find('img')
97
+ .attr('src')
98
+ ?.match(/(https:\/\/s3\.cointelegraph\.com\/.+)/)?.[1] || ''
99
+ );
100
+
101
+ return cover.html() + fullText || null;
102
+ } catch (error) {
103
+ logger.error(`Error fetching article content: ${error}`);
104
+ return null;
105
+ }
106
+ }
@@ -0,0 +1,7 @@
1
+ import type { Namespace } from '@/types';
2
+
3
+ export const namespace: Namespace = {
4
+ name: 'Cointelegraph',
5
+ url: 'cointelegraph.com',
6
+ lang: 'en',
7
+ };
@@ -0,0 +1,37 @@
1
+ import { Data, Route } from '@/types';
2
+ import ofetch from '@/utils/ofetch';
3
+ import { load } from 'cheerio';
4
+ import { Context } from 'hono';
5
+ import { parseItems } from './parser';
6
+
7
+ export const handler = async (ctx: Context): Promise<Data | null> => {
8
+ const { category } = ctx.req.param();
9
+ const baseUrl = `https://collabo-cafe.com/events/category/${category}`;
10
+ const res = await ofetch(baseUrl);
11
+ const $ = load(res);
12
+ const items = parseItems($);
13
+
14
+ return {
15
+ title: '分类',
16
+ link: baseUrl,
17
+ item: items,
18
+ };
19
+ };
20
+
21
+ export const route: Route = {
22
+ path: '/category/:category',
23
+ categories: ['anime'],
24
+ example: '/collabo-cafe/category/cafe',
25
+ parameters: { category: 'Category, refer to the original website (ジャンル別)' },
26
+ features: {
27
+ requireConfig: false,
28
+ requirePuppeteer: false,
29
+ antiCrawler: false,
30
+ supportBT: false,
31
+ supportPodcast: false,
32
+ supportScihub: false,
33
+ },
34
+ name: '分类',
35
+ maintainers: ['cokemine'],
36
+ handler,
37
+ };
@@ -0,0 +1,35 @@
1
+ import { Data, Route } from '@/types';
2
+ import ofetch from '@/utils/ofetch';
3
+ import { load } from 'cheerio';
4
+ import { parseItems } from './parser';
5
+
6
+ export const handler = async (): Promise<Data | null> => {
7
+ const baseUrl = 'https://collabo-cafe.com/';
8
+ const res = await ofetch(baseUrl);
9
+ const $ = load(res);
10
+ const items = parseItems($);
11
+
12
+ return {
13
+ title: '全部文章',
14
+ link: baseUrl,
15
+ item: items,
16
+ };
17
+ };
18
+
19
+ export const route: Route = {
20
+ path: '/',
21
+ categories: ['anime'],
22
+ example: '/collabo-cafe/',
23
+ parameters: undefined,
24
+ features: {
25
+ requireConfig: false,
26
+ requirePuppeteer: false,
27
+ antiCrawler: false,
28
+ supportBT: false,
29
+ supportPodcast: false,
30
+ supportScihub: false,
31
+ },
32
+ name: '全部文章',
33
+ maintainers: ['cokemine'],
34
+ handler,
35
+ };
@@ -0,0 +1,9 @@
1
+ import type { Namespace } from '@/types';
2
+
3
+ export const namespace: Namespace = {
4
+ name: 'コラボカフェ',
5
+ url: 'collabo-cafe.com',
6
+ description: 'コラボカフェ - アニメ・漫画・ゲームのコラボ情報一覧まとめ',
7
+ lang: 'ja',
8
+ categories: ['anime'],
9
+ };
@@ -0,0 +1,29 @@
1
+ import { type DataItem } from '@/types';
2
+ import { parseDate } from '@/utils/parse-date';
3
+ import { CheerioAPI } from 'cheerio';
4
+
5
+ export function parseItems($: CheerioAPI): DataItem[] {
6
+ return $('div.top-post-list article')
7
+ .toArray()
8
+ .map((el) => {
9
+ const $el = $(el);
10
+ const a = $el.find('a').first();
11
+ const title = a.attr('title')!;
12
+ const link = a.attr('href');
13
+ const pubDate = parseDate($el.find('span.date.gf.updated').text());
14
+ const author = $el.find('span.author span.fn').text();
15
+ const category = [$el.find('span.cat-name').text()];
16
+ const description = $el.find('div.description p').text();
17
+ const image = $el.find('img').attr('data-src');
18
+ return {
19
+ title,
20
+ link,
21
+ pubDate,
22
+ author,
23
+ category,
24
+ description,
25
+ image,
26
+ banner: image,
27
+ };
28
+ });
29
+ }
@@ -0,0 +1,37 @@
1
+ import { Data, Route } from '@/types';
2
+ import ofetch from '@/utils/ofetch';
3
+ import { load } from 'cheerio';
4
+ import { Context } from 'hono';
5
+ import { parseItems } from './parser';
6
+
7
+ export const handler = async (ctx: Context): Promise<Data | null> => {
8
+ const { tag } = ctx.req.param();
9
+ const baseUrl = `https://collabo-cafe.com/events/tag/${tag}`;
10
+ const res = await ofetch(baseUrl);
11
+ const $ = load(res);
12
+ const items = parseItems($);
13
+
14
+ return {
15
+ title: '标签',
16
+ link: baseUrl,
17
+ item: items,
18
+ };
19
+ };
20
+
21
+ export const route: Route = {
22
+ path: '/tag/:tag',
23
+ categories: ['anime'],
24
+ example: '/collabo-cafe/tag/ikebukuro',
25
+ parameters: { tag: 'Tag, refer to the original website (開催地域別)' },
26
+ features: {
27
+ requireConfig: false,
28
+ requirePuppeteer: false,
29
+ antiCrawler: false,
30
+ supportBT: false,
31
+ supportPodcast: false,
32
+ supportScihub: false,
33
+ },
34
+ name: '标签',
35
+ maintainers: ['cokemine'],
36
+ handler,
37
+ };
@@ -0,0 +1,98 @@
1
+ import { Route, Data } from '@/types';
2
+ import { parseDate } from '@/utils/parse-date';
3
+ import logger from '@/utils/logger';
4
+ import parser from '@/utils/rss-parser';
5
+ import { load } from 'cheerio';
6
+
7
+ export const route: Route = {
8
+ path: '/',
9
+ categories: ['finance'],
10
+ example: '/cryptoslate',
11
+ parameters: {},
12
+ features: {
13
+ requireConfig: false,
14
+ requirePuppeteer: false,
15
+ antiCrawler: false,
16
+ supportBT: false,
17
+ supportPodcast: false,
18
+ supportScihub: false,
19
+ },
20
+ name: 'News',
21
+ maintainers: ['pseudoyu'],
22
+ handler,
23
+ radar: [
24
+ {
25
+ source: ['cryptoslate.com/'],
26
+ target: '/',
27
+ },
28
+ ],
29
+ description: 'Get latest news from CryptoSlate.',
30
+ };
31
+
32
+ async function handler(ctx): Promise<Data> {
33
+ const limit = ctx.req.query('limit') ? Number.parseInt(ctx.req.query('limit')) : 20;
34
+ const rssUrl = 'https://cryptoslate.com/feed/';
35
+
36
+ const feed = await parser.parseURL(rssUrl);
37
+
38
+ const items = feed.items
39
+ .filter((item) => !item.link?.includes('/feed') && !item.link?.includes('#respond'))
40
+ .slice(0, limit)
41
+ .map((item) => {
42
+ if (!item.link) {
43
+ return {};
44
+ }
45
+
46
+ try {
47
+ // Clean URL by removing query parameters
48
+ const cleanUrl = item.link.split('?')[0];
49
+
50
+ return {
51
+ title: item.title || 'Untitled',
52
+ link: cleanUrl,
53
+ pubDate: item.pubDate ? parseDate(item.pubDate) : undefined,
54
+ description: extractFullTextFromRSS(item),
55
+ author: item.creator || 'CryptoSlate',
56
+ category: item.categories || [],
57
+ guid: item.guid || item.link,
58
+ image: item.enclosure?.url,
59
+ };
60
+ } catch (error: any) {
61
+ logger.warn(`Couldn't process article from CryptoSlate: ${item.link}: ${error.message}`);
62
+ return {};
63
+ }
64
+ });
65
+
66
+ // Filter out empty items
67
+ const filteredItems = items.filter((item) => item && Object.keys(item).length > 0);
68
+
69
+ return {
70
+ title: feed.title || 'CryptoSlate',
71
+ link: feed.link || 'https://cryptoslate.com',
72
+ description: feed.description || 'Latest news from CryptoSlate',
73
+ item: filteredItems,
74
+ language: feed.language || 'en',
75
+ image: feed.image?.url,
76
+ } as Data;
77
+ }
78
+
79
+ function extractFullTextFromRSS(entry: any): string | null {
80
+ try {
81
+ const contentEncoded = entry['content:encoded'] || entry['content:encodedSnippet'] || entry.content || entry.contentSnippet;
82
+
83
+ if (!contentEncoded) {
84
+ return null;
85
+ }
86
+
87
+ const $ = load(contentEncoded);
88
+
89
+ // Remove unwanted elements
90
+ $('img').remove();
91
+ $('figure').remove();
92
+
93
+ return $.html() || null;
94
+ } catch (error) {
95
+ logger.error(`Error extracting full text from RSS: ${error}`);
96
+ return null;
97
+ }
98
+ }
@@ -0,0 +1,7 @@
1
+ import type { Namespace } from '@/types';
2
+
3
+ export const namespace: Namespace = {
4
+ name: 'CryptoSlate',
5
+ url: 'cryptoslate.com',
6
+ lang: 'en',
7
+ };
@@ -0,0 +1,115 @@
1
+ import { Route, Data } from '@/types';
2
+ import cache from '@/utils/cache';
3
+ import ofetch from '@/utils/ofetch';
4
+ import { parseDate } from '@/utils/parse-date';
5
+ import { load } from 'cheerio';
6
+ import logger from '@/utils/logger';
7
+ import parser from '@/utils/rss-parser';
8
+
9
+ export const route: Route = {
10
+ path: '/',
11
+ categories: ['finance'],
12
+ example: '/decrypt',
13
+ parameters: {},
14
+ features: {
15
+ requireConfig: false,
16
+ requirePuppeteer: false,
17
+ antiCrawler: false,
18
+ supportBT: false,
19
+ supportPodcast: false,
20
+ supportScihub: false,
21
+ },
22
+ name: 'News',
23
+ maintainers: ['pseudoyu'],
24
+ handler,
25
+ radar: [
26
+ {
27
+ source: ['decrypt.co/'],
28
+ target: '/',
29
+ },
30
+ ],
31
+ description: 'Get latest news from Decrypt.',
32
+ };
33
+
34
+ async function handler(ctx): Promise<Data> {
35
+ const limit = ctx.req.query('limit') ? Number.parseInt(ctx.req.query('limit')) : 20;
36
+ const rssUrl = 'https://decrypt.co/feed';
37
+
38
+ const feed = await parser.parseURL(rssUrl);
39
+
40
+ const items = await Promise.all(
41
+ feed.items
42
+ .filter((item) => item && item.link && !item.link.includes('/videos'))
43
+ .slice(0, limit)
44
+ .map((item) =>
45
+ cache.tryGet(`decrypt:article:${item.link}`, async () => {
46
+ if (!item.link) {
47
+ return {};
48
+ }
49
+
50
+ try {
51
+ const result = await extractFullText(item.link);
52
+ return {
53
+ title: item.title || 'Untitled',
54
+ link: item.link.split('?')[0], // Clean URL by removing query parameters
55
+ pubDate: item.pubDate ? parseDate(item.pubDate) : undefined,
56
+ description: result?.fullText ?? (item.content || ''),
57
+ author: item.creator || 'Decrypt',
58
+ category: result?.tags ? [...new Set([...(item.categories ?? []), ...result.tags])] : item.categories || [],
59
+ guid: item.guid || item.link,
60
+ image: result?.featuredImage ?? item.enclosure?.url,
61
+ };
62
+ } catch (error: any) {
63
+ logger.warn(`Couldn't fetch full content for ${item.link}: ${error.message}`);
64
+
65
+ // Fallback to RSS content
66
+ return {
67
+ title: item.title || 'Untitled',
68
+ link: item.link.split('?')[0],
69
+ pubDate: item.pubDate ? parseDate(item.pubDate) : undefined,
70
+ description: item.content || '',
71
+ author: item.creator || 'Decrypt',
72
+ category: item.categories || [],
73
+ guid: item.guid || item.link,
74
+ image: item.enclosure?.url,
75
+ };
76
+ }
77
+ })
78
+ )
79
+ );
80
+
81
+ return {
82
+ title: feed.title || 'Decrypt',
83
+ link: feed.link || 'https://decrypt.co',
84
+ description: feed.description || 'Latest news from Decrypt',
85
+ item: items,
86
+ language: feed.language || 'en',
87
+ image: feed.image?.url,
88
+ } as Data;
89
+ }
90
+
91
+ async function extractFullText(url: string): Promise<{ fullText: string; featuredImage: string; tags: string[] } | null> {
92
+ try {
93
+ const response = await ofetch(url);
94
+
95
+ const $ = load(response);
96
+
97
+ const nextData = JSON.parse($('script#__NEXT_DATA__').text());
98
+ const post = nextData.props.pageProps.post;
99
+
100
+ if (post.content.length) {
101
+ const fullText = `<img src="${post.featuredImage.src}" alt="${post.featuredImage.alt}">` + post.content;
102
+
103
+ return {
104
+ fullText,
105
+ featuredImage: post.featuredImage.src,
106
+ tags: post.tags.data.map((tag) => tag.name),
107
+ };
108
+ }
109
+
110
+ return null;
111
+ } catch (error) {
112
+ logger.error(`Error extracting full text from ${url}: ${error}`);
113
+ return null;
114
+ }
115
+ }
@@ -0,0 +1,7 @@
1
+ import type { Namespace } from '@/types';
2
+
3
+ export const namespace: Namespace = {
4
+ name: 'Decrypt',
5
+ url: 'decrypt.co',
6
+ lang: 'en',
7
+ };
@@ -1,6 +1,6 @@
1
1
  import { Route } from '@/types';
2
2
  import cache from '@/utils/cache';
3
- import got from '@/utils/got';
3
+ import ofetch from '@/utils/ofetch';
4
4
  import { load } from 'cheerio';
5
5
  import iconv from 'iconv-lite';
6
6
  import { parseDate } from '@/utils/parse-date';
@@ -22,14 +22,13 @@ function fixUrl(itemLink, baseUrl) {
22
22
  // discuz 7.x 与 discuz x系列 通用文章内容抓取
23
23
  async function loadContent(itemLink, charset, header) {
24
24
  // 处理编码问题
25
- const response = await got({
25
+ const response = await ofetch.raw(itemLink, {
26
26
  method: 'get',
27
- url: itemLink,
28
- responseType: 'buffer',
27
+ responseType: 'arrayBuffer',
29
28
  headers: header,
30
29
  });
31
30
 
32
- const responseData = iconv.decode(response.data, charset ?? 'utf-8');
31
+ const responseData = iconv.decode(Buffer.from(response._data), charset ?? 'utf-8');
33
32
  if (!responseData) {
34
33
  const description = '获取详细内容失败';
35
34
  return { description };
@@ -77,14 +76,13 @@ async function handler(ctx) {
77
76
  Cookie: cookie,
78
77
  };
79
78
 
80
- const response = await got({
79
+ const response = await ofetch.raw(link, {
81
80
  method: 'get',
82
- url: link,
83
- responseType: 'buffer',
81
+ responseType: 'arrayBuffer',
84
82
  headers: header,
85
83
  });
86
84
 
87
- const responseData = response.data;
85
+ const responseData = Buffer.from(response._data);
88
86
  // 若没有指定编码,则默认utf-8
89
87
  const contentType = response.headers['content-type'] || '';
90
88
  let $ = load(iconv.decode(responseData, 'utf-8'));