@arcblock/crawler 1.0.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/dist/blocklet.d.ts +6 -0
  2. package/dist/blocklet.js +199 -0
  3. package/dist/cache.d.ts +10 -0
  4. package/dist/cache.js +119 -0
  5. package/dist/config.d.ts +10 -0
  6. package/dist/config.js +17 -0
  7. package/dist/crawler.d.ts +28 -0
  8. package/dist/crawler.js +314 -0
  9. package/dist/db/index.d.ts +1 -0
  10. package/dist/db/index.js +41 -0
  11. package/dist/db/job.d.ts +33 -0
  12. package/dist/db/job.js +54 -0
  13. package/dist/db/snapshot.d.ts +31 -0
  14. package/dist/db/snapshot.js +52 -0
  15. package/dist/index.d.ts +6 -0
  16. package/dist/index.js +45 -0
  17. package/dist/middleware.d.ts +4 -0
  18. package/dist/middleware.js +44 -0
  19. package/dist/puppeteer.d.ts +16 -0
  20. package/dist/puppeteer.js +318 -0
  21. package/dist/utils.d.ts +15 -0
  22. package/dist/utils.js +239 -0
  23. package/esm/blocklet.d.ts +6 -0
  24. package/esm/blocklet.js +190 -0
  25. package/esm/cache.d.ts +10 -0
  26. package/esm/cache.js +114 -0
  27. package/esm/config.d.ts +10 -0
  28. package/esm/config.js +11 -0
  29. package/esm/crawler.d.ts +28 -0
  30. package/esm/crawler.js +301 -0
  31. package/esm/db/index.d.ts +1 -0
  32. package/esm/db/index.js +35 -0
  33. package/esm/db/job.d.ts +33 -0
  34. package/esm/db/job.js +50 -0
  35. package/esm/db/snapshot.d.ts +31 -0
  36. package/esm/db/snapshot.js +48 -0
  37. package/esm/index.d.ts +6 -0
  38. package/esm/index.js +26 -0
  39. package/esm/middleware.d.ts +4 -0
  40. package/esm/middleware.js +41 -0
  41. package/esm/puppeteer.d.ts +16 -0
  42. package/esm/puppeteer.js +272 -0
  43. package/esm/utils.d.ts +15 -0
  44. package/esm/utils.js +220 -0
  45. package/package.json +10 -3
  46. package/src/blocklet.ts +0 -223
  47. package/src/cache.ts +0 -117
  48. package/src/config.ts +0 -13
  49. package/src/crawler.ts +0 -364
  50. package/src/db/index.ts +0 -27
  51. package/src/db/job.ts +0 -93
  52. package/src/db/snapshot.ts +0 -89
  53. package/src/index.ts +0 -19
  54. package/src/middleware.ts +0 -46
  55. package/src/puppeteer.ts +0 -296
  56. package/src/utils.ts +0 -240
  57. package/third.d.ts +0 -1
  58. package/tsconfig.json +0 -9
package/esm/utils.js ADDED
@@ -0,0 +1,220 @@
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ };
+ import { components, env } from '@blocklet/sdk/lib/config';
+ import axios from 'axios';
+ import flattenDeep from 'lodash/flattenDeep';
+ import uniq from 'lodash/uniq';
+ import { createHash } from 'node:crypto';
+ import robotsParser from 'robots-parser';
+ import { parseSitemap } from 'sitemap';
+ import { Readable } from 'stream';
+ import { joinURL } from 'ufo';
+ export const api = axios.create({
+ timeout: 1000 * 10,
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+ export const sleep = (ms) => {
+ return new Promise((resolve) => {
+ setTimeout(resolve, ms);
+ });
+ };
+ export const CRAWLER_FLAG = 'x-crawler';
+ export const isSelfCrawler = (req) => {
+ const ua = req.get('user-agent') || '';
+ return req.get(CRAWLER_FLAG) === 'true' || `${ua}`.toLowerCase().indexOf('headless') !== -1;
+ };
+ /**
+ * A default set of user agent patterns for bots/crawlers that do not perform
+ * well with pages that require JavaScript.
+ */
+ const botUserAgents = [
+ /bot/i,
+ /spider/i,
+ /facebookexternalhit/i,
+ /simplepie/i,
+ /yahooseeker/i,
+ /embedly/i,
+ /quora link preview/i,
+ /outbrain/i,
+ /vkshare/i,
+ /monit/i,
+ /Pingability/i,
+ /Monitoring/i,
+ /WinHttpRequest/i,
+ /Apache-HttpClient/i,
+ /getprismatic.com/i,
+ /python-requests/i,
+ /Twurly/i,
+ /yandex/i,
+ /browserproxy/i,
+ /crawler/i,
+ /Qwantify/i,
+ /Yahoo/i,
+ /pinterest/i,
+ /Tumblr/i,
+ /Tumblr Agent/i,
+ /WhatsApp/i,
+ /Google-Structured-Data-Testing-Tool/i,
+ /Google-InspectionTool/i,
+ /Googlebot/i,
+ /GPTBot/i,
+ /Applebot/i,
+ // AI bots
+ /Anthropic-ai/i,
+ /Claude-Web/i,
+ /anthropic-ai-scraper/i,
+ /Google-Extended/i,
+ /GoogleOther/i,
+ /CCBot\/\d/i,
+ /Bytespider/i,
+ /BingBot/i,
+ /Baiduspider/i,
+ /Sogou/i,
+ /Perplexity/i,
+ /Cohere-ai/i,
+ /xlts-bot/i,
+ /THAAS/i,
+ /YisouSpider/i,
+ /AlibabaGroup/i,
+ /adaptive-edge-crawler/i,
+ ];
+ const isSpider = (ua) => botUserAgents.some((spider) => {
+ return spider.test(ua);
+ });
+ /**
+ * A default set of file extensions for static assets that do not need to be
+ * proxied.
+ */
+ const staticFileExtensions = [
+ 'ai',
+ 'avi',
+ 'css',
+ 'dat',
+ 'dmg',
+ 'doc',
+ 'doc',
+ 'exe',
+ 'flv',
+ 'gif',
+ 'ico',
+ 'iso',
+ 'jpeg',
+ 'jpg',
+ 'js',
+ 'less',
+ 'm4a',
+ 'm4v',
+ 'mov',
+ 'mp3',
+ 'mp4',
+ 'mpeg',
+ 'mpg',
+ 'pdf',
+ 'png',
+ 'ppt',
+ 'psd',
+ 'rar',
+ 'rss',
+ 'svg',
+ 'swf',
+ 'tif',
+ 'torrent',
+ 'ttf',
+ 'txt',
+ 'wav',
+ 'wmv',
+ 'woff',
+ 'xls',
+ 'xml',
+ 'zip',
+ ];
+ export const getDefaultRobotsUrl = (url) => {
+ const { origin } = new URL(url);
+ return joinURL(origin, 'robots.txt?nocache=1');
+ };
+ export function getRobots(url) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const { origin } = new URL(url);
+ const robotsUrl = joinURL(origin, 'robots.txt?nocache=1');
+ const { data } = yield api.get(robotsUrl).catch(() => ({
+ data: '',
+ }));
+ return data ? robotsParser(robotsUrl, data) : null;
+ });
+ }
+ export const getDefaultSitemapUrl = (url) => {
+ const { origin } = new URL(url);
+ return joinURL(origin, 'sitemap.xml?nocache=1');
+ };
+ export const isAcceptCrawler = (url) => __awaiter(void 0, void 0, void 0, function* () {
+ const robots = yield getRobots(url);
+ const isAllowed = robots ? yield robots.isAllowed(url) : true;
+ return isAllowed;
+ });
+ export const getSitemapList = (url) => __awaiter(void 0, void 0, void 0, function* () {
+ let sitemapUrlList = [getDefaultSitemapUrl(url)];
+ const robots = yield getRobots(url);
+ if (robots) {
+ const robotsTxtSitemapUrlList = (yield robots.getSitemaps()) || [];
+ if (robotsTxtSitemapUrlList.length > 0) {
+ sitemapUrlList = robotsTxtSitemapUrlList;
+ }
+ }
+ // loop site map url list
+ const sitemapList = yield Promise.all(sitemapUrlList.map((sitemapUrl) => __awaiter(void 0, void 0, void 0, function* () {
+ const newUrl = new URL(sitemapUrl);
+ newUrl.searchParams.set('nocache', '1');
+ sitemapUrl = newUrl.toString();
+ const { data: sitemapTxt } = yield api.get(sitemapUrl).catch(() => ({
+ data: '',
+ }));
+ if (sitemapTxt) {
+ const stream = Readable.from([sitemapTxt]);
+ const sitemapJson = yield parseSitemap(stream);
+ return sitemapJson;
+ }
+ return [];
+ })));
+ return uniq(flattenDeep(sitemapList.filter(Boolean)));
+ });
+ export const isBotUserAgent = (req) => {
+ const ua = req.get('user-agent');
+ const excludeUrlPattern = new RegExp(`\\.(${staticFileExtensions.join('|')})$`, 'i');
+ if (ua === undefined || !isSpider(ua) || excludeUrlPattern.test(req.path)) {
+ return false;
+ }
+ return true;
+ };
+ export const getComponentInfo = () => {
+ return components.find((item) => item.did === env.componentDid) || {};
+ };
+ export const getFullUrl = (req) => {
+ const blockletPathname = req.headers['x-path-prefix']
+ ? joinURL(req.headers['x-path-prefix'], req.originalUrl)
+ : req.originalUrl;
+ return joinURL(env.appUrl, blockletPathname);
+ };
+ export const getRelativePath = (url) => {
+ try {
+ return new URL(url).pathname;
+ }
+ catch (error) {
+ // ignore error
+ }
+ return url;
+ };
+ export const formatUrl = (url) => {
+ return url.replace(/\/$/, '').trim();
+ };
+ export function md5(content) {
+ return createHash('md5').update(content).digest('hex');
+ }
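
Note: a minimal sketch of how the exported helpers above might be consumed from an Express app. The import path, route setup, and response handling are illustrative assumptions; only the function names (isSelfCrawler, isBotUserAgent) come from the file above.

// sketch.js (hypothetical consumer code, not part of this package)
import express from 'express';
// Assumed deep import of the published esm/ build; the package declares no "exports" map.
import { isSelfCrawler, isBotUserAgent } from '@arcblock/crawler/esm/utils.js';

const app = express();

app.use((req, res, next) => {
  // Skip requests made by the package's own headless crawler (x-crawler header or headless UA).
  if (isSelfCrawler(req)) return next();
  // Flag bot traffic that matches botUserAgents and is not a static asset path.
  if (isBotUserAgent(req)) {
    res.set('x-served-to-bot', 'true');
  }
  return next();
});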
package/package.json CHANGED
@@ -1,11 +1,15 @@
  {
  "name": "@arcblock/crawler",
- "version": "1.0.1",
+ "version": "1.0.2",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "publishConfig": {
  "access": "public"
  },
+ "files": [
+ "dist",
+ "esm"
+ ],
  "lint-staged": {
  "*.{mjs,js,jsx,ts,tsx}": [
  "prettier --write",
@@ -102,9 +106,12 @@
  "pre-commit": "npx lint-staged"
  },
  "scripts": {
- "dev": "tsc --watch",
+ "dev": "tsc -p tsconfig.cjs.json --watch",
  "lint": "tsc --noEmit && eslint src --ext .mjs,.js,.jsx,.ts,.tsx",
  "lint:fix": "npm run lint -- --fix",
- "bundle": "tsc"
+ "bundle": "npm run build",
+ "build:cjs": "tsc -p tsconfig.cjs.json",
+ "build:esm": "tsc -p tsconfig.esm.json",
+ "build": "npm run build:cjs && npm run build:esm"
  }
  }
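
Note: the new build:cjs and build:esm scripts reference tsconfig.cjs.json and tsconfig.esm.json, neither of which is published, so their contents are not part of this diff. A minimal sketch of what the ESM config could look like, assuming it extends a shared base and emits the esm/ output shown above (every field here is an assumption, not the package's actual file):

{
  // Hypothetical tsconfig.esm.json: ES module output into the published esm/ directory.
  "extends": "./tsconfig.json",
  "compilerOptions": {
    "module": "ESNext",
    "outDir": "esm",
    "declaration": true
  },
  "include": ["src"]
}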
package/src/blocklet.ts DELETED
@@ -1,223 +0,0 @@
- import Cron from '@abtnode/cron';
- import { components } from '@blocklet/sdk/lib/config';
- import debounce from 'lodash/debounce';
- import { joinURL } from 'ufo';
-
- import { useCache } from './cache';
- import { config, logger } from './config';
- import { createCrawlJob } from './crawler';
- import { closeBrowser, getBrowser } from './puppeteer';
- import { getComponentInfo, getRelativePath, getSitemapList } from './utils';
-
- // record crawl blocklet running
- const crawlBlockletRunningMap = new Map();
-
- // crawl blocklet sitemap urls
- export const crawlBlocklet = async () => {
- // @ts-ignore
- const { mountPoint, did } = getComponentInfo();
-
- if (crawlBlockletRunningMap.has(did) && crawlBlockletRunningMap.get(did)) {
- logger.info(`Crawler blocklet ${did} is running, skip it`);
- return;
- }
-
- // check has browser can use
- try {
- const browser = await getBrowser();
- if (!browser) {
- throw new Error('No Browser can use');
- }
- logger.info('Crawler blocklet existing can use browser');
- } catch (error: any) {
- logger.info(`Crawler blocklet abort by error: ${error?.message || error?.reason || error}`);
- return;
- }
-
- const { appUrl } = config;
-
- if (!appUrl) {
- throw new Error('appUrl not found');
- }
-
- const sitemapList = await getSitemapList(appUrl);
-
- const matchMountPoint = joinURL(appUrl, !mountPoint || mountPoint === '/' ? '' : mountPoint);
- const otherMountPointList = components
- .filter((item) => item.mountPoint && item.mountPoint !== mountPoint)
- .map((item) => item.mountPoint);
-
- // get can use loc
- const blockletLocList = sitemapList.filter((item: any) => {
- if (mountPoint !== '/') {
- return item?.url?.indexOf(matchMountPoint) > -1;
- }
- // if mountPoint is /, skip other mountPoint
- return otherMountPointList.every((mountPoint) => item?.url?.indexOf(mountPoint) === -1);
- }) as [];
-
- const canUseBlockletLocList = [] as string[];
- const lastmodMap = new Map();
- let skipBlockletLocTotal = 0;
- let blockletLocTotal = 0;
-
- await Promise.all(
- blockletLocList.map(async (item: any) => {
- let tempLocList: string[] = [];
-
- if (item.url) {
- tempLocList.push(item.url);
- }
-
- if (item?.links?.length > 0) {
- tempLocList.push(...item.links.map((ytem: any) => ytem.url));
- }
-
- blockletLocTotal += tempLocList.length;
-
- // @ts-ignore
- tempLocList = (
- await Promise.all(
- tempLocList.map(async (loc) => {
- try {
- const { lastModified: cacheLastModified } = await useCache.get(getRelativePath(loc));
-
- // sitemap item lastmod is same as cache lastModified, skip it
- if (
- item.lastmod &&
- cacheLastModified &&
- new Date(cacheLastModified).getTime() === new Date(item.lastmod).getTime()
- ) {
- skipBlockletLocTotal++;
- return false;
- }
-
- return loc;
- } catch (error) {
- // ignore error
- }
-
- // if can not get cache, return loc
- return loc;
- }),
- )
- ).filter(Boolean);
-
- tempLocList.forEach((loc) => {
- if (item.lastmod) lastmodMap.set(loc, item.lastmod);
- });
-
- canUseBlockletLocList.push(...tempLocList);
- }),
- );
-
- const crawlerLogText = (step = '') => [
- `Crawler sitemap.xml about ${did} ${step}: `,
- {
- blockletLocTotal,
- canUseBlockletLocTotal: canUseBlockletLocList.length,
- skipBlockletLocTotal,
- lastmodMapTotal: lastmodMap.size,
- },
- ];
-
- logger.info(...crawlerLogText('start'));
-
- try {
- // record crawl blocklet running
- crawlBlockletRunningMap.set(did, true);
-
- await createCrawlJob({
- // @ts-ignore
- urls: canUseBlockletLocList,
- saveToRedis: true,
- lastmodMap,
- // formatPageContent: async ({ page }: { page: any; url: string; lastmod?: string }) => {
- // const pageContent = await page.evaluate(() => {
- // const removeElements = (tagName: string) => {
- // const elements = document.querySelectorAll(tagName);
- // for (let i = elements.length - 1; i >= 0; i--) {
- // try {
- // elements[i]?.parentNode?.removeChild(elements[i] as Node);
- // } catch (error) {
- // // do noting
- // }
- // }
- // };
-
- // // remove script, style, link, noscript
- // // removeElements('script');
- // // removeElements('style');
- // // removeElements('link');
- // // removeElements('noscript');
-
- // // remove uploader
- // removeElements('[id="uploader-container"]');
- // removeElements('[class^="uppy-"]');
-
- // // remove point up component
- // removeElements('[id="point-up-component"]');
-
- // // add meta tag to record crawler
- // const meta = document.createElement('meta');
- // meta.name = 'blocklet-crawler';
- // meta.content = 'true';
- // document.head.appendChild(meta);
-
- // return document.documentElement.outerHTML;
- // });
-
- // return pageContent;
- // },
- });
-
- logger.info(...crawlerLogText('success'));
-
- await closeBrowser({
- trimCache: true,
- });
- } catch (error) {
- logger.info('Crawler blocklet abort by error', error);
- } finally {
- // delete crawl blocklet running
- crawlBlockletRunningMap.delete(did);
- }
- };
-
- const CRON_CRAWL_BLOCKLET_KEY = 'cron-crawl-blocklet';
- let cronCrawlBlockletJob = null as any;
-
- // init cron crawl blocklet
- export const initCronCrawlBlocklet = (
- {
- time = '0 0 */12 * * *', // every 12 hours
- options,
- } = {} as { time: string; options: any },
- ) => {
- if (!cronCrawlBlockletJob) {
- cronCrawlBlockletJob = Cron.init({
- context: {},
- jobs: [
- {
- name: CRON_CRAWL_BLOCKLET_KEY,
- time,
- fn: debounce(crawlBlocklet),
- options: { runOnInit: false, ...options },
- },
- ],
- onError: (err: Error) => {
- console.error('run job failed', err);
- },
- });
- }
-
- return cronCrawlBlockletJob;
- };
-
- export const cancelCronCrawlBlocklet = () => {
- if (cronCrawlBlockletJob) {
- cronCrawlBlockletJob.jobs[CRON_CRAWL_BLOCKLET_KEY].stop();
- cronCrawlBlockletJob = null;
- logger.info('Cron crawl blocklet stop, clear crawl queue');
- }
- };
package/src/cache.ts DELETED
@@ -1,117 +0,0 @@
- import { createPool } from 'generic-pool';
- import { createClient } from 'redis';
-
- import { config, logger } from './config';
-
- const cacheKeyPrefix = process.env?.BLOCKLET_REAL_DID ? `${process.env.BLOCKLET_REAL_DID}:` : '';
- const MAX_REDIS_RETRY = 3;
- const ttl = 1000 * 60 * 60 * 24 * 7;
-
- export const cachePool = createPool(
- {
- create: async () => {
- try {
- const { redisUrl } = config;
- const redisClient = createClient({
- url: redisUrl,
- socket: {
- // @ts-ignore
- reconnectStrategy: (retries) => {
- if (retries >= MAX_REDIS_RETRY) {
- return new Error('Retry Time Exhausted');
- }
- return Math.min(retries * 500, 1000 * 3);
- },
- },
- });
-
- redisClient.on('error', (err) => logger.warn('Redis Client Error:', err));
- await redisClient.connect();
- logger.info(`Successfully connected to Redis: ${redisUrl}`);
-
- return redisClient;
- } catch (error) {
- logger.warn('Redis connection failed', error);
- return null;
- }
- },
- destroy: async (client: any) => {
- // if is redis client
- if (client.isReady) {
- await client.quit();
- }
- },
- },
- {
- max: 2, // 2 clients
- min: 0,
- // evictionRunIntervalMillis: 0,
- },
- );
-
- export const memoryPool = createPool(
- {
- create: () => {
- const map = new Map<string, any>();
- // @ts-ignore
- map.del = map.delete;
- return Promise.resolve(map);
- },
- destroy: (client: Map<string, any>) => {
- client.clear();
- return Promise.resolve();
- },
- },
- {
- max: 10,
- min: 0,
- },
- );
-
- export const withCache = async (cb: Function) => {
- const pool = config.redisUrl ? cachePool : memoryPool;
- const client = await pool.acquire();
-
- if (client) {
- try {
- return cb(client);
- } finally {
- // release client to pool, let other use
- await pool.release(client);
- }
- }
- };
-
- export const formatKey = (key: string) => {
- return `${cacheKeyPrefix}${key}`;
- };
-
- export const useCache = {
- get: (key: string) => {
- return withCache(async (client: any) => {
- const value = await client.get(formatKey(key));
- try {
- return JSON.parse(value);
- } catch (error) {
- // ignore error
- }
- return value;
- });
- },
- set: (key: string, value: any, options?: any) => {
- return withCache((client: any) => {
- const formatValue = typeof value === 'string' ? value : JSON.stringify(value);
- return client.set(formatKey(key), formatValue, { PX: ttl, ...options });
- });
- },
- remove: (key: string) => {
- return withCache((client: any) => {
- return client.del(formatKey(key));
- });
- },
- list: (key: string = '*') => {
- return withCache((client: any) => {
- return client.keys(formatKey(key));
- });
- },
- };
package/src/config.ts DELETED
@@ -1,13 +0,0 @@
- import createLogger from '@blocklet/logger';
-
- export const logger = createLogger('crawler', { level: process.env.LOG_LEVEL || 'info' });
-
- export const config = {
- redisUrl: process.env.REDIS_URL!,
- dataDir: process.env.BLOCKLET_DATA_DIR!,
- appDir: process.env.BLOCKLET_APP_DIR! || process.cwd(),
- appUrl: process.env.BLOCKLET_APP_URL!,
- puppeteerPath: process.env.PUPPETEER_EXECUTABLE_PATH!,
- cacheDir: process.env.BLOCKLET_CACHE_DIR!,
- testOnInitialize: process.env.NODE_ENV === 'production',
- };