@atlaspack/cache 3.1.1-canary.3 → 3.1.1-canary.300
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +379 -0
- package/dist/FSCache.js +171 -0
- package/dist/IDBCache.browser.js +123 -0
- package/dist/IDBCache.js +10 -0
- package/dist/LMDBLiteCache.js +223 -0
- package/dist/constants.js +5 -0
- package/dist/index.js +19 -0
- package/dist/types.js +2 -0
- package/lib/FSCache.js +35 -3
- package/lib/IDBCache.browser.js +7 -6
- package/lib/IDBCache.js +1 -1
- package/lib/LMDBLiteCache.js +110 -41
- package/lib/types/FSCache.d.ts +27 -0
- package/lib/types/IDBCache.browser.d.ts +22 -0
- package/lib/types/IDBCache.d.ts +4 -0
- package/lib/types/LMDBLiteCache.d.ts +78 -0
- package/lib/types/constants.d.ts +1 -0
- package/lib/types/index.d.ts +4 -0
- package/lib/types/types.d.ts +2 -0
- package/package.json +14 -14
- package/src/{FSCache.js → FSCache.ts} +30 -15
- package/src/{IDBCache.browser.js → IDBCache.browser.ts} +8 -10
- package/src/{IDBCache.js → IDBCache.ts} +1 -2
- package/src/{LMDBLiteCache.js → LMDBLiteCache.ts} +119 -48
- package/src/{constants.js → constants.ts} +0 -2
- package/src/{index.js → index.ts} +0 -2
- package/src/{types.js → types.ts} +0 -1
- package/test/LMDBLiteCache.test.ts +241 -0
- package/test/workerThreadsTest.js +42 -0
- package/tsconfig.json +27 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/index.d.ts +0 -12
- package/lib/types.d.ts +0 -2
- package/test/LMDBLiteCache.test.js +0 -33
package/src/{FSCache.js → LMDBLiteCache.ts} (renamed from package/src/LMDBLiteCache.js)

@@ -1,5 +1,3 @@
-// @flow strict-local
-
 import {
   deserialize,
   registerSerializableClass,
@@ -10,17 +8,17 @@ import {Lmdb} from '@atlaspack/rust';
 import type {FilePath} from '@atlaspack/types';
 import type {Cache} from './types';
 import type {Readable, Writable} from 'stream';
-
+// @ts-expect-error TS7016
+import ncp from 'ncp';
+import {promisify} from 'util';
 import stream from 'stream';
 import path from 'path';
-import {promisify} from 'util';
-
 import {NodeFS} from '@atlaspack/fs';
-
-// $FlowFixMe
 import packageJson from '../package.json';
-
 import {FSCache} from './FSCache';
+import {instrumentAsync} from '@atlaspack/logger';
+
+const ncpAsync = promisify(ncp);

 interface DBOpenOptions {
   name: string;
@@ -35,11 +33,6 @@ export class LmdbWrapper {

   constructor(lmdb: Lmdb) {
     this.lmdb = lmdb;
-
-    // $FlowFixMe
-    this[Symbol.dispose] = () => {
-      this.lmdb.close();
-    };
   }

   has(key: string): boolean {
@@ -60,7 +53,21 @@
     await this.lmdb.put(key, buffer);
   }

-
+  *keys(): Iterable<string> {
+    const PAGE_SIZE = 10000000;
+
+    let currentKeys = this.lmdb.keysSync(0, PAGE_SIZE);
+    while (currentKeys.length > 0) {
+      for (const key of currentKeys) {
+        yield key;
+      }
+      currentKeys = this.lmdb.keysSync(currentKeys.length, PAGE_SIZE);
+    }
+  }
+
+  compact(targetPath: string) {
+    this.lmdb.compact(targetPath);
+  }
 }

 export function open(
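Illustrative sketch (not part of the diff): the new generator-based `keys()` above pages through the underlying LMDB store instead of loading every key at once, and `LMDBLiteCache.keys()` (added further down) delegates to it. A consumer might drain it like this; the `@atlaspack/cache` import specifier assumes the package index re-exports `LMDBLiteCache`, as the tests' `'../src/index'` import suggests.

import {LMDBLiteCache} from '@atlaspack/cache';

async function countCacheEntries(cacheDir: string): Promise<number> {
  const cache = new LMDBLiteCache(cacheDir);
  await cache.ensure();

  // keys() is a lazy Iterable<string>; pages of keys are pulled from LMDB
  // on demand rather than materialising the full key set up front.
  const keys: string[] = [];
  for (const key of cache.keys()) {
    keys.push(key);
  }
  return keys.length;
}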
@@ -80,23 +87,28 @@ export function open(
   );
 }

-const pipeline: (Readable, Writable) => Promise<void> = promisify(
+const pipeline: (arg1: Readable, arg2: Writable) => Promise<void> = promisify(
   stream.pipeline,
 );

-export type SerLMDBLiteCache = {
-  dir: FilePath
-};
+export type SerLMDBLiteCache = {
+  dir: FilePath;
+};

 export class LMDBLiteCache implements Cache {
   fs: NodeFS;
   dir: FilePath;
   store: LmdbWrapper;
   fsCache: FSCache;
+  /**
+   * Directory where we store raw files.
+   */
+  cacheFilesDirectory: FilePath;

   constructor(cacheDir: FilePath) {
     this.fs = new NodeFS();
     this.dir = cacheDir;
+    this.cacheFilesDirectory = path.join(cacheDir, 'files');
     this.fsCache = new FSCache(this.fs, cacheDir);

     this.store = open(cacheDir, {
@@ -117,6 +129,7 @@ export class LMDBLiteCache implements Cache {
     if (!getFeatureFlag('cachePerformanceImprovements')) {
       await this.fsCache.ensure();
     }
+    await this.fs.mkdirp(this.cacheFilesDirectory);
     return Promise.resolve();
   }

@@ -134,7 +147,7 @@ export class LMDBLiteCache implements Cache {
     return Promise.resolve(this.store.has(key));
   }

-  get<T>(key: string): Promise
+  get<T>(key: string): Promise<T | null | undefined> {
     let data = this.store.get(key);
     if (data == null) {
       return Promise.resolve(null);
@@ -143,19 +156,29 @@
     return Promise.resolve(deserialize(data));
   }

-  async set(key: string, value:
+  async set(key: string, value: unknown): Promise<void> {
     await this.setBlob(key, serialize(value));
   }

   getStream(key: string): Readable {
-
+    if (!getFeatureFlag('cachePerformanceImprovements')) {
+      return this.fs.createReadStream(path.join(this.dir, key));
+    }
+
+    return this.fs.createReadStream(this.getFileKey(key));
   }

-  setStream(key: string, stream: Readable): Promise<void> {
-
-
-
-
+  async setStream(key: string, stream: Readable): Promise<void> {
+    if (!getFeatureFlag('cachePerformanceImprovements')) {
+      return pipeline(
+        stream,
+        this.fs.createWriteStream(path.join(this.dir, key)),
+      );
+    }
+
+    const filePath = this.getFileKey(key);
+    await this.fs.mkdirp(path.dirname(filePath));
+    return pipeline(stream, this.fs.createWriteStream(filePath));
   }

   // eslint-disable-next-line require-await
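A minimal usage sketch (not from the diff) of the reworked stream methods: `setStream` is now async, and with the `cachePerformanceImprovements` feature flag enabled it writes under the new `files/` directory via `getFileKey`, creating parent directories first; with the flag off it keeps the old behaviour of writing directly into the cache root. The import path is an assumption, as noted earlier.

import {Readable} from 'stream';
import {LMDBLiteCache} from '@atlaspack/cache';

async function roundTrip(cacheDir: string): Promise<string> {
  const cache = new LMDBLiteCache(cacheDir);
  await cache.ensure();

  // Write a stream into the cache; setStream now awaits the full pipeline.
  await cache.setStream('bundle-output', Readable.from(['console.log("hi");']));

  // Read it back as a stream and buffer it, purely for the sake of the example.
  const chunks: Buffer[] = [];
  for await (const chunk of cache.getStream('bundle-output')) {
    chunks.push(Buffer.from(chunk));
  }
  return Buffer.concat(chunks).toString('utf8');
}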
@@ -175,43 +198,39 @@ export class LMDBLiteCache implements Cache {
     await this.store.put(key, contents);
   }

-  getBuffer(key: string): Promise
+  getBuffer(key: string): Promise<Buffer | null | undefined> {
     return Promise.resolve(this.store.get(key));
   }

-  #getFilePath(key: string, index: number): string {
-    return path.join(this.dir, `${key}-${index}`);
-  }
-
   hasLargeBlob(key: string): Promise<boolean> {
     if (!getFeatureFlag('cachePerformanceImprovements')) {
       return this.fsCache.hasLargeBlob(key);
     }
-
+
+    return this.fs.exists(this.getFileKey(key));
   }

-  /**
-   * @deprecated Use getBlob instead.
-   */
   getLargeBlob(key: string): Promise<Buffer> {
     if (!getFeatureFlag('cachePerformanceImprovements')) {
       return this.fsCache.getLargeBlob(key);
     }
-    return
+    return this.fs.readFile(this.getFileKey(key));
   }

-  /**
-   * @deprecated Use setBlob instead.
-   */
-  setLargeBlob(
+  async setLargeBlob(
     key: string,
     contents: Buffer | string,
-    options?: {
+    options?: {
+      signal?: AbortSignal;
+    },
   ): Promise<void> {
     if (!getFeatureFlag('cachePerformanceImprovements')) {
       return this.fsCache.setLargeBlob(key, contents, options);
     }
-
+
+    const targetPath = this.getFileKey(key);
+    await this.fs.mkdirp(path.dirname(targetPath));
+    return this.fs.writeFile(targetPath, contents);
   }

   /**
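Hedged sketch of the large-blob path after this hunk: with `cachePerformanceImprovements` on, `setLargeBlob`, `getLargeBlob`, and `hasLargeBlob` bypass `FSCache` and read or write the file at `getFileKey(key)` directly (creating the parent directory on write); with the flag off they still delegate to `FSCache` as before. Import specifier assumed as above.

import {LMDBLiteCache} from '@atlaspack/cache';

async function storeAndReadLargeArtifact(
  cacheDir: string,
  key: string,
  contents: Buffer,
): Promise<Buffer | null> {
  const cache = new LMDBLiteCache(cacheDir);
  await cache.ensure();

  // Writes to `<cacheDir>/files/<key>` when the feature flag is enabled.
  await cache.setLargeBlob(key, contents);

  return (await cache.hasLargeBlob(key)) ? cache.getLargeBlob(key) : null;
}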
@@ -225,12 +244,64 @@ export class LMDBLiteCache implements Cache {
     return this.store.delete(key);
   }

-
-
-
-
-
-    this.
+  keys(): Iterable<string> {
+    return this.store.keys();
+  }
+
+  async compact(targetPath: string): Promise<void> {
+    await this.fs.mkdirp(targetPath);
+
+    const files = await this.fs.readdir(this.dir);
+    // copy all files except data.mdb and lock.mdb to the target path (recursive)
+    for (const file of files) {
+      const filePath = path.join(this.dir, file);
+
+      if (file === 'data.mdb' || file === 'lock.mdb') {
+        continue;
+      }
+
+      await ncpAsync(filePath, path.join(targetPath, file));
+    }
+
+    this.store.compact(path.join(targetPath, 'data.mdb'));
+  }
+
+  refresh(): void {}
+
+  /**
+   * Streams, packages are stored in files instead of LMDB.
+   *
+   * On this case, if a cache key happens to have a parent traversal, ../..
+   * it is treated specially
+   *
+   * That is, something/../something and something are meant to be different
+   * keys.
+   *
+   * Plus we do not want to store values outside of the cache directory.
+   */
+  getFileKey(key: string): string {
+    const cleanKey = key
+      .split('/')
+      .map((part) => {
+        if (part === '..') {
+          return '$$__parent_dir$$';
+        }
+        return part;
+      })
+      .join('/');
+    return path.join(this.cacheFilesDirectory, cleanKey);
+  }
+
+  async clear(): Promise<void> {
+    await instrumentAsync('LMDBLiteCache::clear', async () => {
+      const keys = await this.keys();
+      for (const key of keys) {
+        await this.store.delete(key);
+      }
+
+      await this.fs.rimraf(this.cacheFilesDirectory);
+      await this.fs.mkdirp(this.cacheFilesDirectory);
+    });
   }
 }

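Illustrative only, mirroring the assertions in the test file added below: `getFileKey` maps cache keys onto `<cacheDir>/files/…` and escapes `..` segments as a literal `$$__parent_dir$$` directory, so `something/../something` and `something` remain distinct keys and no path can escape the cache directory.

import {LMDBLiteCache} from '@atlaspack/cache';

const cache = new LMDBLiteCache('/tmp/atlaspack-cache');

// Plain keys land under the new files/ directory.
cache.getFileKey('key');
// => /tmp/atlaspack-cache/files/key

// Parent traversals are escaped rather than resolved.
cache.getFileKey('../../key');
// => /tmp/atlaspack-cache/files/$$__parent_dir$$/$$__parent_dir$$/key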
package/test/LMDBLiteCache.test.ts
ADDED

@@ -0,0 +1,241 @@
+import * as fs from 'fs';
+import * as path from 'path';
+import {tmpdir} from 'os';
+import {LMDBLiteCache} from '../src/index';
+import {deserialize, serialize} from 'v8';
+import assert from 'assert';
+import {Worker} from 'worker_threads';
+import {initializeMonitoring} from '@atlaspack/rust';
+
+const cacheDir = path.join(tmpdir(), 'lmdb-lite-cache-tests');
+
+describe('LMDBLiteCache', () => {
+  let cache: any;
+
+  beforeEach(async () => {
+    await fs.promises.rm(cacheDir, {recursive: true, force: true});
+  });
+
+  it('can be constructed', async () => {
+    cache = new LMDBLiteCache(cacheDir);
+    await cache.ensure();
+  });
+
+  it('can retrieve keys', async () => {
+    cache = new LMDBLiteCache(cacheDir);
+    await cache.ensure();
+    await cache.setBlob('key', Buffer.from(serialize({value: 42})));
+    const buffer = await cache.getBlob('key');
+    const result = deserialize(buffer);
+    assert.equal(result.value, 42);
+  });
+
+  it('can retrieve keys synchronously', async () => {
+    cache = new LMDBLiteCache(path.join(cacheDir, 'retrieve_keys_test'));
+    await cache.ensure();
+    await cache.setBlob('key', Buffer.from(serialize({value: 42})));
+    const buffer = cache.getBlobSync('key');
+    const result = deserialize(buffer);
+    assert.equal(result.value, 42);
+  });
+
+  it('can iterate over keys', async () => {
+    cache = new LMDBLiteCache(path.join(cacheDir, 'keys_test'));
+    await cache.ensure();
+    await cache.setBlob('key1', Buffer.from(serialize({value: 42})));
+    await cache.setBlob('key2', Buffer.from(serialize({value: 43})));
+    const keys = cache.keys();
+    assert.deepEqual(Array.from(keys), ['key1', 'key2']);
+  });
+
+  it('can compact databases', async () => {
+    cache = new LMDBLiteCache(path.join(cacheDir, 'compact_test'));
+    await cache.ensure();
+    await cache.setBlob('key1', Buffer.from(serialize({value: 42})));
+    await cache.setBlob('key2', Buffer.from(serialize({value: 43})));
+    await cache.compact(path.join(cacheDir, 'compact_test_compacted'));
+
+    cache = new LMDBLiteCache(path.join(cacheDir, 'compact_test_compacted'));
+    await cache.ensure();
+    const keys = cache.keys();
+    assert.deepEqual(Array.from(keys), ['key1', 'key2']);
+  });
+
+  describe('getFileKey', () => {
+    it('should return the correct key', () => {
+      const target = path.join(cacheDir, 'test-file-keys');
+      const cache = new LMDBLiteCache(target);
+      const key = cache.getFileKey('key');
+      assert.equal(key, path.join(target, 'files', 'key'));
+    });
+
+    it('should return the correct key for a key with a parent traversal', () => {
+      const target = path.join(cacheDir, 'test-parent-keys');
+      cache = new LMDBLiteCache(target);
+      const key = cache.getFileKey('../../key');
+      assert.equal(
+        key,
+        path.join(target, 'files', '$$__parent_dir$$/$$__parent_dir$$/key'),
+      );
+    });
+  });
+
+  it('can be closed and re-opened', async () => {
+    cache = new LMDBLiteCache(path.join(cacheDir, 'close_and_reopen_test'));
+    await cache.ensure();
+    await cache.setBlob('key', Buffer.from(serialize({value: 42})));
+    cache = new LMDBLiteCache(path.join(cacheDir, 'close_and_reopen_test'));
+    await cache.ensure();
+    const buffer = await cache.getBlob('key');
+    const result = deserialize(buffer);
+    assert.equal(result.value, 42);
+  });
+
+  it('should NOT fail when trying to open the same database twice', async () => {
+    const testDir = path.join(cacheDir, 'double_open_test');
+    const cache1 = new LMDBLiteCache(testDir);
+    await cache1.ensure();
+
+    assert.doesNotThrow(() => {
+      new LMDBLiteCache(testDir);
+    });
+  });
+
+  it('should NOT fail when trying to open after GC', async () => {
+    const testDir = path.join(cacheDir, 'gc_test');
+
+    let cache1 = new LMDBLiteCache(testDir);
+    await cache1.ensure();
+    await cache1.setBlob('key', Buffer.from(serialize({value: 42})));
+
+    cache1 = null;
+
+    if (global.gc) {
+      global.gc();
+    }
+
+    assert.doesNotThrow(() => {
+      new LMDBLiteCache(testDir);
+    });
+  });
+
+  it('should handle rapid open/close cycles', async () => {
+    const testDir = path.join(cacheDir, 'rapid_cycles_test');
+
+    for (let i = 0; i < 10; i++) {
+      const cache = new LMDBLiteCache(testDir);
+      await cache.ensure();
+      await cache.setBlob(`key${i}`, Buffer.from(serialize({value: i})));
+
+      await new Promise((resolve: any) => setTimeout(resolve, 10));
+    }
+
+    const finalCache = new LMDBLiteCache(testDir);
+    await finalCache.ensure();
+    const buffer = await finalCache.getBlob('key9');
+    const result = deserialize(buffer);
+    assert.equal(result.value, 9);
+  });
+
+  it('should work when there are multiple node.js worker threads accessing the same database', async function () {
+    this.timeout(40000);
+
+    try {
+      initializeMonitoring();
+    } catch (error: any) {
+      /* empty */
+    }
+
+    const testDir = path.join(cacheDir, 'worker_threads_test');
+
+    let cache = new LMDBLiteCache(testDir);
+    await cache.set('main_thread_key', {
+      mainThreadId: 0,
+      hello: 'world',
+    });
+    setTimeout(() => {
+      cache = null;
+
+      if (global.gc) {
+        global.gc();
+      }
+    }, Math.random() * 300);
+
+    const numWorkers = 10;
+
+    const workers: Array<any> = [];
+    const responsePromises: Array<any> = [];
+    for (let i = 0; i < numWorkers; i++) {
+      const worker = new Worker(path.join(__dirname, 'workerThreadsTest.js'), {
+        workerData: {
+          cacheDir: testDir,
+        },
+      });
+      workers.push(worker);
+
+      const responsePromise = new Promise((resolve: any, reject: any) => {
+        worker.addListener('error', (error: Error) => {
+          reject(error);
+        });
+        worker.addListener('message', (message: any) => {
+          resolve(message);
+        });
+      });
+
+      worker.addListener('message', (message: any) => {
+        // eslint-disable-next-line no-console
+        console.log('Worker message', message);
+      });
+      worker.addListener('online', () => {
+        worker.postMessage({
+          type: 'go',
+        });
+      });
+
+      responsePromises.push(responsePromise);
+    }
+
+    // eslint-disable-next-line no-console
+    console.log('Waiting for responses');
+    const responses = await Promise.all(responsePromises);
+
+    // eslint-disable-next-line no-console
+    console.log('Responses received');
+    for (const [index, response] of responses.entries()) {
+      const worker = workers[index];
+
+      assert.deepEqual(
+        response,
+        {
+          mainThreadData: {
+            mainThreadId: 0,
+            hello: 'world',
+          },
+          workerId: worker.threadId,
+        },
+        `worker_${index} - Worker ${worker.threadId} should have received the correct data`,
+      );
+    }
+
+    // eslint-disable-next-line no-console
+    console.log('Getting main thread key');
+    cache = new LMDBLiteCache(testDir);
+    const data = await cache?.get('main_thread_key');
+    assert.deepEqual(data, {
+      mainThreadId: 0,
+      hello: 'world',
+    });
+
+    // eslint-disable-next-line no-console
+    console.log('Getting worker keys');
+    for (const worker of workers) {
+      const data = await cache?.get(`worker_key/${worker.threadId}`);
+      assert.deepEqual(data, {
+        workerId: worker.threadId,
+      });
+
+      await new Promise((resolve: any) => setTimeout(resolve, 500));
+      worker.terminate();
+    }
+  });
+});
package/test/workerThreadsTest.js
ADDED

@@ -0,0 +1,42 @@
+/* eslint-disable no-inner-declarations */
+
+require('@atlaspack/babel-register');
+const {
+  workerData,
+  threadId,
+  parentPort,
+  isMainThread,
+} = require('worker_threads');
+const {LMDBLiteCache} = require('../src/index');
+
+if (!isMainThread) {
+  const cache = new LMDBLiteCache(workerData.cacheDir);
+
+  async function onMessage() {
+    try {
+      cache.set(`worker_key/${threadId}`, {
+        workerId: threadId,
+      });
+
+      const data = await cache.get('main_thread_key');
+
+      parentPort.postMessage({
+        mainThreadData: data,
+        workerId: threadId,
+      });
+
+      setTimeout(() => {
+        parentPort.postMessage({
+          type: 'close',
+          workerId: threadId,
+        });
+      }, Math.random() * 200);
+    } catch (error) {
+      parentPort.postMessage({
+        error: error.message,
+      });
+    }
+  }
+
+  parentPort.on('message', onMessage);
+}
package/tsconfig.json
ADDED

@@ -0,0 +1,27 @@
+{
+  "extends": "../../../tsconfig.base.json",
+  "include": ["./src/", "./package.json"],
+  "compilerOptions": {
+    "composite": true
+  },
+  "references": [
+    {
+      "path": "../build-cache/tsconfig.json"
+    },
+    {
+      "path": "../feature-flags/tsconfig.json"
+    },
+    {
+      "path": "../fs/tsconfig.json"
+    },
+    {
+      "path": "../logger/tsconfig.json"
+    },
+    {
+      "path": "../rust/tsconfig.json"
+    },
+    {
+      "path": "../utils/tsconfig.json"
+    }
+  ]
+}