smart-pool 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +9 -0
- package/package.json +46 -0
- package/readme.md +1007 -0
- package/src/heap.js +130 -0
- package/src/index.d.ts +118 -0
- package/src/index.js +996 -0
package/src/index.js
ADDED
|
@@ -0,0 +1,996 @@
|
|
|
1
|
+
import { Worker } from "node:worker_threads";
|
|
2
|
+
import { PriorityHeap } from "./heap.js";
|
|
3
|
+
|
|
4
|
+
export default function leap(initialConcurrency, globalOptions = {}) {
|
|
5
|
+
if (typeof initialConcurrency !== "number" || initialConcurrency < 1) {
|
|
6
|
+
throw new Error("initialConcurrency must be a number >= 1");
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
if (
|
|
10
|
+
globalOptions.minConcurrency !== undefined &&
|
|
11
|
+
globalOptions.minConcurrency < 1
|
|
12
|
+
) {
|
|
13
|
+
throw new Error("minConcurrency must be >= 1");
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
if (
|
|
17
|
+
globalOptions.maxConcurrency !== undefined &&
|
|
18
|
+
globalOptions.minConcurrency !== undefined &&
|
|
19
|
+
globalOptions.maxConcurrency < globalOptions.minConcurrency
|
|
20
|
+
) {
|
|
21
|
+
throw new Error("maxConcurrency must be >= minConcurrency");
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
if (
|
|
25
|
+
globalOptions.circuitThreshold !== undefined &&
|
|
26
|
+
globalOptions.circuitThreshold < 1
|
|
27
|
+
) {
|
|
28
|
+
throw new Error("circuitThreshold must be >= 1");
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
if (globalOptions.batchSize !== undefined && globalOptions.batchSize < 1) {
|
|
32
|
+
throw new Error("batchSize must be >= 1");
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
if (globalOptions.interval !== undefined && globalOptions.interval < 1) {
|
|
36
|
+
throw new Error("interval must be >= 1");
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
const subQueues = new Map();
|
|
40
|
+
const emitter = globalOptions.emitter ?? null;
|
|
41
|
+
const emit = (event, data) => emitter?.emit?.(event, data);
|
|
42
|
+
|
|
43
|
+
const { onEnqueue, onDequeue, beforeExecute, afterExecute } = globalOptions;
|
|
44
|
+
|
|
45
|
+
// Validate a worker-script path before it is handed to new Worker().
// Throws on non-string/empty paths, on traversal characters, and — when a
// whitelist is configured — on paths outside the whitelisted roots.
// Returns true when the path is acceptable.
const validateWorkerPath = (path) => {
  if (typeof path !== "string" || path.length === 0) {
    throw new Error("Worker path must be a non-empty string");
  }
  if (path.includes("..") || path.includes("~")) {
    throw new Error("Worker path cannot contain '..' or '~'");
  }
  if (globalOptions.workerPathWhitelist) {
    // Require an exact match or a true directory-prefix match. A bare
    // startsWith() check would let "/opt/workers-evil/x" pass a whitelist
    // entry of "/opt/workers".
    const isWhitelisted = globalOptions.workerPathWhitelist.some(
      (allowed) =>
        path === allowed ||
        path.startsWith(allowed.endsWith("/") ? allowed : `${allowed}/`)
    );
    if (!isWhitelisted) {
      throw new Error(`Worker path '${path}' is not in whitelist`);
    }
  }
  return true;
};
|
|
62
|
+
|
|
63
|
+
const createPoolInstance = (concurrency, options) => {
|
|
64
|
+
const queue = new PriorityHeap();
|
|
65
|
+
const blockedTasks = new Map();
|
|
66
|
+
const completedTasks = new Map();
|
|
67
|
+
const pendingCache = new Map();
|
|
68
|
+
const typeRateLimitState = new Map();
|
|
69
|
+
const circuitBreakers = new Map();
|
|
70
|
+
const batchBuffers = new Map();
|
|
71
|
+
const batchTimers = new Map();
|
|
72
|
+
const workerPool = [];
|
|
73
|
+
const activeWorkers = new Set();
|
|
74
|
+
const rateLimitTimers = new Set();
|
|
75
|
+
const abortListeners = new Map();
|
|
76
|
+
const workerWaitingQueue = [];
|
|
77
|
+
const scheduledRateLimitChecks = new Set();
|
|
78
|
+
|
|
79
|
+
let currentLoad = 0;
|
|
80
|
+
let activeCount = 0;
|
|
81
|
+
let currentConcurrency = concurrency;
|
|
82
|
+
let isDraining = false;
|
|
83
|
+
let isPaused = false;
|
|
84
|
+
let seqCounter = 0;
|
|
85
|
+
let idleResolver = null;
|
|
86
|
+
let errorsInCycle = [];
|
|
87
|
+
let latencies = [];
|
|
88
|
+
let minPriority = Infinity;
|
|
89
|
+
let maxPriority = -Infinity;
|
|
90
|
+
let priorityTracker = { min: Infinity, max: -Infinity, count: 0 };
|
|
91
|
+
|
|
92
|
+
const config = {
|
|
93
|
+
maxWorkerPoolSize: options.workerPoolSize ?? 0,
|
|
94
|
+
circuitThreshold: options.circuitThreshold ?? 5,
|
|
95
|
+
circuitResetTimeout: options.circuitResetTimeout ?? 30000,
|
|
96
|
+
adaptive: options.adaptive ?? false,
|
|
97
|
+
minC: options.minConcurrency ?? 1,
|
|
98
|
+
maxC: options.maxConcurrency ?? concurrency * 2,
|
|
99
|
+
completedTaskCleanupMs: options.completedTaskCleanupMs ?? 60000,
|
|
100
|
+
batchSize: options.batchSize ?? 10,
|
|
101
|
+
batchTimeout: options.batchTimeout ?? 100,
|
|
102
|
+
initialRetryDelay: options.initialRetryDelay ?? 100,
|
|
103
|
+
retryFactor: options.retryFactor ?? 2,
|
|
104
|
+
maxRetryDelay: options.maxRetryDelay ?? 10000,
|
|
105
|
+
maintenanceInterval: options.interval ?? 1000,
|
|
106
|
+
maxLatencyHistory: options.maxLatencyHistory ?? 10000,
|
|
107
|
+
maxErrorHistory: options.maxErrorHistory ?? 1000,
|
|
108
|
+
maxQueueSize: options.maxQueueSize ?? 10000,
|
|
109
|
+
adaptiveLatencyLow: options.adaptiveLatencyLow ?? 50,
|
|
110
|
+
adaptiveLatencyHigh: options.adaptiveLatencyHigh ?? 200,
|
|
111
|
+
};
|
|
112
|
+
|
|
113
|
+
const metrics = {
|
|
114
|
+
totalTasks: 0,
|
|
115
|
+
successfulTasks: 0,
|
|
116
|
+
failedTasks: 0,
|
|
117
|
+
startTime: Date.now(),
|
|
118
|
+
allLatencies: [],
|
|
119
|
+
latencyLock: false,
|
|
120
|
+
get throughput() {
|
|
121
|
+
const elapsedSec = (Date.now() - this.startTime) / 1000;
|
|
122
|
+
return elapsedSec > 0
|
|
123
|
+
? (this.successfulTasks / elapsedSec).toFixed(2)
|
|
124
|
+
: 0;
|
|
125
|
+
},
|
|
126
|
+
get errorRate() {
|
|
127
|
+
return this.totalTasks > 0
|
|
128
|
+
? (this.failedTasks / this.totalTasks).toFixed(4)
|
|
129
|
+
: 0;
|
|
130
|
+
},
|
|
131
|
+
get percentiles() {
|
|
132
|
+
this.latencyLock = true;
|
|
133
|
+
const snapshot = [...this.allLatencies];
|
|
134
|
+
this.latencyLock = false;
|
|
135
|
+
|
|
136
|
+
if (snapshot.length === 0)
|
|
137
|
+
return { p50: "0.00", p90: "0.00", p99: "0.00" };
|
|
138
|
+
const sorted = snapshot.sort((a, b) => a - b);
|
|
139
|
+
const getP = (p) => {
|
|
140
|
+
const idx = Math.max(0, Math.ceil((p / 100) * sorted.length) - 1);
|
|
141
|
+
return sorted[idx].toFixed(2);
|
|
142
|
+
};
|
|
143
|
+
return { p50: getP(50), p90: getP(90), p99: getP(99) };
|
|
144
|
+
},
|
|
145
|
+
};
|
|
146
|
+
|
|
147
|
+
// Fetch (lazily creating) the circuit-breaker record for a task type.
// Untyped tasks share the "default" breaker.
const getCircuitBreaker = (type) => {
  const key = type || "default";
  let breaker = circuitBreakers.get(key);
  if (breaker === undefined) {
    breaker = { openUntil: 0, consecutiveFailures: 0, lock: false };
    circuitBreakers.set(key, breaker);
  }
  return breaker;
};
|
|
158
|
+
|
|
159
|
+
// Periodic housekeeping: priority aging/decay, completed-task GC,
// circuit-breaker reset, then a scheduling kick via next().
const maintenanceInterval = setInterval(() => {
  // Boost priorities of tasks that have waited past agingThreshold.
  if (options.agingThreshold && queue.size() > 0) {
    queue.adjustPriorities(
      options.agingThreshold,
      options.agingBoost || 1,
      false
    );
    rebuildPriorityTracker();
  }
  // Decay priorities of tasks that have waited past decayThreshold.
  if (options.decayThreshold && queue.size() > 0) {
    queue.adjustPriorities(
      options.decayThreshold,
      options.decayAmount || 1,
      true
    );
    rebuildPriorityTracker();
  }

  // Drop completed-task markers older than the cleanup window, but keep
  // any id that a still-blocked task depends on.
  const cutoff = Date.now() - config.completedTaskCleanupMs;
  const toDelete = [];
  for (const [id, time] of completedTasks.entries()) {
    if (time < cutoff) {
      const hasBlockedDeps = Array.from(blockedTasks.values()).some(
        (tasks) => tasks.some((t) => t.dependsOn?.includes(id))
      );
      if (!hasBlockedDeps) toDelete.push(id);
    }
  }
  toDelete.forEach((id) => completedTasks.delete(id));

  // Close breakers whose open window has elapsed. The lock flag guards
  // against re-entrancy while the record is being reset.
  for (const [type, breaker] of circuitBreakers.entries()) {
    const now = Date.now();
    if (
      now >= breaker.openUntil &&
      breaker.openUntil > 0 &&
      !breaker.lock
    ) {
      breaker.lock = true;
      breaker.consecutiveFailures = 0;
      breaker.openUntil = 0;
      breaker.lock = false;
      emit("circuit:closed", { type });
    }
  }

  // Re-attempt scheduling in case anything above unblocked work.
  next();
}, config.maintenanceInterval);
|
|
206
|
+
|
|
207
|
+
// Recompute the min/max/count priority summary by scanning the heap.
// O(n); used after bulk priority changes or removals.
const rebuildPriorityTracker = () => {
  const count = queue.size();
  if (count === 0) {
    priorityTracker = { min: Infinity, max: -Infinity, count: 0 };
    return;
  }
  let lo = Infinity;
  let hi = -Infinity;
  for (const entry of queue.heap) {
    const p = entry.priority;
    if (p < lo) lo = p;
    if (p > hi) hi = p;
  }
  priorityTracker = { min: lo, max: hi, count };
};
|
|
221
|
+
|
|
222
|
+
// Incrementally fold a newly-queued priority into the tracker summary.
const updatePriorityOnPush = (priority) => {
  priorityTracker.count += 1;
  priorityTracker.min = Math.min(priorityTracker.min, priority);
  priorityTracker.max = Math.max(priorityTracker.max, priority);
};
|
|
227
|
+
|
|
228
|
+
// Account for a dequeued task in the tracker. Popping can invalidate the
// min/max bounds, so for small queues we recompute exactly.
const updatePriorityOnPop = () => {
  priorityTracker.count -= 1;
  if (priorityTracker.count === 0) {
    priorityTracker = { min: Infinity, max: -Infinity, count: 0 };
    return;
  }
  // Cheap enough to rescan while the queue is small.
  if (priorityTracker.count < 100) {
    rebuildPriorityTracker();
  }
};
|
|
236
|
+
|
|
237
|
+
// Adaptive concurrency: once at least 10 latency samples accumulate,
// grow the limit when average latency is low and shrink it when high,
// staying within [minC, maxC]. Samples are consumed on each adjustment.
const adjustConcurrency = () => {
  if (!config.adaptive) return;
  if (latencies.length < 10) return;

  let total = 0;
  for (const sample of latencies) total += sample;
  const avg = total / latencies.length;
  latencies = [];

  const canGrow = currentConcurrency < config.maxC;
  const canShrink = currentConcurrency > config.minC;

  if (avg < config.adaptiveLatencyLow && canGrow) {
    currentConcurrency += 1;
    emit("concurrency:adjust", {
      concurrency: currentConcurrency,
      reason: "low_latency",
    });
  } else if (avg > config.adaptiveLatencyHigh && canShrink) {
    currentConcurrency -= 1;
    emit("concurrency:adjust", {
      concurrency: currentConcurrency,
      reason: "high_latency",
    });
  }
};
|
|
259
|
+
|
|
260
|
+
// Fixed-window rate limiter. Returns true when the given task type is
// currently over its limit (caller should stop dequeuing), false when a
// slot was consumed or no limit applies. When throttled, schedules a
// single wake-up per type at the end of the current window.
const checkRateLimit = (type) => {
  const taskType = type || "default";
  // Per-type limit takes precedence; otherwise fall back to the pool-wide
  // tasksPerInterval/interval pair when configured.
  const limit =
    options.rateLimits?.[taskType] ||
    (options.tasksPerInterval
      ? {
          interval: options.interval || 1000,
          tasksPerInterval: options.tasksPerInterval,
        }
      : null);

  if (!limit) return false;

  if (!typeRateLimitState.has(taskType)) {
    typeRateLimitState.set(taskType, { count: 0, windowStart: Date.now() });
  }

  const state = typeRateLimitState.get(taskType);
  const now = Date.now();
  const elapsed = now - state.windowStart;

  // Roll the window over: reset the counter for a fresh interval.
  if (elapsed >= limit.interval) {
    state.count = 0;
    state.windowStart = now;
  }

  if (state.count >= limit.tasksPerInterval) {
    const remainingTime = limit.interval - elapsed;
    if (!scheduledRateLimitChecks.has(taskType)) {
      scheduledRateLimitChecks.add(taskType);
      const timerId = setTimeout(() => {
        // Remove the fired timer id; without this the rateLimitTimers Set
        // grows unboundedly (it is only otherwise emptied in clear()).
        rateLimitTimers.delete(timerId);
        scheduledRateLimitChecks.delete(taskType);
        next();
      }, remainingTime);
      rateLimitTimers.add(timerId);
    }
    return true;
  }

  state.count++;
  return false;
};
|
|
305
|
+
|
|
306
|
+
// Acquire a worker wrapper for `path`: reuse an idle pooled worker, spawn
// a new one while under the pool cap, or wait for a release.
// NOTE(review): config.maxWorkerPoolSize defaults to 0 (options.workerPoolSize
// unset), in which case the spawn branch below never runs and callers wait
// here forever — confirm workerPoolSize is documented as required for
// worker-based tasks.
const getWorker = async (path) => {
  // Prefer an idle worker for the same script that is still alive.
  let available = workerPool.find(
    (w) => w.path === path && !w.busy && activeWorkers.has(w.worker)
  );
  if (available) {
    available.busy = true;
    return available;
  }

  // Room in the pool: spawn a new worker and mark it busy immediately.
  if (workerPool.length < config.maxWorkerPoolSize) {
    const worker = new Worker(path);
    const wrapper = { worker, path, busy: true };
    workerPool.push(wrapper);
    activeWorkers.add(worker);
    return wrapper;
  }

  // Pool exhausted: park the caller until releaseWorker() hands one over.
  return new Promise((resolve) => {
    workerWaitingQueue.push({ path, resolve });
  });
};
|
|
327
|
+
|
|
328
|
+
// Return a worker to the pool; if someone is waiting for this script
// path, hand the worker straight over instead of idling it.
const releaseWorker = (wrapper) => {
  wrapper.busy = false;
  const idx = workerWaitingQueue.findIndex((w) => w.path === wrapper.path);
  if (idx === -1) return;
  const [waiter] = workerWaitingQueue.splice(idx, 1);
  wrapper.busy = true;
  waiter.resolve(wrapper);
};
|
|
339
|
+
|
|
340
|
+
// Tear down a pooled worker; termination failures are reported via the
// emitter rather than thrown.
const terminateWorker = async (entry) => {
  try {
    activeWorkers.delete(entry.worker);
    await entry.worker.terminate();
  } catch (err) {
    emit("worker:terminate:error", { path: entry.path, error: err });
  }
};
|
|
348
|
+
|
|
349
|
+
// Run one task (inline function or worker message round-trip) with
// circuit-breaker gating, optional timeout/abort, retries with
// exponential backoff, and metrics bookkeeping. Settles the task's
// promise via its captured resolve/reject; never throws.
const executeTask = async (taskData) => {
  const {
    task,
    resolve,
    reject,
    type,
    retryCount,
    timeout,
    signal,
    worker: workerOptions,
  } = taskData;
  const breaker = getCircuitBreaker(type);
  const now = Date.now();

  // Fail fast while the breaker for this type is open.
  if (now < breaker.openUntil) {
    reject(
      new Error(`Circuit breaker open for type: ${type || "default"}`)
    );
    return;
  }

  // Caller (next()) undoes these in its .finally() handler.
  currentLoad += taskData.weight || 1;
  taskData.isActive = true;
  activeCount++;
  beforeExecute?.(taskData);

  const startTime = Date.now();
  const startMem = process.memoryUsage().heapUsed;
  let retries = 0;
  let lastError = null;
  // Per-task retryCount overrides the pool-wide default.
  const maxRetries = retryCount ?? options.retryCount ?? 0;

  while (retries <= maxRetries) {
    try {
      let result;
      let timeoutId;
      let abortHandler;

      const taskTimeout = timeout ?? 0;

      // Worker tasks post data to a pooled worker and await a
      // { type: "result", data } message; plain tasks just call task().
      const executePromise = workerOptions
        ? (async () => {
            const wrapper = await getWorker(workerOptions.path);
            return new Promise((res, rej) => {
              const handleMessage = (msg) => {
                if (msg.type === "result") {
                  wrapper.worker.off("message", handleMessage);
                  wrapper.worker.off("error", handleError);
                  releaseWorker(wrapper);
                  res(msg.data);
                }
              };
              const handleError = (err) => {
                wrapper.worker.off("message", handleMessage);
                wrapper.worker.off("error", handleError);
                releaseWorker(wrapper);
                // Worker errors trip the breaker immediately, before the
                // retry loop's own accounting sees them.
                breaker.consecutiveFailures++;
                if (
                  breaker.consecutiveFailures >= config.circuitThreshold
                ) {
                  breaker.openUntil =
                    Date.now() + config.circuitResetTimeout;
                  emit("circuit:open", { type: type || "default" });
                }
                rej(err);
              };
              wrapper.worker.on("message", handleMessage);
              wrapper.worker.on("error", handleError);
              wrapper.worker.postMessage(workerOptions.data);
            });
          })()
        : task();

      // Race the task against (optional) timeout and abort promises.
      const promises = [executePromise];

      if (taskTimeout > 0) {
        const timeoutPromise = new Promise((_, rej) => {
          timeoutId = setTimeout(
            () => rej(new Error("Task timeout")),
            taskTimeout
          );
        });
        promises.push(timeoutPromise);
      }

      if (signal) {
        const abortPromise = new Promise((_, rej) => {
          abortHandler = () => rej(new Error("Task aborted"));
          signal.addEventListener("abort", abortHandler);
        });
        promises.push(abortPromise);
      }

      result = await Promise.race(promises);

      // Success: unwind the per-attempt timeout/abort wiring.
      if (timeoutId) clearTimeout(timeoutId);
      if (abortHandler && signal) {
        signal.removeEventListener("abort", abortHandler);
        abortListeners.delete(taskData);
      }

      const duration = Date.now() - startTime;
      const memDelta = process.memoryUsage().heapUsed - startMem;

      // Skip recording while percentiles getter is snapshotting.
      if (!metrics.latencyLock) {
        metrics.allLatencies.push(duration);
        if (metrics.allLatencies.length > config.maxLatencyHistory) {
          metrics.allLatencies.shift();
        }
      }

      // Short rolling window feeding adjustConcurrency().
      latencies.push(duration);
      if (latencies.length > 100) latencies.shift();

      breaker.consecutiveFailures = 0;
      metrics.successfulTasks++;
      afterExecute?.(taskData, {
        duration,
        memoryDelta: memDelta,
        status: "success",
      });
      resolve(result);
      return;
    } catch (err) {
      lastError = err;
      retries++;

      if (retries > maxRetries) {
        const duration = Date.now() - startTime;
        const memDelta = process.memoryUsage().heapUsed - startMem;

        // Aborts/timeouts are caller-initiated and do not count against
        // the circuit breaker.
        if (
          err.message !== "Task aborted" &&
          err.message !== "Task timeout"
        ) {
          breaker.consecutiveFailures++;
          if (breaker.consecutiveFailures >= config.circuitThreshold) {
            breaker.openUntil = Date.now() + config.circuitResetTimeout;
            emit("circuit:open", { type: type || "default" });
          }
        }

        metrics.failedTasks++;
        errorsInCycle.push(err);
        if (errorsInCycle.length > config.maxErrorHistory) {
          errorsInCycle.shift();
        }
        afterExecute?.(taskData, {
          duration,
          memoryDelta: memDelta,
          status: "failure",
          error: err.message,
        });
        reject(err);
        return;
      }

      // Exponential backoff: initialRetryDelay * retryFactor^(n-1),
      // capped at maxRetryDelay.
      const delay = Math.min(
        config.initialRetryDelay *
          Math.pow(config.retryFactor, retries - 1),
        config.maxRetryDelay
      );
      await new Promise((r) => setTimeout(r, delay));
    }
  }
};
|
|
515
|
+
|
|
516
|
+
// Move every buffered task for a batch key into the run queue (or into
// dependency tracking), cancel the batch timer, and kick the scheduler.
const flushBatch = (batchKey) => {
  const pending = batchBuffers.get(batchKey);
  if (!pending || pending.length === 0) return;

  batchBuffers.delete(batchKey);
  const timer = batchTimers.get(batchKey);
  if (timer !== undefined) {
    clearTimeout(timer);
    batchTimers.delete(batchKey);
  }

  for (const entry of pending) {
    const hasDeps = entry.dependsOn !== undefined && entry.dependsOn.length > 0;
    if (hasDeps) {
      checkDependencies(entry);
    } else {
      queue.push(entry);
      updatePriorityOnPush(entry.priority);
    }
  }
  next();
};
|
|
537
|
+
|
|
538
|
+
// Enqueue a task once all of its dependencies have completed; otherwise
// register it under each unresolved dependency id in blockedTasks.
const checkDependencies = (taskData) => {
  const waitingOn = taskData.dependsOn.filter(
    (id) => !completedTasks.has(id)
  );

  if (waitingOn.length === 0) {
    queue.push(taskData);
    updatePriorityOnPush(taskData.priority);
    next();
    return;
  }

  for (const depId of waitingOn) {
    let blocked = blockedTasks.get(depId);
    if (blocked === undefined) {
      blocked = [];
      blockedTasks.set(depId, blocked);
    }
    // Guard against double registration when re-checked after a flush.
    if (!blocked.includes(taskData)) {
      blocked.push(taskData);
    }
  }
};
|
|
556
|
+
|
|
557
|
+
// Scheduler core: dequeue and launch tasks while capacity allows, honor
// weight/rate/deadline constraints, and resolve the idle waiter when the
// pool fully quiesces. Re-entered from every completion path.
const next = () => {
  if (isPaused || isDraining) return;

  // Empty queue: if nothing is running, blocked, or batched, report idle.
  if (queue.size() === 0) {
    if (
      activeCount === 0 &&
      blockedTasks.size === 0 &&
      batchBuffers.size === 0 &&
      idleResolver
    ) {
      idleResolver({
        errors: errorsInCycle,
        failed: errorsInCycle.length > 0,
        metrics,
      });
      idleResolver = null;
      errorsInCycle = [];
    }
    return;
  }

  while (activeCount < currentConcurrency && queue.size() > 0) {
    const taskData = queue.peek();
    if (!taskData) break;

    // Weighted admission: don't start a heavy task that would exceed the
    // load budget — unless the pool is empty (avoids starving tasks whose
    // weight alone exceeds currentConcurrency).
    const weight = taskData.weight || 1;
    if (activeCount > 0 && currentLoad + weight > currentConcurrency) {
      break;
    }

    // Over the rate limit for this type: a wake-up is already scheduled.
    if (checkRateLimit(taskData.type)) break;

    queue.pop();
    updatePriorityOnPop();
    onDequeue?.(taskData);

    // Deadline already missed: fail without executing.
    if (taskData.deadline && Date.now() > taskData.deadline) {
      taskData.reject(new Error("Task deadline exceeded"));
      metrics.failedTasks++;
      continue;
    }

    metrics.totalTasks++;

    // executeTask settles the task's promise itself; .finally() does the
    // capacity bookkeeping and releases dependents.
    executeTask(taskData).finally(() => {
      currentLoad -= taskData.weight || 1;
      taskData.isActive = false;
      activeCount--;

      if (taskData.id) {
        completedTasks.set(taskData.id, Date.now());
        // Re-check every task that was blocked on this id.
        const blocked = blockedTasks.get(taskData.id);
        if (blocked) {
          blockedTasks.delete(taskData.id);
          for (const waiting of blocked) {
            checkDependencies(waiting);
          }
        }
      }

      // Drop the in-flight dedupe entry so future calls re-run.
      if (taskData.cacheKey) {
        pendingCache.delete(taskData.cacheKey);
      }

      adjustConcurrency();
      next();
    });
  }
};
|
|
626
|
+
|
|
627
|
+
// The pool's callable entry point: schedule `task` with per-call options
// (or a bare number as priority) and return a promise for its result.
// Handles dedupe (cacheKey), batching (batchKey), dependencies
// (dependsOn), and abort signals.
const poolInstance = (task, options) => {
  if (typeof task !== "function") {
    throw new Error("Task must be a function");
  }

  // A numeric second argument is shorthand for { priority }.
  const opts =
    typeof options === "number" ? { priority: options } : options || {};

  if (opts.worker) validateWorkerPath(opts.worker.path);

  // In-flight dedupe: same cacheKey AND same function returns the
  // already-pending promise.
  if (opts.cacheKey) {
    const cached = pendingCache.get(opts.cacheKey);
    if (cached && cached.task === task) return cached.promise;
  }

  if (isDraining) {
    return Promise.reject(new Error("Pool is draining"));
  }

  if (queue.size() >= config.maxQueueSize) {
    return Promise.reject(new Error("Queue is full"));
  }

  const taskPromise = new Promise((resolve, reject) => {
    // Spread opts last so per-call fields (id, tags, deadline, …) ride
    // along on the task record.
    const taskData = {
      task,
      resolve,
      reject,
      seq: seqCounter++,
      priority: opts.priority ?? 0,
      weight: opts.weight ?? 1,
      dependsOn: opts.dependsOn || [],
      cycles: 0,
      isActive: false,
      ...opts,
    };
    onEnqueue?.(taskData);

    // Abort while queued: reject and remove the task from the pool.
    if (opts.signal) {
      const abortHandler = () => {
        reject(new Error("Task aborted"));
        poolInstance.cancel((t) => t === taskData);
      };
      opts.signal.addEventListener("abort", abortHandler);
      abortListeners.set(taskData, abortHandler);
    }

    if (opts.batchKey) {
      // Buffer until batchSize is reached or batchTimeout fires.
      if (!batchBuffers.has(opts.batchKey))
        batchBuffers.set(opts.batchKey, []);
      const buffer = batchBuffers.get(opts.batchKey);
      buffer.push(taskData);

      if (buffer.length >= config.batchSize) {
        flushBatch(opts.batchKey);
      } else if (!batchTimers.has(opts.batchKey)) {
        const timerId = setTimeout(() => {
          flushBatch(opts.batchKey);
        }, config.batchTimeout);
        batchTimers.set(opts.batchKey, timerId);
      }
    } else if (taskData.dependsOn.length > 0) {
      checkDependencies(taskData);
    } else {
      queue.push(taskData);
      updatePriorityOnPush(taskData.priority);
      next();
    }
  });

  // Rejection cleanup: detach abort listeners and drop the task from its
  // batch buffer so nothing holds a reference to the dead task.
  // NOTE(review): the abortListeners lookup matches by t.task === task,
  // which can pick a different record when the same function is enqueued
  // multiple times with signals — confirm intended.
  taskPromise.catch(() => {
    const opts =
      typeof options === "number" ? { priority: options } : options || {};

    const taskData = Array.from(abortListeners.keys()).find(
      (t) => t.task === task
    );

    if (taskData) {
      const handler = abortListeners.get(taskData);
      if (handler && taskData.signal) {
        taskData.signal.removeEventListener("abort", handler);
      }
      abortListeners.delete(taskData);
    }

    if (opts.batchKey) {
      const buffer = batchBuffers.get(opts.batchKey);
      if (buffer) {
        const filtered = buffer.filter((t) => t.task !== task);
        if (filtered.length === 0) {
          batchBuffers.delete(opts.batchKey);
          const timer = batchTimers.get(opts.batchKey);
          if (timer) {
            clearTimeout(timer);
            batchTimers.delete(opts.batchKey);
          }
        } else {
          batchBuffers.set(opts.batchKey, filtered);
        }
      }
    }
  });

  if (opts.cacheKey) {
    pendingCache.set(opts.cacheKey, { task, promise: taskPromise });
  }

  return taskPromise;
};
|
|
737
|
+
|
|
738
|
+
// Stop dequeuing new tasks; already-running tasks finish normally.
poolInstance.pause = () => {
  isPaused = true;
};
// Resume dequeuing and immediately try to schedule work.
poolInstance.resume = () => {
  isPaused = false;
  next();
};
// Resolve once the pool is fully quiet (no active, queued, blocked, or
// batched tasks). Flushes batch buffers first so buffered work runs.
// NOTE(review): only a single idleResolver is stored — a second
// concurrent onIdle() call overwrites the first resolver, whose promise
// then never settles. Confirm single-waiter usage is intended.
poolInstance.onIdle = () => {
  for (const key of batchBuffers.keys()) flushBatch(key);
  return new Promise((r) =>
    activeCount === 0 &&
    queue.size() === 0 &&
    blockedTasks.size === 0 &&
    batchBuffers.size === 0
      ? r({ errors: [], failed: false, metrics })
      : (idleResolver = r)
  );
};
// Reject new submissions and wait for in-flight work to finish.
poolInstance.drain = () => {
  isDraining = true;
  return poolInstance.onIdle();
};
|
|
760
|
+
|
|
761
|
+
// Cancel pending tasks matching `query` (a predicate, or an object with
// id/tag). Sweeps the run queue, batch buffers, and blocked tasks;
// rejects each match once and returns the number cancelled.
poolInstance.cancel = (query) => {
  const match =
    typeof query === "function"
      ? query
      : (t) =>
          (query.id && t.id === query.id) ||
          (query.tag && t.tags?.includes(query.tag));
  let count = 0;
  // Track the priority range of removed tasks to decide whether the
  // min/max tracker needs rebuilding afterwards.
  let minRemoved = Infinity;
  let maxRemoved = -Infinity;

  // Phase 1: the priority queue.
  queue.remove((t) => {
    if (match(t)) {
      if (t.cacheKey) pendingCache.delete(t.cacheKey);

      const handler = abortListeners.get(t);
      if (handler && t.signal) {
        t.signal.removeEventListener("abort", handler);
      }
      abortListeners.delete(t);

      if (t.priority < minRemoved) minRemoved = t.priority;
      if (t.priority > maxRemoved) maxRemoved = t.priority;

      t.reject(new Error("Task cancelled via API"));
      count++;
      return true;
    }
    return false;
  });

  // Phase 2: batch buffers (delete an emptied buffer and its timer).
  for (const [batchKey, buffer] of batchBuffers.entries()) {
    const initialLen = buffer.length;
    const filtered = buffer.filter((task) => {
      if (match(task)) {
        if (task.cacheKey) pendingCache.delete(task.cacheKey);

        const handler = abortListeners.get(task);
        if (handler && task.signal) {
          task.signal.removeEventListener("abort", handler);
        }
        abortListeners.delete(task);

        task.reject(new Error("Task cancelled via API"));
        count++;
        return false;
      }
      return true;
    });

    if (filtered.length === 0) {
      batchBuffers.delete(batchKey);
      if (batchTimers.has(batchKey)) {
        clearTimeout(batchTimers.get(batchKey));
        batchTimers.delete(batchKey);
      }
    } else if (filtered.length !== initialLen) {
      batchBuffers.set(batchKey, filtered);
    }
  }

  // Phase 3: blocked tasks. A task blocked on several dependencies
  // appears under each dep id; the Set ensures one reject/count per task.
  const cancelledInBlocked = new Set();
  for (const [depId, tasks] of blockedTasks.entries()) {
    const initialLen = tasks.length;
    const filtered = tasks.filter((task) => {
      if (match(task)) {
        if (!cancelledInBlocked.has(task)) {
          if (task.cacheKey) pendingCache.delete(task.cacheKey);

          const handler = abortListeners.get(task);
          if (handler && task.signal) {
            task.signal.removeEventListener("abort", handler);
          }
          abortListeners.delete(task);

          task.reject(new Error("Task cancelled via API"));
          count++;
          cancelledInBlocked.add(task);
        }
        return false;
      }
      return true;
    });

    if (filtered.length === 0) {
      blockedTasks.delete(depId);
    } else if (filtered.length !== initialLen) {
      blockedTasks.set(depId, filtered);
    }
  }

  // Only rebuild the tracker when a removed task could have been an
  // extreme of the tracked range.
  if (
    count > 0 &&
    (minRemoved <= priorityTracker.min || maxRemoved >= priorityTracker.max)
  ) {
    rebuildPriorityTracker();
  }

  return count;
};
|
|
861
|
+
|
|
862
|
+
// Override the concurrency limit and immediately attempt to schedule
// more work at the new limit.
poolInstance.setConcurrency = (limit) => {
  currentConcurrency = limit;
  next();
};

// Inspect (without removing) the next task that would be dequeued.
poolInstance.peek = () => queue.peek();

// Remove queued tasks matching `predicate`; rebuilds the priority
// tracker when anything was actually removed.
poolInstance.remove = (predicate) => {
  const removed = queue.remove(predicate);
  if (removed) rebuildPriorityTracker();
  return removed;
};
|
|
876
|
+
|
|
877
|
+
// Full teardown: stop timers, reject every pending task with
// "Pool cleared", reset state maps, terminate pooled workers, and clear
// sub-queues (main pool only). The pool should not be reused afterwards
// (the maintenance interval is not restarted).
poolInstance.clear = async () => {
  clearInterval(maintenanceInterval);
  rateLimitTimers.forEach(clearTimeout);
  rateLimitTimers.clear();

  // Snapshot before clearing so rejects can't mutate the live heap.
  const queuedTasks = [...queue.heap];
  queue.clear();

  activeCount = 0;
  currentLoad = 0;

  for (const task of queuedTasks) {
    task.reject(new Error("Pool cleared"));
  }

  for (const [, tasks] of blockedTasks) {
    for (const task of tasks) {
      task.reject(new Error("Pool cleared"));
    }
  }
  blockedTasks.clear();

  for (const [, buffer] of batchBuffers) {
    for (const task of buffer) {
      task.reject(new Error("Pool cleared"));
    }
  }
  batchBuffers.clear();

  batchTimers.forEach(clearTimeout);
  batchTimers.clear();

  completedTasks.clear();
  pendingCache.clear();
  circuitBreakers.clear();

  // Detach abort listeners so cleared tasks can be garbage collected.
  for (const [task, handler] of abortListeners.entries()) {
    if (task.signal) {
      task.signal.removeEventListener("abort", handler);
    }
  }
  abortListeners.clear();

  // allSettled: one failed termination must not block the rest.
  const terminationPromises = workerPool.map((w) => terminateWorker(w));
  await Promise.allSettled(terminationPromises);

  workerPool.length = 0;
  activeWorkers.clear();
  workerWaitingQueue.length = 0;

  // Only the main pool has useQueue; cascade clear() into sub-queues.
  if (poolInstance.useQueue) {
    for (const sub of subQueues.values()) {
      await sub.clear();
    }
    subQueues.clear();
  }
};
|
|
934
|
+
|
|
935
|
+
// Schedule fn(item) for every item through the pool. With
// throwOnError (default), behaves like Promise.all; otherwise returns a
// same-order array of values or rejection reasons.
poolInstance.map = async (items, fn, opts) => {
  const normalized =
    typeof opts === "number" ? { priority: opts } : opts || {};
  const throwOnError = normalized.throwOnError ?? true;

  const promises = [];
  for (const item of items) {
    promises.push(poolInstance(() => fn(item), normalized));
  }

  if (throwOnError) {
    return Promise.all(promises);
  }

  const outcomes = await Promise.allSettled(promises);
  return outcomes.map((outcome) =>
    outcome.status === "fulfilled" ? outcome.value : outcome.reason
  );
};
|
|
954
|
+
|
|
955
|
+
// Snapshot of each pooled worker: script path, busy flag, and whether it
// is still tracked as alive.
poolInstance.getWorkerHealth = () =>
  workerPool.map(({ worker, path, busy }) => ({
    path,
    busy,
    active: activeWorkers.has(worker),
  }));
|
|
962
|
+
|
|
963
|
+
// Read-only accessors over the pool's closure state.
Object.defineProperties(poolInstance, {
  // Number of tasks currently executing.
  activeCount: { get: () => activeCount },
  // Tasks not yet running: queued + dependency-blocked + batch-buffered.
  pendingCount: {
    get: () => {
      const blockedCount = Array.from(blockedTasks.values()).reduce(
        (acc, tasks) => acc + tasks.length,
        0
      );
      const batchCount = Array.from(batchBuffers.values()).reduce(
        (acc, tasks) => acc + tasks.length,
        0
      );
      return queue.size() + blockedCount + batchCount;
    },
  },
  // Sum of weights of currently-executing tasks.
  currentLoad: { get: () => currentLoad },
  concurrency: { get: () => currentConcurrency },
  isDraining: { get: () => isDraining },
  isPaused: { get: () => isPaused },
  metrics: { get: () => metrics },
});
|
|
984
|
+
|
|
985
|
+
return poolInstance;
|
|
986
|
+
};
|
|
987
|
+
|
|
988
|
+
const mainPool = createPoolInstance(initialConcurrency, globalOptions);
|
|
989
|
+
// Get or lazily create a named sub-pool sharing the global options but
// with its own concurrency limit and state.
mainPool.useQueue = (name, concurrency = initialConcurrency) => {
  let sub = subQueues.get(name);
  if (sub === undefined) {
    sub = createPoolInstance(concurrency, globalOptions);
    subQueues.set(name, sub);
  }
  return sub;
};
|
|
994
|
+
|
|
995
|
+
return mainPool;
|
|
996
|
+
}
|