@qianxude/tem 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +303 -0
- package/package.json +42 -0
- package/src/core/index.ts +4 -0
- package/src/core/tem.ts +100 -0
- package/src/core/worker.ts +168 -0
- package/src/database/index.ts +114 -0
- package/src/database/schema.sql +45 -0
- package/src/index.ts +19 -0
- package/src/interfaces/index.ts +186 -0
- package/src/mock-server/README.md +352 -0
- package/src/mock-server/index.ts +3 -0
- package/src/mock-server/router.ts +235 -0
- package/src/mock-server/server.ts +148 -0
- package/src/mock-server/service.ts +122 -0
- package/src/mock-server/types.ts +62 -0
- package/src/services/batch.ts +121 -0
- package/src/services/index.ts +2 -0
- package/src/services/task.ts +176 -0
- package/src/utils/auto-detect.ts +487 -0
- package/src/utils/batch-monitor.ts +52 -0
- package/src/utils/concurrency.ts +44 -0
- package/src/utils/index.ts +9 -0
- package/src/utils/rate-limiter.ts +54 -0
|
@@ -0,0 +1,487 @@
|
|
|
1
|
+
/** Options controlling API constraint auto-detection. */
export interface DetectOptions {
  /** Target URL to probe. */
  url: string;
  /** HTTP method to use (defaults to 'GET'). */
  method?: 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH';
  /** Extra request headers sent with every probe. */
  headers?: Record<string, string>;
  /** Request body; JSON-serialized before sending. */
  body?: unknown;
  /** Per-request timeout in milliseconds (default 30000). */
  timeoutMs?: number;
  /** Upper bound for the concurrency search (default 100). */
  maxConcurrencyToTest?: number;
  /** How long to run the rate-limit burst test, in ms (default 10000). */
  rateLimitTestDurationMs?: number;
}
|
|
10
|
+
|
|
11
|
+
/** Recommended client configuration derived from detection (includes safety margins). */
export interface DetectedConfig {
  /** Recommended max concurrent requests (80% of the detected limit). */
  concurrency: number;
  /** Recommended rate limit (requests reduced to 90% of the detected budget). */
  rateLimit: {
    requests: number;
    windowMs: number;
  };
  /** How trustworthy the detection results are. */
  confidence: 'high' | 'medium' | 'low';
  /** Human-readable observations gathered during detection. */
  notes: string[];
}
|
|
20
|
+
|
|
21
|
+
/** Outcome of a single probe request. */
interface TestResult {
  // true when the HTTP response was 2xx (response.ok)
  success: boolean;
  // HTTP status; 408 is used for timeouts, 0 for other transport errors
  statusCode: number;
  // status was 502/503, treated as a concurrency-limit signal
  hasConcurrencyErrors: boolean;
  // status was 429
  hasRateLimitErrors: boolean;
  // parsed from Retry-After or X-RateLimit-Reset headers, when present
  retryAfterMs?: number;
  // transport-level error, if the request threw
  error?: Error;
}
|
|
29
|
+
|
|
30
|
+
/** Aggregated results of one batch of concurrent probe requests. */
interface ConcurrencyTestResults {
  // concurrency level that was tested
  level: number;
  // requests in the batch that returned 2xx
  successCount: number;
  // requests in the batch that did not return 2xx
  failCount: number;
  // any request in the batch hit a 502/503
  hasConcurrencyErrors: boolean;
  // any request in the batch hit a 429
  hasRateLimitErrors: boolean;
}
|
|
37
|
+
|
|
38
|
+
// Per-request timeout when the caller does not supply one.
const DEFAULT_TIMEOUT_MS = 30000;
// Upper bound for the concurrency search when not overridden.
const DEFAULT_MAX_CONCURRENCY = 100;
// Default duration of the rate-limit burst test.
const DEFAULT_RATE_LIMIT_TEST_DURATION_MS = 10000;
// Hard cap on total requests sent during rate-limit detection.
const MAX_RATE_LIMIT_TEST_REQUESTS = 200;
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* Detect API constraints including maximum concurrency and rate limits.
|
|
45
|
+
* Uses binary search for concurrency detection and burst testing for rate limits.
|
|
46
|
+
*/
|
|
47
|
+
export async function detectConstraints(options: DetectOptions): Promise<DetectedConfig> {
|
|
48
|
+
const timeoutMs = options.timeoutMs ?? DEFAULT_TIMEOUT_MS;
|
|
49
|
+
const maxConcurrency = options.maxConcurrencyToTest ?? DEFAULT_MAX_CONCURRENCY;
|
|
50
|
+
const rateLimitDurationMs = options.rateLimitTestDurationMs ?? DEFAULT_RATE_LIMIT_TEST_DURATION_MS;
|
|
51
|
+
|
|
52
|
+
const requestOptions: RequestOptions = {
|
|
53
|
+
url: options.url,
|
|
54
|
+
method: options.method ?? 'GET',
|
|
55
|
+
headers: options.headers ?? {},
|
|
56
|
+
body: options.body,
|
|
57
|
+
timeoutMs,
|
|
58
|
+
};
|
|
59
|
+
|
|
60
|
+
const notes: string[] = [];
|
|
61
|
+
|
|
62
|
+
// Phase 1: Detect concurrency limit
|
|
63
|
+
const detectedConcurrency = await detectConcurrency(requestOptions, maxConcurrency, notes);
|
|
64
|
+
|
|
65
|
+
// Phase 2: Detect rate limit using safe concurrency (80% of detected)
|
|
66
|
+
const safeConcurrency = Math.max(1, Math.floor(detectedConcurrency * 0.8));
|
|
67
|
+
const rateLimitResult = await detectRateLimit(requestOptions, safeConcurrency, rateLimitDurationMs, notes);
|
|
68
|
+
|
|
69
|
+
// Calculate confidence
|
|
70
|
+
const confidence = calculateConfidence(detectedConcurrency, rateLimitResult, notes);
|
|
71
|
+
|
|
72
|
+
// Generate recommended config with safety margins
|
|
73
|
+
const recommendedConcurrency = Math.max(1, Math.floor(detectedConcurrency * 0.8));
|
|
74
|
+
const recommendedRateLimit = {
|
|
75
|
+
requests: Math.max(1, Math.floor(rateLimitResult.requests * 0.9)),
|
|
76
|
+
windowMs: rateLimitResult.windowMs,
|
|
77
|
+
};
|
|
78
|
+
|
|
79
|
+
return {
|
|
80
|
+
concurrency: recommendedConcurrency,
|
|
81
|
+
rateLimit: recommendedRateLimit,
|
|
82
|
+
confidence,
|
|
83
|
+
notes,
|
|
84
|
+
};
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
/** Normalized request parameters shared by the probe helpers. */
interface RequestOptions {
  url: string;
  method: string;
  headers: Record<string, string>;
  // JSON-serialized before sending, when present
  body?: unknown;
  timeoutMs: number;
}
|
|
94
|
+
|
|
95
|
+
/**
|
|
96
|
+
* Detect maximum concurrency using exponential search followed by binary search.
|
|
97
|
+
*/
|
|
98
|
+
async function detectConcurrency(
|
|
99
|
+
options: RequestOptions,
|
|
100
|
+
maxToTest: number,
|
|
101
|
+
notes: string[]
|
|
102
|
+
): Promise<number> {
|
|
103
|
+
// Phase 1: Exponential search to find upper bound
|
|
104
|
+
let lower = 1;
|
|
105
|
+
let upper = 1;
|
|
106
|
+
|
|
107
|
+
while (upper < maxToTest) {
|
|
108
|
+
const result = await testConcurrentRequests(options, upper);
|
|
109
|
+
|
|
110
|
+
if (result.hasConcurrencyErrors) {
|
|
111
|
+
notes.push(`Concurrency limit found between ${lower} and ${upper}`);
|
|
112
|
+
break;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
if (result.hasRateLimitErrors) {
|
|
116
|
+
notes.push(`Hit rate limit at concurrency ${upper}, stopping concurrency search`);
|
|
117
|
+
break;
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
lower = upper;
|
|
121
|
+
upper *= 2;
|
|
122
|
+
|
|
123
|
+
// Stop if we're hitting rate limits consistently
|
|
124
|
+
if (upper > maxToTest) {
|
|
125
|
+
upper = maxToTest;
|
|
126
|
+
break;
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
// Phase 2: Binary search for exact limit
|
|
131
|
+
while (lower < upper - 1) {
|
|
132
|
+
const mid = Math.floor((lower + upper) / 2);
|
|
133
|
+
const result = await testConcurrentRequests(options, mid);
|
|
134
|
+
|
|
135
|
+
if (result.hasConcurrencyErrors || result.hasRateLimitErrors) {
|
|
136
|
+
upper = mid;
|
|
137
|
+
} else {
|
|
138
|
+
lower = mid;
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
// Final verification at detected limit
|
|
143
|
+
const finalResult = await testConcurrentRequests(options, lower);
|
|
144
|
+
if (finalResult.hasConcurrencyErrors) {
|
|
145
|
+
// Edge case: our detected limit actually fails, reduce
|
|
146
|
+
return Math.max(1, lower - 1);
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
return lower;
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
/**
|
|
153
|
+
* Test concurrent requests at a specific concurrency level.
|
|
154
|
+
*/
|
|
155
|
+
async function testConcurrentRequests(
|
|
156
|
+
options: RequestOptions,
|
|
157
|
+
concurrency: number
|
|
158
|
+
): Promise<ConcurrencyTestResults> {
|
|
159
|
+
const results: TestResult[] = [];
|
|
160
|
+
const abortController = new AbortController();
|
|
161
|
+
const { signal } = abortController;
|
|
162
|
+
|
|
163
|
+
// Create concurrent requests
|
|
164
|
+
const promises: Promise<void>[] = [];
|
|
165
|
+
const semaphore = new SimpleSemaphore(concurrency);
|
|
166
|
+
|
|
167
|
+
for (let i = 0; i < concurrency; i++) {
|
|
168
|
+
promises.push(
|
|
169
|
+
(async () => {
|
|
170
|
+
await semaphore.acquire();
|
|
171
|
+
try {
|
|
172
|
+
const result = await makeRequest(options, signal);
|
|
173
|
+
results.push(result);
|
|
174
|
+
|
|
175
|
+
// Early abort on auth errors
|
|
176
|
+
if (result.statusCode === 401 || result.statusCode === 403) {
|
|
177
|
+
abortController.abort();
|
|
178
|
+
}
|
|
179
|
+
} finally {
|
|
180
|
+
semaphore.release();
|
|
181
|
+
}
|
|
182
|
+
})()
|
|
183
|
+
);
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
await Promise.all(promises);
|
|
187
|
+
|
|
188
|
+
const successCount = results.filter((r) => r.success).length;
|
|
189
|
+
const failCount = results.length - successCount;
|
|
190
|
+
const hasConcurrencyErrors = results.some((r) => r.hasConcurrencyErrors);
|
|
191
|
+
const hasRateLimitErrors = results.some((r) => r.hasRateLimitErrors);
|
|
192
|
+
|
|
193
|
+
return {
|
|
194
|
+
level: concurrency,
|
|
195
|
+
successCount,
|
|
196
|
+
failCount,
|
|
197
|
+
hasConcurrencyErrors,
|
|
198
|
+
hasRateLimitErrors,
|
|
199
|
+
};
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
/**
 * Detect rate limit by sending bursts of requests and observing patterns.
 *
 * Sends batches of `safeConcurrency` requests for up to `durationMs`
 * (capped at MAX_RATE_LIMIT_TEST_REQUESTS total requests), records the
 * timestamps of successes and 429 responses, then hands the timing data
 * to analyzeRateLimitPattern to infer a window and budget.
 *
 * @param options - normalized request parameters
 * @param safeConcurrency - batch size (80% of the detected concurrency limit)
 * @param durationMs - how long to keep bursting
 * @param notes - accumulator for human-readable observations (mutated)
 * @returns detected requests-per-window; falls back to conservative
 *          defaults (half the sent requests, 60s window) when no clear
 *          pattern was found
 */
async function detectRateLimit(
  options: RequestOptions,
  safeConcurrency: number,
  durationMs: number,
  notes: string[]
): Promise<{ requests: number; windowMs: number }> {
  const startTime = Date.now();
  const requestTimes: number[] = []; // timestamps of successful responses
  const rateLimitHitTimes: number[] = []; // timestamps of 429 responses
  let totalRequests = 0;

  // Send requests as fast as possible at safe concurrency
  while (Date.now() - startTime < durationMs && totalRequests < MAX_RATE_LIMIT_TEST_REQUESTS) {
    const batchStart = Date.now();
    const results = await sendBatch(options, safeConcurrency);

    for (const result of results) {
      totalRequests++;
      if (result.success) {
        requestTimes.push(Date.now());
      } else if (result.hasRateLimitErrors) {
        rateLimitHitTimes.push(Date.now());

        // Honor Retry-After header if present
        if (result.retryAfterMs) {
          notes.push(`API returned Retry-After: ${Math.ceil(result.retryAfterMs / 1000)}s`);
          await Bun.sleep(result.retryAfterMs);
        }
      }
    }

    // Small delay between batches to avoid overwhelming
    const batchDuration = Date.now() - batchStart;
    if (batchDuration < 100) {
      await Bun.sleep(100 - batchDuration);
    }
  }

  // Analyze results to determine rate limit
  const analysis = analyzeRateLimitPattern(requestTimes, rateLimitHitTimes, durationMs);

  if (analysis.requestsPerWindow > 0) {
    notes.push(`Rate limit detected: ~${analysis.requestsPerWindow} requests per ${analysis.windowMs / 1000}s window`);
  } else {
    notes.push('No clear rate limit pattern detected, using conservative defaults');
  }

  // Fallbacks: at least 10 requests (half of what was sent) per 60s window.
  return {
    requests: analysis.requestsPerWindow || Math.max(10, Math.floor(totalRequests / 2)),
    windowMs: analysis.windowMs || 60000,
  };
}
|
|
257
|
+
|
|
258
|
+
/**
|
|
259
|
+
* Send a batch of concurrent requests.
|
|
260
|
+
*/
|
|
261
|
+
async function sendBatch(options: RequestOptions, count: number): Promise<TestResult[]> {
|
|
262
|
+
const promises: Promise<TestResult>[] = [];
|
|
263
|
+
|
|
264
|
+
for (let i = 0; i < count; i++) {
|
|
265
|
+
promises.push(makeRequest(options));
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
return Promise.all(promises);
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
/**
|
|
272
|
+
* Analyze rate limit patterns from request timing data.
|
|
273
|
+
*/
|
|
274
|
+
function analyzeRateLimitPattern(
|
|
275
|
+
requestTimes: number[],
|
|
276
|
+
rateLimitHits: number[],
|
|
277
|
+
durationMs: number
|
|
278
|
+
): { requestsPerWindow: number; windowMs: number } {
|
|
279
|
+
if (requestTimes.length === 0) {
|
|
280
|
+
return { requestsPerWindow: 0, windowMs: 60000 };
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
// If we never hit rate limits, estimate based on throughput
|
|
284
|
+
if (rateLimitHits.length === 0) {
|
|
285
|
+
const throughputPerSecond = requestTimes.length / (durationMs / 1000);
|
|
286
|
+
// Conservative estimate: assume window is 60s
|
|
287
|
+
return {
|
|
288
|
+
requestsPerWindow: Math.floor(throughputPerSecond * 60 * 0.8),
|
|
289
|
+
windowMs: 60000,
|
|
290
|
+
};
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
// Look for patterns in rate limit hits to identify window size
|
|
294
|
+
const intervals: number[] = [];
|
|
295
|
+
for (let i = 1; i < rateLimitHits.length; i++) {
|
|
296
|
+
intervals.push(rateLimitHits[i] - rateLimitHits[i - 1]);
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
// Common rate limit windows
|
|
300
|
+
const commonWindows = [1000, 5000, 10000, 15000, 20000, 30000, 60000];
|
|
301
|
+
|
|
302
|
+
// Try to identify window from pattern
|
|
303
|
+
let detectedWindow = 60000;
|
|
304
|
+
if (intervals.length > 0) {
|
|
305
|
+
const avgInterval = intervals.reduce((a, b) => a + b, 0) / intervals.length;
|
|
306
|
+
// Find closest common window
|
|
307
|
+
detectedWindow = commonWindows.reduce((closest, window) =>
|
|
308
|
+
Math.abs(window - avgInterval) < Math.abs(closest - avgInterval) ? window : closest
|
|
309
|
+
);
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
// Calculate requests per window based on successful requests
|
|
313
|
+
const windowCount = Math.ceil(durationMs / detectedWindow);
|
|
314
|
+
const avgRequestsPerWindow = Math.floor(requestTimes.length / Math.max(1, windowCount));
|
|
315
|
+
|
|
316
|
+
return {
|
|
317
|
+
requestsPerWindow: avgRequestsPerWindow,
|
|
318
|
+
windowMs: detectedWindow,
|
|
319
|
+
};
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
/**
|
|
323
|
+
* Make a single HTTP request with timeout.
|
|
324
|
+
*/
|
|
325
|
+
async function makeRequest(
|
|
326
|
+
options: RequestOptions,
|
|
327
|
+
signal?: AbortSignal
|
|
328
|
+
): Promise<TestResult> {
|
|
329
|
+
const timeoutSignal = AbortSignal.timeout(options.timeoutMs);
|
|
330
|
+
|
|
331
|
+
try {
|
|
332
|
+
const response = await fetch(options.url, {
|
|
333
|
+
method: options.method,
|
|
334
|
+
headers: options.headers,
|
|
335
|
+
body: options.body ? JSON.stringify(options.body) : undefined,
|
|
336
|
+
signal: signal ? AbortSignal.any([signal, timeoutSignal]) : timeoutSignal,
|
|
337
|
+
});
|
|
338
|
+
|
|
339
|
+
const statusCode = response.status;
|
|
340
|
+
|
|
341
|
+
// Parse Retry-After header
|
|
342
|
+
let retryAfterMs: number | undefined;
|
|
343
|
+
const retryAfter = response.headers.get('retry-after');
|
|
344
|
+
if (retryAfter) {
|
|
345
|
+
// Try parsing as seconds first, then as HTTP date
|
|
346
|
+
const seconds = parseInt(retryAfter, 10);
|
|
347
|
+
if (!isNaN(seconds)) {
|
|
348
|
+
retryAfterMs = seconds * 1000;
|
|
349
|
+
} else {
|
|
350
|
+
const date = new Date(retryAfter);
|
|
351
|
+
if (!isNaN(date.getTime())) {
|
|
352
|
+
retryAfterMs = date.getTime() - Date.now();
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
// Also check X-RateLimit-Reset if available
|
|
358
|
+
const rateLimitReset = response.headers.get('x-ratelimit-reset');
|
|
359
|
+
if (rateLimitReset && !retryAfterMs) {
|
|
360
|
+
const resetTime = parseInt(rateLimitReset, 10);
|
|
361
|
+
if (!isNaN(resetTime)) {
|
|
362
|
+
// Could be seconds or milliseconds since epoch
|
|
363
|
+
const resetMs = resetTime > 1000000000000 ? resetTime : resetTime * 1000;
|
|
364
|
+
retryAfterMs = Math.max(0, resetMs - Date.now());
|
|
365
|
+
}
|
|
366
|
+
}
|
|
367
|
+
|
|
368
|
+
const isSuccess = response.ok;
|
|
369
|
+
const isConcurrencyError = statusCode === 503 || statusCode === 502;
|
|
370
|
+
const isRateLimitError = statusCode === 429;
|
|
371
|
+
|
|
372
|
+
return {
|
|
373
|
+
success: isSuccess,
|
|
374
|
+
statusCode,
|
|
375
|
+
hasConcurrencyErrors: isConcurrencyError,
|
|
376
|
+
hasRateLimitErrors: isRateLimitError,
|
|
377
|
+
retryAfterMs,
|
|
378
|
+
};
|
|
379
|
+
} catch (error) {
|
|
380
|
+
const isTimeout = error instanceof Error && error.name === 'TimeoutError';
|
|
381
|
+
|
|
382
|
+
return {
|
|
383
|
+
success: false,
|
|
384
|
+
statusCode: isTimeout ? 408 : 0,
|
|
385
|
+
hasConcurrencyErrors: false,
|
|
386
|
+
hasRateLimitErrors: false,
|
|
387
|
+
error: error instanceof Error ? error : new Error(String(error)),
|
|
388
|
+
};
|
|
389
|
+
}
|
|
390
|
+
}
|
|
391
|
+
|
|
392
|
+
/**
|
|
393
|
+
* Simple semaphore for controlling concurrent requests.
|
|
394
|
+
*/
|
|
395
|
+
class SimpleSemaphore {
|
|
396
|
+
private available: number;
|
|
397
|
+
private queue: Array<() => void> = [];
|
|
398
|
+
|
|
399
|
+
constructor(max: number) {
|
|
400
|
+
this.available = max;
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
acquire(): Promise<void> {
|
|
404
|
+
if (this.available > 0) {
|
|
405
|
+
this.available--;
|
|
406
|
+
return Promise.resolve();
|
|
407
|
+
}
|
|
408
|
+
|
|
409
|
+
return new Promise((resolve) => {
|
|
410
|
+
this.queue.push(resolve);
|
|
411
|
+
});
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
release(): void {
|
|
415
|
+
const next = this.queue.shift();
|
|
416
|
+
if (next) {
|
|
417
|
+
next();
|
|
418
|
+
} else {
|
|
419
|
+
this.available++;
|
|
420
|
+
}
|
|
421
|
+
}
|
|
422
|
+
}
|
|
423
|
+
|
|
424
|
+
/**
|
|
425
|
+
* Calculate confidence level based on detection results.
|
|
426
|
+
*/
|
|
427
|
+
function calculateConfidence(
|
|
428
|
+
concurrency: number,
|
|
429
|
+
rateLimit: { requests: number; windowMs: number },
|
|
430
|
+
notes: string[]
|
|
431
|
+
): 'high' | 'medium' | 'low' {
|
|
432
|
+
let score = 0;
|
|
433
|
+
|
|
434
|
+
// Concurrency detection confidence
|
|
435
|
+
if (concurrency > 1 && concurrency < DEFAULT_MAX_CONCURRENCY) {
|
|
436
|
+
score += 2;
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
// Rate limit detection confidence
|
|
440
|
+
if (rateLimit.requests > 0 && rateLimit.windowMs > 0) {
|
|
441
|
+
score += 2;
|
|
442
|
+
}
|
|
443
|
+
|
|
444
|
+
// Notes indicate clarity
|
|
445
|
+
if (notes.some((n) => n.includes('clearly') || n.includes('detected'))) {
|
|
446
|
+
score += 1;
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
// Penalty for uncertainty indicators
|
|
450
|
+
if (notes.some((n) => n.includes('conservative') || n.includes('no clear'))) {
|
|
451
|
+
score -= 1;
|
|
452
|
+
}
|
|
453
|
+
|
|
454
|
+
if (score >= 4) return 'high';
|
|
455
|
+
if (score >= 2) return 'medium';
|
|
456
|
+
return 'low';
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
/**
 * Print detection results in a formatted way.
 *
 * Emits the detected limits, a ready-to-copy configuration snippet,
 * the confidence rating, and any notes gathered during detection.
 *
 * @param config - results from detectConstraints
 * @param url - optional URL echoed in the header line
 */
export function printDetectedConfig(config: DetectedConfig, url?: string): void {
  console.log('');
  console.log('[TEM Auto-Detect] Results' + (url ? ` for ${url}` : ''));
  console.log('');
  console.log('Detected Limits:');
  console.log(` - Max Concurrency: ${config.concurrency} requests (80% of detected)`);
  console.log(` - Rate Limit: ${config.rateLimit.requests} requests per ${config.rateLimit.windowMs / 1000} seconds`);
  console.log('');
  console.log('Recommended Configuration:');
  console.log(' {');
  console.log(` concurrency: ${config.concurrency},`);
  console.log(' rateLimit: {');
  console.log(` requests: ${config.rateLimit.requests},`);
  console.log(` windowMs: ${config.rateLimit.windowMs}`);
  console.log(' }');
  console.log(' }');
  console.log('');
  console.log(`Confidence: ${config.confidence}`);
  if (config.notes.length > 0) {
    console.log('Notes:');
    for (const note of config.notes) {
      console.log(` - ${note}`);
    }
  }
  console.log('');
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { TEM } from '../core/tem.js';
|
|
2
|
+
import type { BatchStats } from '../interfaces/index.js';
|
|
3
|
+
|
|
4
|
+
/** Options controlling waitForBatch polling behavior. */
export interface WaitForBatchOptions {
  /** Maximum time to wait in milliseconds (default 30000). */
  timeoutMs?: number;
  /** Polling interval in milliseconds (default 1000). */
  intervalMs?: number;
}
|
|
8
|
+
|
|
9
|
+
/**
|
|
10
|
+
* Waits for a batch to complete by polling its statistics.
|
|
11
|
+
* Logs progress at each interval showing completion percentage and task counts.
|
|
12
|
+
*
|
|
13
|
+
* @param tem - TEM instance
|
|
14
|
+
* @param batchId - ID of the batch to monitor
|
|
15
|
+
* @param options - Optional configuration
|
|
16
|
+
* @param options.timeoutMs - Maximum time to wait in milliseconds (default: 30000)
|
|
17
|
+
* @param options.intervalMs - Polling interval in milliseconds (default: 1000)
|
|
18
|
+
* @returns Promise that resolves when batch is complete
|
|
19
|
+
* @throws Error if timeout is reached before completion
|
|
20
|
+
*/
|
|
21
|
+
export async function waitForBatch(
|
|
22
|
+
tem: TEM,
|
|
23
|
+
batchId: string,
|
|
24
|
+
options: WaitForBatchOptions = {}
|
|
25
|
+
): Promise<void> {
|
|
26
|
+
const { timeoutMs = 30000, intervalMs = 1000 } = options;
|
|
27
|
+
const start = Date.now();
|
|
28
|
+
|
|
29
|
+
while (Date.now() - start < timeoutMs) {
|
|
30
|
+
const stats: BatchStats = await tem.batch.getStats(batchId);
|
|
31
|
+
|
|
32
|
+
const total = stats.total;
|
|
33
|
+
const completed = stats.completed;
|
|
34
|
+
const failed = stats.failed;
|
|
35
|
+
const pending = stats.pending;
|
|
36
|
+
const running = stats.running;
|
|
37
|
+
|
|
38
|
+
const percent = total > 0 ? Math.round((completed / total) * 100) : 0;
|
|
39
|
+
|
|
40
|
+
console.log(
|
|
41
|
+
`[BatchMonitor] Batch ${batchId}: ${percent}% complete (${completed}/${total}) - completed:${completed} failed:${failed} pending:${pending} running:${running}`
|
|
42
|
+
);
|
|
43
|
+
|
|
44
|
+
if (pending === 0 && running === 0) {
|
|
45
|
+
return;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
await Bun.sleep(intervalMs);
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
throw new Error('Batch completion timeout');
|
|
52
|
+
}
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Simple semaphore implementation to control max concurrent tasks.
|
|
3
|
+
*/
|
|
4
|
+
export class ConcurrencyController {
|
|
5
|
+
private available: number;
|
|
6
|
+
private queue: Array<() => void> = [];
|
|
7
|
+
|
|
8
|
+
constructor(private max: number) {
|
|
9
|
+
this.available = max;
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Acquire a slot. Returns a promise that resolves when a slot is available.
|
|
14
|
+
*/
|
|
15
|
+
acquire(): Promise<void> {
|
|
16
|
+
if (this.available > 0) {
|
|
17
|
+
this.available--;
|
|
18
|
+
return Promise.resolve();
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
return new Promise((resolve) => {
|
|
22
|
+
this.queue.push(resolve);
|
|
23
|
+
});
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* Release a slot, allowing the next waiting acquirer to proceed.
|
|
28
|
+
*/
|
|
29
|
+
release(): void {
|
|
30
|
+
const next = this.queue.shift();
|
|
31
|
+
if (next) {
|
|
32
|
+
next();
|
|
33
|
+
} else {
|
|
34
|
+
this.available++;
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* Get the number of currently running (acquired) slots.
|
|
40
|
+
*/
|
|
41
|
+
getRunning(): number {
|
|
42
|
+
return this.max - this.available - this.queue.length;
|
|
43
|
+
}
|
|
44
|
+
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
export { ConcurrencyController } from './concurrency.js';
|
|
2
|
+
export { RateLimiter, type RateLimitConfig } from './rate-limiter.js';
|
|
3
|
+
export { waitForBatch, type WaitForBatchOptions } from './batch-monitor.js';
|
|
4
|
+
export {
|
|
5
|
+
detectConstraints,
|
|
6
|
+
printDetectedConfig,
|
|
7
|
+
type DetectOptions,
|
|
8
|
+
type DetectedConfig,
|
|
9
|
+
} from './auto-detect.js';
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
/** Token-bucket rate limit: `requests` tokens refill evenly over `windowMs`. */
export interface RateLimitConfig {
  /** Number of requests allowed per window. */
  requests: number;
  /** Window length in milliseconds. */
  windowMs: number;
}
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Token bucket implementation for rate limiting.
|
|
8
|
+
*/
|
|
9
|
+
export class RateLimiter {
|
|
10
|
+
private tokens: number;
|
|
11
|
+
private lastRefill: number;
|
|
12
|
+
|
|
13
|
+
constructor(private config: RateLimitConfig) {
|
|
14
|
+
this.tokens = config.requests;
|
|
15
|
+
this.lastRefill = Date.now();
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
/**
|
|
19
|
+
* Acquire a token. Returns a promise that resolves when a token is available.
|
|
20
|
+
* Uses Bun.sleep for async delay if tokens need to be refilled.
|
|
21
|
+
*/
|
|
22
|
+
async acquire(): Promise<void> {
|
|
23
|
+
const now = Date.now();
|
|
24
|
+
this.refill(now);
|
|
25
|
+
|
|
26
|
+
if (this.tokens >= 1) {
|
|
27
|
+
this.tokens--;
|
|
28
|
+
return;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
// Calculate wait time for next token
|
|
32
|
+
const tokensNeeded = 1 - this.tokens;
|
|
33
|
+
const msPerToken = this.config.windowMs / this.config.requests;
|
|
34
|
+
const waitMs = Math.ceil(tokensNeeded * msPerToken);
|
|
35
|
+
|
|
36
|
+
await Bun.sleep(waitMs);
|
|
37
|
+
|
|
38
|
+
// After waiting, recurse to try again (will refill and get token)
|
|
39
|
+
return this.acquire();
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Refill tokens based on elapsed time since last check.
|
|
44
|
+
*/
|
|
45
|
+
private refill(now: number): void {
|
|
46
|
+
const elapsedMs = now - this.lastRefill;
|
|
47
|
+
const tokensToAdd = (elapsedMs / this.config.windowMs) * this.config.requests;
|
|
48
|
+
|
|
49
|
+
if (tokensToAdd > 0) {
|
|
50
|
+
this.tokens = Math.min(this.config.requests, this.tokens + tokensToAdd);
|
|
51
|
+
this.lastRefill = now;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
}
|