@logtape/logtape 1.2.0-dev.343 → 1.2.0-dev.344
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/deno.json +1 -1
- package/dist/sink.cjs +56 -11
- package/dist/sink.d.cts +47 -1
- package/dist/sink.d.cts.map +1 -1
- package/dist/sink.d.ts +47 -1
- package/dist/sink.d.ts.map +1 -1
- package/dist/sink.js +56 -11
- package/dist/sink.js.map +1 -1
- package/package.json +1 -1
- package/src/sink.test.ts +381 -0
- package/src/sink.ts +167 -13
package/deno.json
CHANGED
package/dist/sink.cjs
CHANGED
|
@@ -257,6 +257,11 @@ function fingersCrossed(sink, options = {}) {
|
|
|
257
257
|
const maxBufferSize = Math.max(0, options.maxBufferSize ?? 1e3);
|
|
258
258
|
const isolateByCategory = options.isolateByCategory;
|
|
259
259
|
const isolateByContext = options.isolateByContext;
|
|
260
|
+
const bufferTtlMs = isolateByContext?.bufferTtlMs;
|
|
261
|
+
const cleanupIntervalMs = isolateByContext?.cleanupIntervalMs ?? 3e4;
|
|
262
|
+
const maxContexts = isolateByContext?.maxContexts;
|
|
263
|
+
const hasTtl = bufferTtlMs != null && bufferTtlMs > 0;
|
|
264
|
+
const hasLru = maxContexts != null && maxContexts > 0;
|
|
260
265
|
try {
|
|
261
266
|
require_level.compareLogLevel("trace", triggerLevel);
|
|
262
267
|
} catch (error) {
|
|
@@ -320,6 +325,27 @@ function fingersCrossed(sink, options = {}) {
|
|
|
320
325
|
context: contextPart
|
|
321
326
|
};
|
|
322
327
|
}
|
|
328
|
+
function cleanupExpiredBuffers(buffers) {
|
|
329
|
+
if (!hasTtl) return;
|
|
330
|
+
const now = Date.now();
|
|
331
|
+
const expiredKeys = [];
|
|
332
|
+
for (const [key, metadata] of buffers) {
|
|
333
|
+
if (metadata.buffer.length === 0) continue;
|
|
334
|
+
const lastRecordTimestamp = metadata.buffer[metadata.buffer.length - 1].timestamp;
|
|
335
|
+
if (now - lastRecordTimestamp > bufferTtlMs) expiredKeys.push(key);
|
|
336
|
+
}
|
|
337
|
+
for (const key of expiredKeys) buffers.delete(key);
|
|
338
|
+
}
|
|
339
|
+
function evictLruBuffers(buffers, numToEvict) {
|
|
340
|
+
if (!hasLru) return;
|
|
341
|
+
const toEvict = numToEvict ?? Math.max(0, buffers.size - maxContexts);
|
|
342
|
+
if (toEvict <= 0) return;
|
|
343
|
+
const sortedEntries = Array.from(buffers.entries()).sort(([, a], [, b]) => a.lastAccess - b.lastAccess);
|
|
344
|
+
for (let i = 0; i < toEvict; i++) {
|
|
345
|
+
const [key] = sortedEntries[i];
|
|
346
|
+
buffers.delete(key);
|
|
347
|
+
}
|
|
348
|
+
}
|
|
323
349
|
if (!isolateByCategory && !isolateByContext) {
|
|
324
350
|
const buffer = [];
|
|
325
351
|
let triggered = false;
|
|
@@ -341,7 +367,11 @@ function fingersCrossed(sink, options = {}) {
|
|
|
341
367
|
} else {
|
|
342
368
|
const buffers = /* @__PURE__ */ new Map();
|
|
343
369
|
const triggered = /* @__PURE__ */ new Set();
|
|
344
|
-
|
|
370
|
+
let cleanupTimer = null;
|
|
371
|
+
if (hasTtl) cleanupTimer = setInterval(() => {
|
|
372
|
+
cleanupExpiredBuffers(buffers);
|
|
373
|
+
}, cleanupIntervalMs);
|
|
374
|
+
const fingersCrossedSink = (record) => {
|
|
345
375
|
const bufferKey = getBufferKey(record.category, record.properties);
|
|
346
376
|
if (triggered.has(bufferKey)) {
|
|
347
377
|
sink(record);
|
|
@@ -365,9 +395,9 @@ function fingersCrossed(sink, options = {}) {
|
|
|
365
395
|
}
|
|
366
396
|
const allRecordsToFlush = [];
|
|
367
397
|
for (const key of keysToFlush) {
|
|
368
|
-
const
|
|
369
|
-
if (
|
|
370
|
-
allRecordsToFlush.push(...buffer);
|
|
398
|
+
const metadata = buffers.get(key);
|
|
399
|
+
if (metadata) {
|
|
400
|
+
allRecordsToFlush.push(...metadata.buffer);
|
|
371
401
|
buffers.delete(key);
|
|
372
402
|
triggered.add(key);
|
|
373
403
|
}
|
|
@@ -377,15 +407,30 @@ function fingersCrossed(sink, options = {}) {
|
|
|
377
407
|
triggered.add(bufferKey);
|
|
378
408
|
sink(record);
|
|
379
409
|
} else {
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
buffers.
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
410
|
+
const now = Date.now();
|
|
411
|
+
let metadata = buffers.get(bufferKey);
|
|
412
|
+
if (!metadata) {
|
|
413
|
+
if (hasLru && buffers.size >= maxContexts) {
|
|
414
|
+
const numToEvict = buffers.size - maxContexts + 1;
|
|
415
|
+
evictLruBuffers(buffers, numToEvict);
|
|
416
|
+
}
|
|
417
|
+
metadata = {
|
|
418
|
+
buffer: [],
|
|
419
|
+
lastAccess: now
|
|
420
|
+
};
|
|
421
|
+
buffers.set(bufferKey, metadata);
|
|
422
|
+
} else metadata.lastAccess = now;
|
|
423
|
+
metadata.buffer.push(record);
|
|
424
|
+
while (metadata.buffer.length > maxBufferSize) metadata.buffer.shift();
|
|
425
|
+
}
|
|
426
|
+
};
|
|
427
|
+
if (cleanupTimer !== null) fingersCrossedSink[Symbol.dispose] = () => {
|
|
428
|
+
if (cleanupTimer !== null) {
|
|
429
|
+
clearInterval(cleanupTimer);
|
|
430
|
+
cleanupTimer = null;
|
|
387
431
|
}
|
|
388
432
|
};
|
|
433
|
+
return fingersCrossedSink;
|
|
389
434
|
}
|
|
390
435
|
}
|
|
391
436
|
|
package/dist/sink.d.cts
CHANGED
|
@@ -265,6 +265,17 @@ interface FingersCrossedOptions {
|
|
|
265
265
|
* })
|
|
266
266
|
* ```
|
|
267
267
|
*
|
|
268
|
+
* @example With TTL-based buffer cleanup
|
|
269
|
+
* ```typescript
|
|
270
|
+
* fingersCrossed(sink, {
|
|
271
|
+
* isolateByContext: {
|
|
272
|
+
* keys: ['requestId'],
|
|
273
|
+
* bufferTtlMs: 30000, // 30 seconds
|
|
274
|
+
* cleanupIntervalMs: 10000 // cleanup every 10 seconds
|
|
275
|
+
* }
|
|
276
|
+
* })
|
|
277
|
+
* ```
|
|
278
|
+
*
|
|
268
279
|
* @default `undefined` (no context isolation)
|
|
269
280
|
* @since 1.2.0
|
|
270
281
|
*/
|
|
@@ -274,6 +285,41 @@ interface FingersCrossedOptions {
|
|
|
274
285
|
* Buffers will be separate for different combinations of these context values.
|
|
275
286
|
*/
|
|
276
287
|
readonly keys: readonly string[];
|
|
288
|
+
/**
|
|
289
|
+
* Maximum number of context buffers to maintain simultaneously.
|
|
290
|
+
* When this limit is exceeded, the least recently used (LRU) buffers
|
|
291
|
+
* will be evicted to make room for new ones.
|
|
292
|
+
*
|
|
293
|
+
* This provides memory protection in high-concurrency scenarios where
|
|
294
|
+
* many different context values might be active simultaneously.
|
|
295
|
+
*
|
|
296
|
+
* When set to 0 or undefined, no limit is enforced.
|
|
297
|
+
*
|
|
298
|
+
* @default `undefined` (no limit)
|
|
299
|
+
* @since 1.2.0
|
|
300
|
+
*/
|
|
301
|
+
readonly maxContexts?: number;
|
|
302
|
+
/**
|
|
303
|
+
* Time-to-live for context buffers in milliseconds.
|
|
304
|
+
* Buffers that haven't been accessed for this duration will be automatically
|
|
305
|
+
* cleaned up to prevent memory leaks in long-running applications.
|
|
306
|
+
*
|
|
307
|
+
* When set to 0 or undefined, buffers will never expire based on time.
|
|
308
|
+
*
|
|
309
|
+
* @default `undefined` (no TTL)
|
|
310
|
+
* @since 1.2.0
|
|
311
|
+
*/
|
|
312
|
+
readonly bufferTtlMs?: number;
|
|
313
|
+
/**
|
|
314
|
+
* Interval in milliseconds for running cleanup operations.
|
|
315
|
+
* The cleanup process removes expired buffers based on {@link bufferTtlMs}.
|
|
316
|
+
*
|
|
317
|
+
* This option is ignored if {@link bufferTtlMs} is not set.
|
|
318
|
+
*
|
|
319
|
+
* @default `30000` (30 seconds)
|
|
320
|
+
* @since 1.2.0
|
|
321
|
+
*/
|
|
322
|
+
readonly cleanupIntervalMs?: number;
|
|
277
323
|
};
|
|
278
324
|
}
|
|
279
325
|
/**
|
|
@@ -311,7 +357,7 @@ interface FingersCrossedOptions {
|
|
|
311
357
|
* @returns A sink that buffers records until the trigger level is reached.
|
|
312
358
|
* @since 1.1.0
|
|
313
359
|
*/
|
|
314
|
-
declare function fingersCrossed(sink: Sink, options?: FingersCrossedOptions): Sink;
|
|
360
|
+
declare function fingersCrossed(sink: Sink, options?: FingersCrossedOptions): Sink | (Sink & Disposable);
|
|
315
361
|
//#endregion
|
|
316
362
|
export { AsyncSink, ConsoleSinkOptions, FingersCrossedOptions, Sink, StreamSinkOptions, fingersCrossed, fromAsyncSink, getConsoleSink, getStreamSink, withFilter };
|
|
317
363
|
//# sourceMappingURL=sink.d.cts.map
|
package/dist/sink.d.cts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sink.d.cts","names":[],"sources":["../src/sink.ts"],"sourcesContent":[],"mappings":";;;;;;;;;AAmBA;AAWA;;;;AAAsD;AAgBtD;AAA0B,KA3Bd,IAAA,GA2Bc,CAAA,MAAA,EA3BE,SA2BF,EAAA,GAAA,IAAA;;;;AAAsC;AAUhE;;;;AAS8C;AA+D9B,KAlGJ,SAAA,GAkGiB,CAAA,MAAA,EAlGI,SAkGJ,EAAA,GAlGkB,OAkGlB,CAAA,IAAA,CAAA;;;;;;AAGJ;AAgGxB;AAOD;;;;;;;AA2BY,iBAvNI,UAAA,CAuNJ,IAAA,EAvNqB,IAuNrB,EAAA,MAAA,EAvNmC,UAuNnC,CAAA,EAvNgD,IAuNhD;AAAO;AA8CnB;;AACW,UA5PM,iBAAA,CA4PN;EAAuB;;;EACN,SAAA,CAAA,EAzPd,aAyPc;EA4HZ;;;EAAkC,OAAG,CAAA,EAAA;IAAO,MAAA,CAAA,IAAA,EAAA,MAAA,CAAA,EAhXxB,UAgXwB;EAAe,CAAA;EAmB1D;
|
|
1
|
+
{"version":3,"file":"sink.d.cts","names":[],"sources":["../src/sink.ts"],"sourcesContent":[],"mappings":";;;;;;;;;AAmBA;AAWA;;;;AAAsD;AAgBtD;AAA0B,KA3Bd,IAAA,GA2Bc,CAAA,MAAA,EA3BE,SA2BF,EAAA,GAAA,IAAA;;;;AAAsC;AAUhE;;;;AAS8C;AA+D9B,KAlGJ,SAAA,GAkGiB,CAAA,MAAA,EAlGI,SAkGJ,EAAA,GAlGkB,OAkGlB,CAAA,IAAA,CAAA;;;;;;AAGJ;AAgGxB;AAOD;;;;;;;AA2BY,iBAvNI,UAAA,CAuNJ,IAAA,EAvNqB,IAuNrB,EAAA,MAAA,EAvNmC,UAuNnC,CAAA,EAvNgD,IAuNhD;AAAO;AA8CnB;;AACW,UA5PM,iBAAA,CA4PN;EAAuB;;;EACN,SAAA,CAAA,EAzPd,aAyPc;EA4HZ;;;EAAkC,OAAG,CAAA,EAAA;IAAO,MAAA,CAAA,IAAA,EAAA,MAAA,CAAA,EAhXxB,UAgXwB;EAAe,CAAA;EAmB1D;AAiLjB;;;;;;;AAG4B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAxfZ,aAAA,SACN,0BACC,oBACR,OAAO;KAkGL,aAAA;;;;UAKY,kBAAA;;;;;cAKH,mBAAmB;;;;;;;;;;;;;;;;aAiBpB,OAAO,UAAU;;;;YAKlB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA8CI,cAAA,WACL,qBACR,QAAQ,OAAO;;;;;;;;;;;;;;;;;;;;;iBA4HF,aAAA,YAAyB,YAAY,OAAO;;;;;UAmB3C,qBAAA;;;;;;;0BAOS;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA0KV,cAAA,OACR,gBACG,wBACR,QAAQ,OAAO"}
|
package/dist/sink.d.ts
CHANGED
|
@@ -265,6 +265,17 @@ interface FingersCrossedOptions {
|
|
|
265
265
|
* })
|
|
266
266
|
* ```
|
|
267
267
|
*
|
|
268
|
+
* @example With TTL-based buffer cleanup
|
|
269
|
+
* ```typescript
|
|
270
|
+
* fingersCrossed(sink, {
|
|
271
|
+
* isolateByContext: {
|
|
272
|
+
* keys: ['requestId'],
|
|
273
|
+
* bufferTtlMs: 30000, // 30 seconds
|
|
274
|
+
* cleanupIntervalMs: 10000 // cleanup every 10 seconds
|
|
275
|
+
* }
|
|
276
|
+
* })
|
|
277
|
+
* ```
|
|
278
|
+
*
|
|
268
279
|
* @default `undefined` (no context isolation)
|
|
269
280
|
* @since 1.2.0
|
|
270
281
|
*/
|
|
@@ -274,6 +285,41 @@ interface FingersCrossedOptions {
|
|
|
274
285
|
* Buffers will be separate for different combinations of these context values.
|
|
275
286
|
*/
|
|
276
287
|
readonly keys: readonly string[];
|
|
288
|
+
/**
|
|
289
|
+
* Maximum number of context buffers to maintain simultaneously.
|
|
290
|
+
* When this limit is exceeded, the least recently used (LRU) buffers
|
|
291
|
+
* will be evicted to make room for new ones.
|
|
292
|
+
*
|
|
293
|
+
* This provides memory protection in high-concurrency scenarios where
|
|
294
|
+
* many different context values might be active simultaneously.
|
|
295
|
+
*
|
|
296
|
+
* When set to 0 or undefined, no limit is enforced.
|
|
297
|
+
*
|
|
298
|
+
* @default `undefined` (no limit)
|
|
299
|
+
* @since 1.2.0
|
|
300
|
+
*/
|
|
301
|
+
readonly maxContexts?: number;
|
|
302
|
+
/**
|
|
303
|
+
* Time-to-live for context buffers in milliseconds.
|
|
304
|
+
* Buffers that haven't been accessed for this duration will be automatically
|
|
305
|
+
* cleaned up to prevent memory leaks in long-running applications.
|
|
306
|
+
*
|
|
307
|
+
* When set to 0 or undefined, buffers will never expire based on time.
|
|
308
|
+
*
|
|
309
|
+
* @default `undefined` (no TTL)
|
|
310
|
+
* @since 1.2.0
|
|
311
|
+
*/
|
|
312
|
+
readonly bufferTtlMs?: number;
|
|
313
|
+
/**
|
|
314
|
+
* Interval in milliseconds for running cleanup operations.
|
|
315
|
+
* The cleanup process removes expired buffers based on {@link bufferTtlMs}.
|
|
316
|
+
*
|
|
317
|
+
* This option is ignored if {@link bufferTtlMs} is not set.
|
|
318
|
+
*
|
|
319
|
+
* @default `30000` (30 seconds)
|
|
320
|
+
* @since 1.2.0
|
|
321
|
+
*/
|
|
322
|
+
readonly cleanupIntervalMs?: number;
|
|
277
323
|
};
|
|
278
324
|
}
|
|
279
325
|
/**
|
|
@@ -311,7 +357,7 @@ interface FingersCrossedOptions {
|
|
|
311
357
|
* @returns A sink that buffers records until the trigger level is reached.
|
|
312
358
|
* @since 1.1.0
|
|
313
359
|
*/
|
|
314
|
-
declare function fingersCrossed(sink: Sink, options?: FingersCrossedOptions): Sink;
|
|
360
|
+
declare function fingersCrossed(sink: Sink, options?: FingersCrossedOptions): Sink | (Sink & Disposable);
|
|
315
361
|
//#endregion
|
|
316
362
|
export { AsyncSink, ConsoleSinkOptions, FingersCrossedOptions, Sink, StreamSinkOptions, fingersCrossed, fromAsyncSink, getConsoleSink, getStreamSink, withFilter };
|
|
317
363
|
//# sourceMappingURL=sink.d.ts.map
|
package/dist/sink.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sink.d.ts","names":[],"sources":["../src/sink.ts"],"sourcesContent":[],"mappings":";;;;;;;;;AAmBA;AAWA;;;;AAAsD;AAgBtD;AAA0B,KA3Bd,IAAA,GA2Bc,CAAA,MAAA,EA3BE,SA2BF,EAAA,GAAA,IAAA;;;;AAAsC;AAUhE;;;;AAS8C;AA+D9B,KAlGJ,SAAA,GAkGiB,CAAA,MAAA,EAlGI,SAkGJ,EAAA,GAlGkB,OAkGlB,CAAA,IAAA,CAAA;;;;;;AAGJ;AAgGxB;AAOD;;;;;;;AA2BY,iBAvNI,UAAA,CAuNJ,IAAA,EAvNqB,IAuNrB,EAAA,MAAA,EAvNmC,UAuNnC,CAAA,EAvNgD,IAuNhD;AAAO;AA8CnB;;AACW,UA5PM,iBAAA,CA4PN;EAAuB;;;EACN,SAAA,CAAA,EAzPd,aAyPc;EA4HZ;;;EAAkC,OAAG,CAAA,EAAA;IAAO,MAAA,CAAA,IAAA,EAAA,MAAA,CAAA,EAhXxB,UAgXwB;EAAe,CAAA;EAmB1D;
|
|
1
|
+
{"version":3,"file":"sink.d.ts","names":[],"sources":["../src/sink.ts"],"sourcesContent":[],"mappings":";;;;;;;;;AAmBA;AAWA;;;;AAAsD;AAgBtD;AAA0B,KA3Bd,IAAA,GA2Bc,CAAA,MAAA,EA3BE,SA2BF,EAAA,GAAA,IAAA;;;;AAAsC;AAUhE;;;;AAS8C;AA+D9B,KAlGJ,SAAA,GAkGiB,CAAA,MAAA,EAlGI,SAkGJ,EAAA,GAlGkB,OAkGlB,CAAA,IAAA,CAAA;;;;;;AAGJ;AAgGxB;AAOD;;;;;;;AA2BY,iBAvNI,UAAA,CAuNJ,IAAA,EAvNqB,IAuNrB,EAAA,MAAA,EAvNmC,UAuNnC,CAAA,EAvNgD,IAuNhD;AAAO;AA8CnB;;AACW,UA5PM,iBAAA,CA4PN;EAAuB;;;EACN,SAAA,CAAA,EAzPd,aAyPc;EA4HZ;;;EAAkC,OAAG,CAAA,EAAA;IAAO,MAAA,CAAA,IAAA,EAAA,MAAA,CAAA,EAhXxB,UAgXwB;EAAe,CAAA;EAmB1D;AAiLjB;;;;;;;AAG4B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAxfZ,aAAA,SACN,0BACC,oBACR,OAAO;KAkGL,aAAA;;;;UAKY,kBAAA;;;;;cAKH,mBAAmB;;;;;;;;;;;;;;;;aAiBpB,OAAO,UAAU;;;;YAKlB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA8CI,cAAA,WACL,qBACR,QAAQ,OAAO;;;;;;;;;;;;;;;;;;;;;iBA4HF,aAAA,YAAyB,YAAY,OAAO;;;;;UAmB3C,qBAAA;;;;;;;0BAOS;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA0KV,cAAA,OACR,gBACG,wBACR,QAAQ,OAAO"}
|
package/dist/sink.js
CHANGED
|
@@ -257,6 +257,11 @@ function fingersCrossed(sink, options = {}) {
|
|
|
257
257
|
const maxBufferSize = Math.max(0, options.maxBufferSize ?? 1e3);
|
|
258
258
|
const isolateByCategory = options.isolateByCategory;
|
|
259
259
|
const isolateByContext = options.isolateByContext;
|
|
260
|
+
const bufferTtlMs = isolateByContext?.bufferTtlMs;
|
|
261
|
+
const cleanupIntervalMs = isolateByContext?.cleanupIntervalMs ?? 3e4;
|
|
262
|
+
const maxContexts = isolateByContext?.maxContexts;
|
|
263
|
+
const hasTtl = bufferTtlMs != null && bufferTtlMs > 0;
|
|
264
|
+
const hasLru = maxContexts != null && maxContexts > 0;
|
|
260
265
|
try {
|
|
261
266
|
compareLogLevel("trace", triggerLevel);
|
|
262
267
|
} catch (error) {
|
|
@@ -320,6 +325,27 @@ function fingersCrossed(sink, options = {}) {
|
|
|
320
325
|
context: contextPart
|
|
321
326
|
};
|
|
322
327
|
}
|
|
328
|
+
function cleanupExpiredBuffers(buffers) {
|
|
329
|
+
if (!hasTtl) return;
|
|
330
|
+
const now = Date.now();
|
|
331
|
+
const expiredKeys = [];
|
|
332
|
+
for (const [key, metadata] of buffers) {
|
|
333
|
+
if (metadata.buffer.length === 0) continue;
|
|
334
|
+
const lastRecordTimestamp = metadata.buffer[metadata.buffer.length - 1].timestamp;
|
|
335
|
+
if (now - lastRecordTimestamp > bufferTtlMs) expiredKeys.push(key);
|
|
336
|
+
}
|
|
337
|
+
for (const key of expiredKeys) buffers.delete(key);
|
|
338
|
+
}
|
|
339
|
+
function evictLruBuffers(buffers, numToEvict) {
|
|
340
|
+
if (!hasLru) return;
|
|
341
|
+
const toEvict = numToEvict ?? Math.max(0, buffers.size - maxContexts);
|
|
342
|
+
if (toEvict <= 0) return;
|
|
343
|
+
const sortedEntries = Array.from(buffers.entries()).sort(([, a], [, b]) => a.lastAccess - b.lastAccess);
|
|
344
|
+
for (let i = 0; i < toEvict; i++) {
|
|
345
|
+
const [key] = sortedEntries[i];
|
|
346
|
+
buffers.delete(key);
|
|
347
|
+
}
|
|
348
|
+
}
|
|
323
349
|
if (!isolateByCategory && !isolateByContext) {
|
|
324
350
|
const buffer = [];
|
|
325
351
|
let triggered = false;
|
|
@@ -341,7 +367,11 @@ function fingersCrossed(sink, options = {}) {
|
|
|
341
367
|
} else {
|
|
342
368
|
const buffers = /* @__PURE__ */ new Map();
|
|
343
369
|
const triggered = /* @__PURE__ */ new Set();
|
|
344
|
-
|
|
370
|
+
let cleanupTimer = null;
|
|
371
|
+
if (hasTtl) cleanupTimer = setInterval(() => {
|
|
372
|
+
cleanupExpiredBuffers(buffers);
|
|
373
|
+
}, cleanupIntervalMs);
|
|
374
|
+
const fingersCrossedSink = (record) => {
|
|
345
375
|
const bufferKey = getBufferKey(record.category, record.properties);
|
|
346
376
|
if (triggered.has(bufferKey)) {
|
|
347
377
|
sink(record);
|
|
@@ -365,9 +395,9 @@ function fingersCrossed(sink, options = {}) {
|
|
|
365
395
|
}
|
|
366
396
|
const allRecordsToFlush = [];
|
|
367
397
|
for (const key of keysToFlush) {
|
|
368
|
-
const
|
|
369
|
-
if (
|
|
370
|
-
allRecordsToFlush.push(...buffer);
|
|
398
|
+
const metadata = buffers.get(key);
|
|
399
|
+
if (metadata) {
|
|
400
|
+
allRecordsToFlush.push(...metadata.buffer);
|
|
371
401
|
buffers.delete(key);
|
|
372
402
|
triggered.add(key);
|
|
373
403
|
}
|
|
@@ -377,15 +407,30 @@ function fingersCrossed(sink, options = {}) {
|
|
|
377
407
|
triggered.add(bufferKey);
|
|
378
408
|
sink(record);
|
|
379
409
|
} else {
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
buffers.
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
410
|
+
const now = Date.now();
|
|
411
|
+
let metadata = buffers.get(bufferKey);
|
|
412
|
+
if (!metadata) {
|
|
413
|
+
if (hasLru && buffers.size >= maxContexts) {
|
|
414
|
+
const numToEvict = buffers.size - maxContexts + 1;
|
|
415
|
+
evictLruBuffers(buffers, numToEvict);
|
|
416
|
+
}
|
|
417
|
+
metadata = {
|
|
418
|
+
buffer: [],
|
|
419
|
+
lastAccess: now
|
|
420
|
+
};
|
|
421
|
+
buffers.set(bufferKey, metadata);
|
|
422
|
+
} else metadata.lastAccess = now;
|
|
423
|
+
metadata.buffer.push(record);
|
|
424
|
+
while (metadata.buffer.length > maxBufferSize) metadata.buffer.shift();
|
|
425
|
+
}
|
|
426
|
+
};
|
|
427
|
+
if (cleanupTimer !== null) fingersCrossedSink[Symbol.dispose] = () => {
|
|
428
|
+
if (cleanupTimer !== null) {
|
|
429
|
+
clearInterval(cleanupTimer);
|
|
430
|
+
cleanupTimer = null;
|
|
387
431
|
}
|
|
388
432
|
};
|
|
433
|
+
return fingersCrossedSink;
|
|
389
434
|
}
|
|
390
435
|
}
|
|
391
436
|
|
package/dist/sink.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sink.js","names":["sink: Sink","filter: FilterLike","record: LogRecord","stream: WritableStream","options: StreamSinkOptions","sink: Sink & AsyncDisposable","buffer: LogRecord[]","flushTimer: ReturnType<typeof setInterval> | null","activeFlush: Promise<void> | null","nonBlockingSink: Sink & AsyncDisposable","options: ConsoleSinkOptions","levelMap: Record<LogLevel, ConsoleMethod>","nonBlockingSink: Sink & Disposable","asyncSink: AsyncSink","options: FingersCrossedOptions","parent: readonly string[]","child: readonly string[]","shouldFlushBuffer:\n | ((\n triggerCategory: readonly string[],\n bufferedCategory: readonly string[],\n ) => boolean)\n | null","category: readonly string[]","key: string","properties: Record<string, unknown>","contextValues: Record<string, unknown>","allRecordsToFlush: LogRecord[]"],"sources":["../src/sink.ts"],"sourcesContent":["import { type FilterLike, toFilter } from \"./filter.ts\";\nimport {\n type ConsoleFormatter,\n defaultConsoleFormatter,\n defaultTextFormatter,\n type TextFormatter,\n} from \"./formatter.ts\";\nimport { compareLogLevel, type LogLevel } from \"./level.ts\";\nimport type { LogRecord } from \"./record.ts\";\n\n/**\n * A sink is a function that accepts a log record and prints it somewhere.\n * Thrown exceptions will be suppressed and then logged to the meta logger,\n * a {@link Logger} with the category `[\"logtape\", \"meta\"]`. (In that case,\n * the meta log record will not be passed to the sink to avoid infinite\n * recursion.)\n *\n * @param record The log record to sink.\n */\nexport type Sink = (record: LogRecord) => void;\n\n/**\n * An async sink is a function that accepts a log record and asynchronously\n * processes it. 
This type is used with {@link fromAsyncSink} to create\n * a regular sink that properly handles asynchronous operations.\n *\n * @param record The log record to process asynchronously.\n * @returns A promise that resolves when the record has been processed.\n * @since 1.0.0\n */\nexport type AsyncSink = (record: LogRecord) => Promise<void>;\n\n/**\n * Turns a sink into a filtered sink. The returned sink only logs records that\n * pass the filter.\n *\n * @example Filter a console sink to only log records with the info level\n * ```typescript\n * const sink = withFilter(getConsoleSink(), \"info\");\n * ```\n *\n * @param sink A sink to be filtered.\n * @param filter A filter to apply to the sink. It can be either a filter\n * function or a {@link LogLevel} string.\n * @returns A sink that only logs records that pass the filter.\n */\nexport function withFilter(sink: Sink, filter: FilterLike): Sink {\n const filterFunc = toFilter(filter);\n return (record: LogRecord) => {\n if (filterFunc(record)) sink(record);\n };\n}\n\n/**\n * Options for the {@link getStreamSink} function.\n */\nexport interface StreamSinkOptions {\n /**\n * The text formatter to use. Defaults to {@link defaultTextFormatter}.\n */\n formatter?: TextFormatter;\n\n /**\n * The text encoder to use. 
Defaults to an instance of {@link TextEncoder}.\n */\n encoder?: { encode(text: string): Uint8Array };\n\n /**\n * Enable non-blocking mode with optional buffer configuration.\n * When enabled, log records are buffered and flushed in the background.\n *\n * @example Simple non-blocking mode\n * ```typescript\n * getStreamSink(stream, { nonBlocking: true });\n * ```\n *\n * @example Custom buffer configuration\n * ```typescript\n * getStreamSink(stream, {\n * nonBlocking: {\n * bufferSize: 1000,\n * flushInterval: 50\n * }\n * });\n * ```\n *\n * @default `false`\n * @since 1.0.0\n */\n nonBlocking?: boolean | {\n /**\n * Maximum number of records to buffer before flushing.\n * @default `100`\n */\n bufferSize?: number;\n\n /**\n * Interval in milliseconds between automatic flushes.\n * @default `100`\n */\n flushInterval?: number;\n };\n}\n\n/**\n * A factory that returns a sink that writes to a {@link WritableStream}.\n *\n * Note that the `stream` is of Web Streams API, which is different from\n * Node.js streams. You can convert a Node.js stream to a Web Streams API\n * stream using [`stream.Writable.toWeb()`] method.\n *\n * [`stream.Writable.toWeb()`]: https://nodejs.org/api/stream.html#streamwritabletowebstreamwritable\n *\n * @example Sink to the standard error in Deno\n * ```typescript\n * const stderrSink = getStreamSink(Deno.stderr.writable);\n * ```\n *\n * @example Sink to the standard error in Node.js\n * ```typescript\n * import stream from \"node:stream\";\n * const stderrSink = getStreamSink(stream.Writable.toWeb(process.stderr));\n * ```\n *\n * @param stream The stream to write to.\n * @param options The options for the sink.\n * @returns A sink that writes to the stream.\n */\nexport function getStreamSink(\n stream: WritableStream,\n options: StreamSinkOptions = {},\n): Sink & AsyncDisposable {\n const formatter = options.formatter ?? defaultTextFormatter;\n const encoder = options.encoder ?? 
new TextEncoder();\n const writer = stream.getWriter();\n\n if (!options.nonBlocking) {\n let lastPromise = Promise.resolve();\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n const bytes = encoder.encode(formatter(record));\n lastPromise = lastPromise\n .then(() => writer.ready)\n .then(() => writer.write(bytes));\n };\n sink[Symbol.asyncDispose] = async () => {\n await lastPromise;\n await writer.close();\n };\n return sink;\n }\n\n // Non-blocking mode implementation\n const nonBlockingConfig = options.nonBlocking === true\n ? {}\n : options.nonBlocking;\n const bufferSize = nonBlockingConfig.bufferSize ?? 100;\n const flushInterval = nonBlockingConfig.flushInterval ?? 100;\n\n const buffer: LogRecord[] = [];\n let flushTimer: ReturnType<typeof setInterval> | null = null;\n let disposed = false;\n let activeFlush: Promise<void> | null = null;\n const maxBufferSize = bufferSize * 2; // Overflow protection\n\n async function flush(): Promise<void> {\n if (buffer.length === 0) return;\n\n const records = buffer.splice(0);\n for (const record of records) {\n try {\n const bytes = encoder.encode(formatter(record));\n await writer.ready;\n await writer.write(bytes);\n } catch {\n // Silently ignore errors in non-blocking mode to avoid disrupting the application\n }\n }\n }\n\n function scheduleFlush(): void {\n if (activeFlush) return;\n\n activeFlush = flush().finally(() => {\n activeFlush = null;\n });\n }\n\n function startFlushTimer(): void {\n if (flushTimer !== null || disposed) return;\n\n flushTimer = setInterval(() => {\n scheduleFlush();\n }, flushInterval);\n }\n\n const nonBlockingSink: Sink & AsyncDisposable = (record: LogRecord) => {\n if (disposed) return;\n\n // Buffer overflow protection: drop oldest records if buffer is too large\n if (buffer.length >= maxBufferSize) {\n buffer.shift(); // Remove oldest record\n }\n\n buffer.push(record);\n\n if (buffer.length >= bufferSize) {\n scheduleFlush();\n } else if (flushTimer === null) {\n 
startFlushTimer();\n }\n };\n\n nonBlockingSink[Symbol.asyncDispose] = async () => {\n disposed = true;\n if (flushTimer !== null) {\n clearInterval(flushTimer);\n flushTimer = null;\n }\n await flush();\n try {\n await writer.close();\n } catch {\n // Writer might already be closed or errored\n }\n };\n\n return nonBlockingSink;\n}\n\ntype ConsoleMethod = \"debug\" | \"info\" | \"log\" | \"warn\" | \"error\";\n\n/**\n * Options for the {@link getConsoleSink} function.\n */\nexport interface ConsoleSinkOptions {\n /**\n * The console formatter or text formatter to use.\n * Defaults to {@link defaultConsoleFormatter}.\n */\n formatter?: ConsoleFormatter | TextFormatter;\n\n /**\n * The mapping from log levels to console methods. Defaults to:\n *\n * ```typescript\n * {\n * trace: \"trace\",\n * debug: \"debug\",\n * info: \"info\",\n * warning: \"warn\",\n * error: \"error\",\n * fatal: \"error\",\n * }\n * ```\n * @since 0.9.0\n */\n levelMap?: Record<LogLevel, ConsoleMethod>;\n\n /**\n * The console to log to. Defaults to {@link console}.\n */\n console?: Console;\n\n /**\n * Enable non-blocking mode with optional buffer configuration.\n * When enabled, log records are buffered and flushed in the background.\n *\n * @example Simple non-blocking mode\n * ```typescript\n * getConsoleSink({ nonBlocking: true });\n * ```\n *\n * @example Custom buffer configuration\n * ```typescript\n * getConsoleSink({\n * nonBlocking: {\n * bufferSize: 1000,\n * flushInterval: 50\n * }\n * });\n * ```\n *\n * @default `false`\n * @since 1.0.0\n */\n nonBlocking?: boolean | {\n /**\n * Maximum number of records to buffer before flushing.\n * @default `100`\n */\n bufferSize?: number;\n\n /**\n * Interval in milliseconds between automatic flushes.\n * @default `100`\n */\n flushInterval?: number;\n };\n}\n\n/**\n * A console sink factory that returns a sink that logs to the console.\n *\n * @param options The options for the sink.\n * @returns A sink that logs to the console. 
If `nonBlocking` is enabled,\n * returns a sink that also implements {@link Disposable}.\n */\nexport function getConsoleSink(\n options: ConsoleSinkOptions = {},\n): Sink | (Sink & Disposable) {\n const formatter = options.formatter ?? defaultConsoleFormatter;\n const levelMap: Record<LogLevel, ConsoleMethod> = {\n trace: \"debug\",\n debug: \"debug\",\n info: \"info\",\n warning: \"warn\",\n error: \"error\",\n fatal: \"error\",\n ...(options.levelMap ?? {}),\n };\n const console = options.console ?? globalThis.console;\n\n const baseSink = (record: LogRecord) => {\n const args = formatter(record);\n const method = levelMap[record.level];\n if (method === undefined) {\n throw new TypeError(`Invalid log level: ${record.level}.`);\n }\n if (typeof args === \"string\") {\n const msg = args.replace(/\\r?\\n$/, \"\");\n console[method](msg);\n } else {\n console[method](...args);\n }\n };\n\n if (!options.nonBlocking) {\n return baseSink;\n }\n\n // Non-blocking mode implementation\n const nonBlockingConfig = options.nonBlocking === true\n ? {}\n : options.nonBlocking;\n const bufferSize = nonBlockingConfig.bufferSize ?? 100;\n const flushInterval = nonBlockingConfig.flushInterval ?? 
100;\n\n const buffer: LogRecord[] = [];\n let flushTimer: ReturnType<typeof setInterval> | null = null;\n let disposed = false;\n let flushScheduled = false;\n const maxBufferSize = bufferSize * 2; // Overflow protection\n\n function flush(): void {\n if (buffer.length === 0) return;\n\n const records = buffer.splice(0);\n for (const record of records) {\n try {\n baseSink(record);\n } catch {\n // Silently ignore errors in non-blocking mode to avoid disrupting the application\n }\n }\n }\n\n function scheduleFlush(): void {\n if (flushScheduled) return;\n\n flushScheduled = true;\n setTimeout(() => {\n flushScheduled = false;\n flush();\n }, 0);\n }\n\n function startFlushTimer(): void {\n if (flushTimer !== null || disposed) return;\n\n flushTimer = setInterval(() => {\n flush();\n }, flushInterval);\n }\n\n const nonBlockingSink: Sink & Disposable = (record: LogRecord) => {\n if (disposed) return;\n\n // Buffer overflow protection: drop oldest records if buffer is too large\n if (buffer.length >= maxBufferSize) {\n buffer.shift(); // Remove oldest record\n }\n\n buffer.push(record);\n\n if (buffer.length >= bufferSize) {\n scheduleFlush();\n } else if (flushTimer === null) {\n startFlushTimer();\n }\n };\n\n nonBlockingSink[Symbol.dispose] = () => {\n disposed = true;\n if (flushTimer !== null) {\n clearInterval(flushTimer);\n flushTimer = null;\n }\n flush();\n };\n\n return nonBlockingSink;\n}\n\n/**\n * Converts an async sink into a regular sink with proper async handling.\n * The returned sink chains async operations to ensure proper ordering and\n * implements AsyncDisposable to wait for all pending operations on disposal.\n *\n * @example Create a sink that asynchronously posts to a webhook\n * ```typescript\n * const asyncSink: AsyncSink = async (record) => {\n * await fetch(\"https://example.com/logs\", {\n * method: \"POST\",\n * body: JSON.stringify(record),\n * });\n * };\n * const sink = fromAsyncSink(asyncSink);\n * ```\n *\n * @param asyncSink The 
async sink function to convert.\n * @returns A sink that properly handles async operations and disposal.\n * @since 1.0.0\n */\nexport function fromAsyncSink(asyncSink: AsyncSink): Sink & AsyncDisposable {\n let lastPromise = Promise.resolve();\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n lastPromise = lastPromise\n .then(() => asyncSink(record))\n .catch(() => {\n // Errors are handled by the sink infrastructure\n });\n };\n sink[Symbol.asyncDispose] = async () => {\n await lastPromise;\n };\n return sink;\n}\n\n/**\n * Options for the {@link fingersCrossed} function.\n * @since 1.1.0\n */\nexport interface FingersCrossedOptions {\n /**\n * Minimum log level that triggers buffer flush.\n * When a log record at or above this level is received, all buffered\n * records are flushed to the wrapped sink.\n * @default `\"error\"`\n */\n readonly triggerLevel?: LogLevel;\n\n /**\n * Maximum buffer size before oldest records are dropped.\n * When the buffer exceeds this size, the oldest records are removed\n * to prevent unbounded memory growth.\n * @default `1000`\n */\n readonly maxBufferSize?: number;\n\n /**\n * Category isolation mode or custom matcher function.\n *\n * When `undefined` (default), all log records share a single buffer.\n *\n * When set to a mode string:\n *\n * - `\"descendant\"`: Flush child category buffers when parent triggers\n * - `\"ancestor\"`: Flush parent category buffers when child triggers\n * - `\"both\"`: Flush both parent and child category buffers\n *\n * When set to a function, it receives the trigger category and buffered\n * category and should return true if the buffered category should be flushed.\n *\n * @default `undefined` (no isolation, single global buffer)\n */\n readonly isolateByCategory?:\n | \"descendant\"\n | \"ancestor\"\n | \"both\"\n | ((\n triggerCategory: readonly string[],\n bufferedCategory: readonly string[],\n ) => boolean);\n\n /**\n * Enable context-based buffer isolation.\n * When 
enabled, buffers are isolated based on specified context keys.\n * This is useful for scenarios like HTTP request tracing where logs\n * should be isolated per request.\n *\n * @example\n * ```typescript\n * fingersCrossed(sink, {\n * isolateByContext: { keys: ['requestId'] }\n * })\n * ```\n *\n * @example Combined with category isolation\n * ```typescript\n * fingersCrossed(sink, {\n * isolateByCategory: 'descendant',\n * isolateByContext: { keys: ['requestId', 'sessionId'] }\n * })\n * ```\n *\n * @default `undefined` (no context isolation)\n * @since 1.2.0\n */\n readonly isolateByContext?: {\n /**\n * Context keys to use for isolation.\n * Buffers will be separate for different combinations of these context values.\n */\n readonly keys: readonly string[];\n };\n}\n\n/**\n * Creates a sink that buffers log records until a trigger level is reached.\n * This pattern, known as \"fingers crossed\" logging, keeps detailed debug logs\n * in memory and only outputs them when an error or other significant event occurs.\n *\n * @example Basic usage with default settings\n * ```typescript\n * const sink = fingersCrossed(getConsoleSink());\n * // Debug and info logs are buffered\n * // When an error occurs, all buffered logs + the error are output\n * ```\n *\n * @example Custom trigger level and buffer size\n * ```typescript\n * const sink = fingersCrossed(getConsoleSink(), {\n * triggerLevel: \"warning\", // Trigger on warning or higher\n * maxBufferSize: 500 // Keep last 500 records\n * });\n * ```\n *\n * @example Category isolation\n * ```typescript\n * const sink = fingersCrossed(getConsoleSink(), {\n * isolateByCategory: \"descendant\" // Separate buffers per category\n * });\n * // Error in [\"app\"] triggers flush of [\"app\"] and [\"app\", \"module\"] buffers\n * // But not [\"other\"] buffer\n * ```\n *\n * @param sink The sink to wrap. 
Buffered records are sent to this sink when\n * triggered.\n * @param options Configuration options for the fingers crossed behavior.\n * @returns A sink that buffers records until the trigger level is reached.\n * @since 1.1.0\n */\nexport function fingersCrossed(\n sink: Sink,\n options: FingersCrossedOptions = {},\n): Sink {\n const triggerLevel = options.triggerLevel ?? \"error\";\n const maxBufferSize = Math.max(0, options.maxBufferSize ?? 1000);\n const isolateByCategory = options.isolateByCategory;\n const isolateByContext = options.isolateByContext;\n\n // Validate trigger level early\n try {\n compareLogLevel(\"trace\", triggerLevel); // Test with any valid level\n } catch (error) {\n throw new TypeError(\n `Invalid triggerLevel: ${JSON.stringify(triggerLevel)}. ${\n error instanceof Error ? error.message : String(error)\n }`,\n );\n }\n\n // Helper functions for category matching\n function isDescendant(\n parent: readonly string[],\n child: readonly string[],\n ): boolean {\n if (parent.length === 0 || child.length === 0) return false; // Empty categories are isolated\n if (parent.length > child.length) return false;\n return parent.every((p, i) => p === child[i]);\n }\n\n function isAncestor(\n child: readonly string[],\n parent: readonly string[],\n ): boolean {\n if (child.length === 0 || parent.length === 0) return false; // Empty categories are isolated\n if (child.length < parent.length) return false;\n return parent.every((p, i) => p === child[i]);\n }\n\n // Determine matcher function based on isolation mode\n let shouldFlushBuffer:\n | ((\n triggerCategory: readonly string[],\n bufferedCategory: readonly string[],\n ) => boolean)\n | null = null;\n\n if (isolateByCategory) {\n if (typeof isolateByCategory === \"function\") {\n shouldFlushBuffer = isolateByCategory;\n } else {\n switch (isolateByCategory) {\n case \"descendant\":\n shouldFlushBuffer = (trigger, buffered) =>\n isDescendant(trigger, buffered);\n break;\n case \"ancestor\":\n 
shouldFlushBuffer = (trigger, buffered) =>\n isAncestor(trigger, buffered);\n break;\n case \"both\":\n shouldFlushBuffer = (trigger, buffered) =>\n isDescendant(trigger, buffered) || isAncestor(trigger, buffered);\n break;\n }\n }\n }\n\n // Helper functions for category serialization\n function getCategoryKey(category: readonly string[]): string {\n return JSON.stringify(category);\n }\n\n function parseCategoryKey(key: string): string[] {\n return JSON.parse(key);\n }\n\n // Helper function to extract context values from properties\n function getContextKey(properties: Record<string, unknown>): string {\n if (!isolateByContext || isolateByContext.keys.length === 0) {\n return \"\";\n }\n const contextValues: Record<string, unknown> = {};\n for (const key of isolateByContext.keys) {\n if (key in properties) {\n contextValues[key] = properties[key];\n }\n }\n return JSON.stringify(contextValues);\n }\n\n // Helper function to generate buffer key\n function getBufferKey(\n category: readonly string[],\n properties: Record<string, unknown>,\n ): string {\n const categoryKey = getCategoryKey(category);\n if (!isolateByContext) {\n return categoryKey;\n }\n const contextKey = getContextKey(properties);\n return `${categoryKey}:${contextKey}`;\n }\n\n // Helper function to parse buffer key\n function parseBufferKey(key: string): {\n category: string[];\n context: string;\n } {\n if (!isolateByContext) {\n return { category: parseCategoryKey(key), context: \"\" };\n }\n // Find the separator between category and context\n // The category part is JSON-encoded, so we need to find where it ends\n // We look for \"]:\" which indicates end of category array and start of context\n const separatorIndex = key.indexOf(\"]:\");\n if (separatorIndex === -1) {\n // No context part, entire key is category\n return { category: parseCategoryKey(key), context: \"\" };\n }\n const categoryPart = key.substring(0, separatorIndex + 1); // Include the ]\n const contextPart = 
key.substring(separatorIndex + 2); // Skip ]:\n return { category: parseCategoryKey(categoryPart), context: contextPart };\n }\n\n // Buffer management\n if (!isolateByCategory && !isolateByContext) {\n // Single global buffer\n const buffer: LogRecord[] = [];\n let triggered = false;\n\n return (record: LogRecord) => {\n if (triggered) {\n // Already triggered, pass through directly\n sink(record);\n return;\n }\n\n // Check if this record triggers flush\n if (compareLogLevel(record.level, triggerLevel) >= 0) {\n triggered = true;\n\n // Flush buffer\n for (const bufferedRecord of buffer) {\n sink(bufferedRecord);\n }\n buffer.length = 0;\n\n // Send trigger record\n sink(record);\n } else {\n // Buffer the record\n buffer.push(record);\n\n // Enforce max buffer size\n while (buffer.length > maxBufferSize) {\n buffer.shift();\n }\n }\n };\n } else {\n // Category and/or context-isolated buffers\n const buffers = new Map<string, LogRecord[]>();\n const triggered = new Set<string>();\n\n return (record: LogRecord) => {\n const bufferKey = getBufferKey(record.category, record.properties);\n\n // Check if this buffer is already triggered\n if (triggered.has(bufferKey)) {\n sink(record);\n return;\n }\n\n // Check if this record triggers flush\n if (compareLogLevel(record.level, triggerLevel) >= 0) {\n // Find all buffers that should be flushed\n const keysToFlush = new Set<string>();\n\n for (const [bufferedKey] of buffers) {\n if (bufferedKey === bufferKey) {\n keysToFlush.add(bufferedKey);\n } else {\n const { category: bufferedCategory, context: bufferedContext } =\n parseBufferKey(bufferedKey);\n const { context: triggerContext } = parseBufferKey(bufferKey);\n\n // Check context match\n let contextMatches = true;\n if (isolateByContext) {\n contextMatches = bufferedContext === triggerContext;\n }\n\n // Check category match\n let categoryMatches = false;\n if (!isolateByCategory) {\n // No category isolation, so all categories match if context matches\n 
categoryMatches = contextMatches;\n } else if (shouldFlushBuffer) {\n try {\n categoryMatches = shouldFlushBuffer(\n record.category,\n bufferedCategory,\n );\n } catch {\n // Ignore errors from custom matcher\n }\n } else {\n // Same category only\n categoryMatches = getCategoryKey(record.category) ===\n getCategoryKey(bufferedCategory);\n }\n\n // Both must match for the buffer to be flushed\n if (contextMatches && categoryMatches) {\n keysToFlush.add(bufferedKey);\n }\n }\n }\n\n // Flush matching buffers\n const allRecordsToFlush: LogRecord[] = [];\n for (const key of keysToFlush) {\n const buffer = buffers.get(key);\n if (buffer) {\n allRecordsToFlush.push(...buffer);\n buffers.delete(key);\n triggered.add(key);\n }\n }\n\n // Sort by timestamp to maintain chronological order\n allRecordsToFlush.sort((a, b) => a.timestamp - b.timestamp);\n\n // Flush all records\n for (const bufferedRecord of allRecordsToFlush) {\n sink(bufferedRecord);\n }\n\n // Mark trigger buffer as triggered and send trigger record\n triggered.add(bufferKey);\n sink(record);\n } else {\n // Buffer the record\n let buffer = buffers.get(bufferKey);\n if (!buffer) {\n buffer = [];\n buffers.set(bufferKey, buffer);\n }\n\n buffer.push(record);\n\n // Enforce max buffer size per buffer\n while (buffer.length > maxBufferSize) {\n buffer.shift();\n }\n }\n };\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AA8CA,SAAgB,WAAWA,MAAYC,QAA0B;CAC/D,MAAM,aAAa,SAAS,OAAO;AACnC,QAAO,CAACC,WAAsB;AAC5B,MAAI,WAAW,OAAO,CAAE,MAAK,OAAO;CACrC;AACF;;;;;;;;;;;;;;;;;;;;;;;;;AA6ED,SAAgB,cACdC,QACAC,UAA6B,CAAE,GACP;CACxB,MAAM,YAAY,QAAQ,aAAa;CACvC,MAAM,UAAU,QAAQ,WAAW,IAAI;CACvC,MAAM,SAAS,OAAO,WAAW;AAEjC,MAAK,QAAQ,aAAa;EACxB,IAAI,cAAc,QAAQ,SAAS;EACnC,MAAMC,OAA+B,CAACH,WAAsB;GAC1D,MAAM,QAAQ,QAAQ,OAAO,UAAU,OAAO,CAAC;AAC/C,iBAAc,YACX,KAAK,MAAM,OAAO,MAAM,CACxB,KAAK,MAAM,OAAO,MAAM,MAAM,CAAC;EACnC;AACD,OAAK,OAAO,gBAAgB,YAAY;AACtC,SAAM;AACN,SAAM,OAAO,OAAO;EACrB;AACD,SAAO;CACR;CAGD,MAAM,oBAAoB,QAAQ,gBAAgB,OAC9C,CAAE,IACF,QAAQ;CACZ,MAAM,aAAa,kBAAkB,cAAc;CACnD,MAAM,gBAAgB,kBAAkB,iBAAiB;CAEzD,MAAMI,SAAsB,CAAE;CAC9B,IAAIC,aAAoD;CACxD,IAAI,WAAW;CACf,IAAIC,cAAoC;CACxC,MAAM,gBAAgB,aAAa;CAEnC,eAAe,QAAuB;AACpC,MAAI,OAAO,WAAW,EAAG;EAEzB,MAAM,UAAU,OAAO,OAAO,EAAE;AAChC,OAAK,MAAM,UAAU,QACnB,KAAI;GACF,MAAM,QAAQ,QAAQ,OAAO,UAAU,OAAO,CAAC;AAC/C,SAAM,OAAO;AACb,SAAM,OAAO,MAAM,MAAM;EAC1B,QAAO,CAEP;CAEJ;CAED,SAAS,gBAAsB;AAC7B,MAAI,YAAa;AAEjB,gBAAc,OAAO,CAAC,QAAQ,MAAM;AAClC,iBAAc;EACf,EAAC;CACH;CAED,SAAS,kBAAwB;AAC/B,MAAI,eAAe,QAAQ,SAAU;AAErC,eAAa,YAAY,MAAM;AAC7B,kBAAe;EAChB,GAAE,cAAc;CAClB;CAED,MAAMC,kBAA0C,CAACP,WAAsB;AACrE,MAAI,SAAU;AAGd,MAAI,OAAO,UAAU,cACnB,QAAO,OAAO;AAGhB,SAAO,KAAK,OAAO;AAEnB,MAAI,OAAO,UAAU,WACnB,gBAAe;WACN,eAAe,KACxB,kBAAiB;CAEpB;AAED,iBAAgB,OAAO,gBAAgB,YAAY;AACjD,aAAW;AACX,MAAI,eAAe,MAAM;AACvB,iBAAc,WAAW;AACzB,gBAAa;EACd;AACD,QAAM,OAAO;AACb,MAAI;AACF,SAAM,OAAO,OAAO;EACrB,QAAO,CAEP;CACF;AAED,QAAO;AACR;;;;;;;;AAgFD,SAAgB,eACdQ,UAA8B,CAAE,GACJ;CAC5B,MAAM,YAAY,QAAQ,aAAa;CACvC,MAAMC,WAA4C;EAChD,OAAO;EACP,OAAO;EACP,MAAM;EACN,SAAS;EACT,OAAO;EACP,OAAO;EACP,GAAI,QAAQ,YAAY,CAAE;CAC3B;CACD,MAAM,UAAU,QAAQ,WAAW,WAAW;CAE9C,MAAM,WAAW,CAACT,WAAsB;EACtC,MAAM,OAAO,UAAU,OAAO;EAC9B,MAAM,SAAS,SAAS,OAAO;AAC/B,MAAI,kBACF,OAAM,IAAI,WAAW,qBAAqB,OAAO,MAAM;AAEzD,aAAW,SAAS,UAAU;GAC5B,MAAM,MAAM,KAAK,QAAQ,UAAU,GAAG;AACtC,WAAQ,QAAQ,IAAI;EACrB,MACC,SAAQ,QAAQ,GAAG,KAAK;CAE3B;AAED,MAAK,QAAQ,Y
ACX,QAAO;CAIT,MAAM,oBAAoB,QAAQ,gBAAgB,OAC9C,CAAE,IACF,QAAQ;CACZ,MAAM,aAAa,kBAAkB,cAAc;CACnD,MAAM,gBAAgB,kBAAkB,iBAAiB;CAEzD,MAAMI,SAAsB,CAAE;CAC9B,IAAIC,aAAoD;CACxD,IAAI,WAAW;CACf,IAAI,iBAAiB;CACrB,MAAM,gBAAgB,aAAa;CAEnC,SAAS,QAAc;AACrB,MAAI,OAAO,WAAW,EAAG;EAEzB,MAAM,UAAU,OAAO,OAAO,EAAE;AAChC,OAAK,MAAM,UAAU,QACnB,KAAI;AACF,YAAS,OAAO;EACjB,QAAO,CAEP;CAEJ;CAED,SAAS,gBAAsB;AAC7B,MAAI,eAAgB;AAEpB,mBAAiB;AACjB,aAAW,MAAM;AACf,oBAAiB;AACjB,UAAO;EACR,GAAE,EAAE;CACN;CAED,SAAS,kBAAwB;AAC/B,MAAI,eAAe,QAAQ,SAAU;AAErC,eAAa,YAAY,MAAM;AAC7B,UAAO;EACR,GAAE,cAAc;CAClB;CAED,MAAMK,kBAAqC,CAACV,WAAsB;AAChE,MAAI,SAAU;AAGd,MAAI,OAAO,UAAU,cACnB,QAAO,OAAO;AAGhB,SAAO,KAAK,OAAO;AAEnB,MAAI,OAAO,UAAU,WACnB,gBAAe;WACN,eAAe,KACxB,kBAAiB;CAEpB;AAED,iBAAgB,OAAO,WAAW,MAAM;AACtC,aAAW;AACX,MAAI,eAAe,MAAM;AACvB,iBAAc,WAAW;AACzB,gBAAa;EACd;AACD,SAAO;CACR;AAED,QAAO;AACR;;;;;;;;;;;;;;;;;;;;;AAsBD,SAAgB,cAAcW,WAA8C;CAC1E,IAAI,cAAc,QAAQ,SAAS;CACnC,MAAMR,OAA+B,CAACH,WAAsB;AAC1D,gBAAc,YACX,KAAK,MAAM,UAAU,OAAO,CAAC,CAC7B,MAAM,MAAM,CAEZ,EAAC;CACL;AACD,MAAK,OAAO,gBAAgB,YAAY;AACtC,QAAM;CACP;AACD,QAAO;AACR;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAoHD,SAAgB,eACdF,MACAc,UAAiC,CAAE,GAC7B;CACN,MAAM,eAAe,QAAQ,gBAAgB;CAC7C,MAAM,gBAAgB,KAAK,IAAI,GAAG,QAAQ,iBAAiB,IAAK;CAChE,MAAM,oBAAoB,QAAQ;CAClC,MAAM,mBAAmB,QAAQ;AAGjC,KAAI;AACF,kBAAgB,SAAS,aAAa;CACvC,SAAQ,OAAO;AACd,QAAM,IAAI,WACP,wBAAwB,KAAK,UAAU,aAAa,CAAC,IACpD,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,CACvD;CAEJ;CAGD,SAAS,aACPC,QACAC,OACS;AACT,MAAI,OAAO,WAAW,KAAK,MAAM,WAAW,EAAG,QAAO;AACtD,MAAI,OAAO,SAAS,MAAM,OAAQ,QAAO;AACzC,SAAO,OAAO,MAAM,CAAC,GAAG,MAAM,MAAM,MAAM,GAAG;CAC9C;CAED,SAAS,WACPA,OACAD,QACS;AACT,MAAI,MAAM,WAAW,KAAK,OAAO,WAAW,EAAG,QAAO;AACtD,MAAI,MAAM,SAAS,OAAO,OAAQ,QAAO;AACzC,SAAO,OAAO,MAAM,CAAC,GAAG,MAAM,MAAM,MAAM,GAAG;CAC9C;CAGD,IAAIE,oBAKO;AAEX,KAAI,kBACF,YAAW,sBAAsB,WAC/B,qBAAoB;KAEpB,SAAQ,mBAAR;EACE,KAAK;AACH,uBAAoB,CAAC,SAAS,aAC5B,aAAa,SAAS,SAAS;AACjC;EACF,KAAK;AACH,uBAAoB,CAAC,SAAS,aAC5B,WAAW,SAAS,SAAS;AAC/B;EACF,KAAK;AACH,uBAAoB,CAAC,SAAS,aAC5B,aAAa,SAAS,SAAS,IAAI
,WAAW,SAAS,SAAS;AAClE;CACH;CAKL,SAAS,eAAeC,UAAqC;AAC3D,SAAO,KAAK,UAAU,SAAS;CAChC;CAED,SAAS,iBAAiBC,KAAuB;AAC/C,SAAO,KAAK,MAAM,IAAI;CACvB;CAGD,SAAS,cAAcC,YAA6C;AAClE,OAAK,oBAAoB,iBAAiB,KAAK,WAAW,EACxD,QAAO;EAET,MAAMC,gBAAyC,CAAE;AACjD,OAAK,MAAM,OAAO,iBAAiB,KACjC,KAAI,OAAO,WACT,eAAc,OAAO,WAAW;AAGpC,SAAO,KAAK,UAAU,cAAc;CACrC;CAGD,SAAS,aACPH,UACAE,YACQ;EACR,MAAM,cAAc,eAAe,SAAS;AAC5C,OAAK,iBACH,QAAO;EAET,MAAM,aAAa,cAAc,WAAW;AAC5C,UAAQ,EAAE,YAAY,GAAG,WAAW;CACrC;CAGD,SAAS,eAAeD,KAGtB;AACA,OAAK,iBACH,QAAO;GAAE,UAAU,iBAAiB,IAAI;GAAE,SAAS;EAAI;EAKzD,MAAM,iBAAiB,IAAI,QAAQ,KAAK;AACxC,MAAI,mBAAmB,GAErB,QAAO;GAAE,UAAU,iBAAiB,IAAI;GAAE,SAAS;EAAI;EAEzD,MAAM,eAAe,IAAI,UAAU,GAAG,iBAAiB,EAAE;EACzD,MAAM,cAAc,IAAI,UAAU,iBAAiB,EAAE;AACrD,SAAO;GAAE,UAAU,iBAAiB,aAAa;GAAE,SAAS;EAAa;CAC1E;AAGD,MAAK,sBAAsB,kBAAkB;EAE3C,MAAMb,SAAsB,CAAE;EAC9B,IAAI,YAAY;AAEhB,SAAO,CAACJ,WAAsB;AAC5B,OAAI,WAAW;AAEb,SAAK,OAAO;AACZ;GACD;AAGD,OAAI,gBAAgB,OAAO,OAAO,aAAa,IAAI,GAAG;AACpD,gBAAY;AAGZ,SAAK,MAAM,kBAAkB,OAC3B,MAAK,eAAe;AAEtB,WAAO,SAAS;AAGhB,SAAK,OAAO;GACb,OAAM;AAEL,WAAO,KAAK,OAAO;AAGnB,WAAO,OAAO,SAAS,cACrB,QAAO,OAAO;GAEjB;EACF;CACF,OAAM;EAEL,MAAM,0BAAU,IAAI;EACpB,MAAM,4BAAY,IAAI;AAEtB,SAAO,CAACA,WAAsB;GAC5B,MAAM,YAAY,aAAa,OAAO,UAAU,OAAO,WAAW;AAGlE,OAAI,UAAU,IAAI,UAAU,EAAE;AAC5B,SAAK,OAAO;AACZ;GACD;AAGD,OAAI,gBAAgB,OAAO,OAAO,aAAa,IAAI,GAAG;IAEpD,MAAM,8BAAc,IAAI;AAExB,SAAK,MAAM,CAAC,YAAY,IAAI,QAC1B,KAAI,gBAAgB,UAClB,aAAY,IAAI,YAAY;SACvB;KACL,MAAM,EAAE,UAAU,kBAAkB,SAAS,iBAAiB,GAC5D,eAAe,YAAY;KAC7B,MAAM,EAAE,SAAS,gBAAgB,GAAG,eAAe,UAAU;KAG7D,IAAI,iBAAiB;AACrB,SAAI,iBACF,kBAAiB,oBAAoB;KAIvC,IAAI,kBAAkB;AACtB,UAAK,kBAEH,mBAAkB;cACT,kBACT,KAAI;AACF,wBAAkB,kBAChB,OAAO,UACP,iBACD;KACF,QAAO,CAEP;SAGD,mBAAkB,eAAe,OAAO,SAAS,KAC/C,eAAe,iBAAiB;AAIpC,SAAI,kBAAkB,gBACpB,aAAY,IAAI,YAAY;IAE/B;IAIH,MAAMoB,oBAAiC,CAAE;AACzC,SAAK,MAAM,OAAO,aAAa;KAC7B,MAAM,SAAS,QAAQ,IAAI,IAAI;AAC/B,SAAI,QAAQ;AACV,wBAAkB,KAAK,GAAG,OAAO;AACjC,cAAQ,OAAO,IAAI;AACnB,gBAAU,IAAI,IAAI;KACnB;IACF;AAGD,sBAAkB,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,UAAU;AAG3D,S
AAK,MAAM,kBAAkB,kBAC3B,MAAK,eAAe;AAItB,cAAU,IAAI,UAAU;AACxB,SAAK,OAAO;GACb,OAAM;IAEL,IAAI,SAAS,QAAQ,IAAI,UAAU;AACnC,SAAK,QAAQ;AACX,cAAS,CAAE;AACX,aAAQ,IAAI,WAAW,OAAO;IAC/B;AAED,WAAO,KAAK,OAAO;AAGnB,WAAO,OAAO,SAAS,cACrB,QAAO,OAAO;GAEjB;EACF;CACF;AACF"}
|
|
1
|
+
{"version":3,"file":"sink.js","names":["sink: Sink","filter: FilterLike","record: LogRecord","stream: WritableStream","options: StreamSinkOptions","sink: Sink & AsyncDisposable","buffer: LogRecord[]","flushTimer: ReturnType<typeof setInterval> | null","activeFlush: Promise<void> | null","nonBlockingSink: Sink & AsyncDisposable","options: ConsoleSinkOptions","levelMap: Record<LogLevel, ConsoleMethod>","nonBlockingSink: Sink & Disposable","asyncSink: AsyncSink","options: FingersCrossedOptions","parent: readonly string[]","child: readonly string[]","shouldFlushBuffer:\n | ((\n triggerCategory: readonly string[],\n bufferedCategory: readonly string[],\n ) => boolean)\n | null","category: readonly string[]","key: string","properties: Record<string, unknown>","contextValues: Record<string, unknown>","buffers: Map<string, BufferMetadata>","expiredKeys: string[]","numToEvict?: number","cleanupTimer: ReturnType<typeof setInterval> | null","allRecordsToFlush: LogRecord[]"],"sources":["../src/sink.ts"],"sourcesContent":["import { type FilterLike, toFilter } from \"./filter.ts\";\nimport {\n type ConsoleFormatter,\n defaultConsoleFormatter,\n defaultTextFormatter,\n type TextFormatter,\n} from \"./formatter.ts\";\nimport { compareLogLevel, type LogLevel } from \"./level.ts\";\nimport type { LogRecord } from \"./record.ts\";\n\n/**\n * A sink is a function that accepts a log record and prints it somewhere.\n * Thrown exceptions will be suppressed and then logged to the meta logger,\n * a {@link Logger} with the category `[\"logtape\", \"meta\"]`. (In that case,\n * the meta log record will not be passed to the sink to avoid infinite\n * recursion.)\n *\n * @param record The log record to sink.\n */\nexport type Sink = (record: LogRecord) => void;\n\n/**\n * An async sink is a function that accepts a log record and asynchronously\n * processes it. 
This type is used with {@link fromAsyncSink} to create\n * a regular sink that properly handles asynchronous operations.\n *\n * @param record The log record to process asynchronously.\n * @returns A promise that resolves when the record has been processed.\n * @since 1.0.0\n */\nexport type AsyncSink = (record: LogRecord) => Promise<void>;\n\n/**\n * Turns a sink into a filtered sink. The returned sink only logs records that\n * pass the filter.\n *\n * @example Filter a console sink to only log records with the info level\n * ```typescript\n * const sink = withFilter(getConsoleSink(), \"info\");\n * ```\n *\n * @param sink A sink to be filtered.\n * @param filter A filter to apply to the sink. It can be either a filter\n * function or a {@link LogLevel} string.\n * @returns A sink that only logs records that pass the filter.\n */\nexport function withFilter(sink: Sink, filter: FilterLike): Sink {\n const filterFunc = toFilter(filter);\n return (record: LogRecord) => {\n if (filterFunc(record)) sink(record);\n };\n}\n\n/**\n * Options for the {@link getStreamSink} function.\n */\nexport interface StreamSinkOptions {\n /**\n * The text formatter to use. Defaults to {@link defaultTextFormatter}.\n */\n formatter?: TextFormatter;\n\n /**\n * The text encoder to use. 
Defaults to an instance of {@link TextEncoder}.\n */\n encoder?: { encode(text: string): Uint8Array };\n\n /**\n * Enable non-blocking mode with optional buffer configuration.\n * When enabled, log records are buffered and flushed in the background.\n *\n * @example Simple non-blocking mode\n * ```typescript\n * getStreamSink(stream, { nonBlocking: true });\n * ```\n *\n * @example Custom buffer configuration\n * ```typescript\n * getStreamSink(stream, {\n * nonBlocking: {\n * bufferSize: 1000,\n * flushInterval: 50\n * }\n * });\n * ```\n *\n * @default `false`\n * @since 1.0.0\n */\n nonBlocking?: boolean | {\n /**\n * Maximum number of records to buffer before flushing.\n * @default `100`\n */\n bufferSize?: number;\n\n /**\n * Interval in milliseconds between automatic flushes.\n * @default `100`\n */\n flushInterval?: number;\n };\n}\n\n/**\n * A factory that returns a sink that writes to a {@link WritableStream}.\n *\n * Note that the `stream` is of Web Streams API, which is different from\n * Node.js streams. You can convert a Node.js stream to a Web Streams API\n * stream using [`stream.Writable.toWeb()`] method.\n *\n * [`stream.Writable.toWeb()`]: https://nodejs.org/api/stream.html#streamwritabletowebstreamwritable\n *\n * @example Sink to the standard error in Deno\n * ```typescript\n * const stderrSink = getStreamSink(Deno.stderr.writable);\n * ```\n *\n * @example Sink to the standard error in Node.js\n * ```typescript\n * import stream from \"node:stream\";\n * const stderrSink = getStreamSink(stream.Writable.toWeb(process.stderr));\n * ```\n *\n * @param stream The stream to write to.\n * @param options The options for the sink.\n * @returns A sink that writes to the stream.\n */\nexport function getStreamSink(\n stream: WritableStream,\n options: StreamSinkOptions = {},\n): Sink & AsyncDisposable {\n const formatter = options.formatter ?? defaultTextFormatter;\n const encoder = options.encoder ?? 
new TextEncoder();\n const writer = stream.getWriter();\n\n if (!options.nonBlocking) {\n let lastPromise = Promise.resolve();\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n const bytes = encoder.encode(formatter(record));\n lastPromise = lastPromise\n .then(() => writer.ready)\n .then(() => writer.write(bytes));\n };\n sink[Symbol.asyncDispose] = async () => {\n await lastPromise;\n await writer.close();\n };\n return sink;\n }\n\n // Non-blocking mode implementation\n const nonBlockingConfig = options.nonBlocking === true\n ? {}\n : options.nonBlocking;\n const bufferSize = nonBlockingConfig.bufferSize ?? 100;\n const flushInterval = nonBlockingConfig.flushInterval ?? 100;\n\n const buffer: LogRecord[] = [];\n let flushTimer: ReturnType<typeof setInterval> | null = null;\n let disposed = false;\n let activeFlush: Promise<void> | null = null;\n const maxBufferSize = bufferSize * 2; // Overflow protection\n\n async function flush(): Promise<void> {\n if (buffer.length === 0) return;\n\n const records = buffer.splice(0);\n for (const record of records) {\n try {\n const bytes = encoder.encode(formatter(record));\n await writer.ready;\n await writer.write(bytes);\n } catch {\n // Silently ignore errors in non-blocking mode to avoid disrupting the application\n }\n }\n }\n\n function scheduleFlush(): void {\n if (activeFlush) return;\n\n activeFlush = flush().finally(() => {\n activeFlush = null;\n });\n }\n\n function startFlushTimer(): void {\n if (flushTimer !== null || disposed) return;\n\n flushTimer = setInterval(() => {\n scheduleFlush();\n }, flushInterval);\n }\n\n const nonBlockingSink: Sink & AsyncDisposable = (record: LogRecord) => {\n if (disposed) return;\n\n // Buffer overflow protection: drop oldest records if buffer is too large\n if (buffer.length >= maxBufferSize) {\n buffer.shift(); // Remove oldest record\n }\n\n buffer.push(record);\n\n if (buffer.length >= bufferSize) {\n scheduleFlush();\n } else if (flushTimer === null) {\n 
startFlushTimer();\n }\n };\n\n nonBlockingSink[Symbol.asyncDispose] = async () => {\n disposed = true;\n if (flushTimer !== null) {\n clearInterval(flushTimer);\n flushTimer = null;\n }\n await flush();\n try {\n await writer.close();\n } catch {\n // Writer might already be closed or errored\n }\n };\n\n return nonBlockingSink;\n}\n\ntype ConsoleMethod = \"debug\" | \"info\" | \"log\" | \"warn\" | \"error\";\n\n/**\n * Options for the {@link getConsoleSink} function.\n */\nexport interface ConsoleSinkOptions {\n /**\n * The console formatter or text formatter to use.\n * Defaults to {@link defaultConsoleFormatter}.\n */\n formatter?: ConsoleFormatter | TextFormatter;\n\n /**\n * The mapping from log levels to console methods. Defaults to:\n *\n * ```typescript\n * {\n * trace: \"trace\",\n * debug: \"debug\",\n * info: \"info\",\n * warning: \"warn\",\n * error: \"error\",\n * fatal: \"error\",\n * }\n * ```\n * @since 0.9.0\n */\n levelMap?: Record<LogLevel, ConsoleMethod>;\n\n /**\n * The console to log to. Defaults to {@link console}.\n */\n console?: Console;\n\n /**\n * Enable non-blocking mode with optional buffer configuration.\n * When enabled, log records are buffered and flushed in the background.\n *\n * @example Simple non-blocking mode\n * ```typescript\n * getConsoleSink({ nonBlocking: true });\n * ```\n *\n * @example Custom buffer configuration\n * ```typescript\n * getConsoleSink({\n * nonBlocking: {\n * bufferSize: 1000,\n * flushInterval: 50\n * }\n * });\n * ```\n *\n * @default `false`\n * @since 1.0.0\n */\n nonBlocking?: boolean | {\n /**\n * Maximum number of records to buffer before flushing.\n * @default `100`\n */\n bufferSize?: number;\n\n /**\n * Interval in milliseconds between automatic flushes.\n * @default `100`\n */\n flushInterval?: number;\n };\n}\n\n/**\n * A console sink factory that returns a sink that logs to the console.\n *\n * @param options The options for the sink.\n * @returns A sink that logs to the console. 
If `nonBlocking` is enabled,\n * returns a sink that also implements {@link Disposable}.\n */\nexport function getConsoleSink(\n options: ConsoleSinkOptions = {},\n): Sink | (Sink & Disposable) {\n const formatter = options.formatter ?? defaultConsoleFormatter;\n const levelMap: Record<LogLevel, ConsoleMethod> = {\n trace: \"debug\",\n debug: \"debug\",\n info: \"info\",\n warning: \"warn\",\n error: \"error\",\n fatal: \"error\",\n ...(options.levelMap ?? {}),\n };\n const console = options.console ?? globalThis.console;\n\n const baseSink = (record: LogRecord) => {\n const args = formatter(record);\n const method = levelMap[record.level];\n if (method === undefined) {\n throw new TypeError(`Invalid log level: ${record.level}.`);\n }\n if (typeof args === \"string\") {\n const msg = args.replace(/\\r?\\n$/, \"\");\n console[method](msg);\n } else {\n console[method](...args);\n }\n };\n\n if (!options.nonBlocking) {\n return baseSink;\n }\n\n // Non-blocking mode implementation\n const nonBlockingConfig = options.nonBlocking === true\n ? {}\n : options.nonBlocking;\n const bufferSize = nonBlockingConfig.bufferSize ?? 100;\n const flushInterval = nonBlockingConfig.flushInterval ?? 
100;\n\n const buffer: LogRecord[] = [];\n let flushTimer: ReturnType<typeof setInterval> | null = null;\n let disposed = false;\n let flushScheduled = false;\n const maxBufferSize = bufferSize * 2; // Overflow protection\n\n function flush(): void {\n if (buffer.length === 0) return;\n\n const records = buffer.splice(0);\n for (const record of records) {\n try {\n baseSink(record);\n } catch {\n // Silently ignore errors in non-blocking mode to avoid disrupting the application\n }\n }\n }\n\n function scheduleFlush(): void {\n if (flushScheduled) return;\n\n flushScheduled = true;\n setTimeout(() => {\n flushScheduled = false;\n flush();\n }, 0);\n }\n\n function startFlushTimer(): void {\n if (flushTimer !== null || disposed) return;\n\n flushTimer = setInterval(() => {\n flush();\n }, flushInterval);\n }\n\n const nonBlockingSink: Sink & Disposable = (record: LogRecord) => {\n if (disposed) return;\n\n // Buffer overflow protection: drop oldest records if buffer is too large\n if (buffer.length >= maxBufferSize) {\n buffer.shift(); // Remove oldest record\n }\n\n buffer.push(record);\n\n if (buffer.length >= bufferSize) {\n scheduleFlush();\n } else if (flushTimer === null) {\n startFlushTimer();\n }\n };\n\n nonBlockingSink[Symbol.dispose] = () => {\n disposed = true;\n if (flushTimer !== null) {\n clearInterval(flushTimer);\n flushTimer = null;\n }\n flush();\n };\n\n return nonBlockingSink;\n}\n\n/**\n * Converts an async sink into a regular sink with proper async handling.\n * The returned sink chains async operations to ensure proper ordering and\n * implements AsyncDisposable to wait for all pending operations on disposal.\n *\n * @example Create a sink that asynchronously posts to a webhook\n * ```typescript\n * const asyncSink: AsyncSink = async (record) => {\n * await fetch(\"https://example.com/logs\", {\n * method: \"POST\",\n * body: JSON.stringify(record),\n * });\n * };\n * const sink = fromAsyncSink(asyncSink);\n * ```\n *\n * @param asyncSink The 
async sink function to convert.\n * @returns A sink that properly handles async operations and disposal.\n * @since 1.0.0\n */\nexport function fromAsyncSink(asyncSink: AsyncSink): Sink & AsyncDisposable {\n let lastPromise = Promise.resolve();\n const sink: Sink & AsyncDisposable = (record: LogRecord) => {\n lastPromise = lastPromise\n .then(() => asyncSink(record))\n .catch(() => {\n // Errors are handled by the sink infrastructure\n });\n };\n sink[Symbol.asyncDispose] = async () => {\n await lastPromise;\n };\n return sink;\n}\n\n/**\n * Options for the {@link fingersCrossed} function.\n * @since 1.1.0\n */\nexport interface FingersCrossedOptions {\n /**\n * Minimum log level that triggers buffer flush.\n * When a log record at or above this level is received, all buffered\n * records are flushed to the wrapped sink.\n * @default `\"error\"`\n */\n readonly triggerLevel?: LogLevel;\n\n /**\n * Maximum buffer size before oldest records are dropped.\n * When the buffer exceeds this size, the oldest records are removed\n * to prevent unbounded memory growth.\n * @default `1000`\n */\n readonly maxBufferSize?: number;\n\n /**\n * Category isolation mode or custom matcher function.\n *\n * When `undefined` (default), all log records share a single buffer.\n *\n * When set to a mode string:\n *\n * - `\"descendant\"`: Flush child category buffers when parent triggers\n * - `\"ancestor\"`: Flush parent category buffers when child triggers\n * - `\"both\"`: Flush both parent and child category buffers\n *\n * When set to a function, it receives the trigger category and buffered\n * category and should return true if the buffered category should be flushed.\n *\n * @default `undefined` (no isolation, single global buffer)\n */\n readonly isolateByCategory?:\n | \"descendant\"\n | \"ancestor\"\n | \"both\"\n | ((\n triggerCategory: readonly string[],\n bufferedCategory: readonly string[],\n ) => boolean);\n\n /**\n * Enable context-based buffer isolation.\n * When 
enabled, buffers are isolated based on specified context keys.\n * This is useful for scenarios like HTTP request tracing where logs\n * should be isolated per request.\n *\n * @example\n * ```typescript\n * fingersCrossed(sink, {\n * isolateByContext: { keys: ['requestId'] }\n * })\n * ```\n *\n * @example Combined with category isolation\n * ```typescript\n * fingersCrossed(sink, {\n * isolateByCategory: 'descendant',\n * isolateByContext: { keys: ['requestId', 'sessionId'] }\n * })\n * ```\n *\n * @example With TTL-based buffer cleanup\n * ```typescript\n * fingersCrossed(sink, {\n * isolateByContext: {\n * keys: ['requestId'],\n * bufferTtlMs: 30000, // 30 seconds\n * cleanupIntervalMs: 10000 // cleanup every 10 seconds\n * }\n * })\n * ```\n *\n * @default `undefined` (no context isolation)\n * @since 1.2.0\n */\n readonly isolateByContext?: {\n /**\n * Context keys to use for isolation.\n * Buffers will be separate for different combinations of these context values.\n */\n readonly keys: readonly string[];\n\n /**\n * Maximum number of context buffers to maintain simultaneously.\n * When this limit is exceeded, the least recently used (LRU) buffers\n * will be evicted to make room for new ones.\n *\n * This provides memory protection in high-concurrency scenarios where\n * many different context values might be active simultaneously.\n *\n * When set to 0 or undefined, no limit is enforced.\n *\n * @default `undefined` (no limit)\n * @since 1.2.0\n */\n readonly maxContexts?: number;\n\n /**\n * Time-to-live for context buffers in milliseconds.\n * Buffers that haven't been accessed for this duration will be automatically\n * cleaned up to prevent memory leaks in long-running applications.\n *\n * When set to 0 or undefined, buffers will never expire based on time.\n *\n * @default `undefined` (no TTL)\n * @since 1.2.0\n */\n readonly bufferTtlMs?: number;\n\n /**\n * Interval in milliseconds for running cleanup operations.\n * The cleanup process removes 
expired buffers based on {@link bufferTtlMs}.\n *\n * This option is ignored if {@link bufferTtlMs} is not set.\n *\n * @default `30000` (30 seconds)\n * @since 1.2.0\n */\n readonly cleanupIntervalMs?: number;\n };\n}\n\n/**\n * Metadata for context-based buffer tracking.\n * Used internally by {@link fingersCrossed} to manage buffer lifecycle with LRU support.\n * @since 1.2.0\n */\ninterface BufferMetadata {\n /**\n * The actual log records buffer.\n */\n readonly buffer: LogRecord[];\n\n /**\n * Timestamp of the last access to this buffer (in milliseconds).\n * Used for LRU-based eviction when {@link FingersCrossedOptions.isolateByContext.maxContexts} is set.\n */\n lastAccess: number;\n}\n\n/**\n * Creates a sink that buffers log records until a trigger level is reached.\n * This pattern, known as \"fingers crossed\" logging, keeps detailed debug logs\n * in memory and only outputs them when an error or other significant event occurs.\n *\n * @example Basic usage with default settings\n * ```typescript\n * const sink = fingersCrossed(getConsoleSink());\n * // Debug and info logs are buffered\n * // When an error occurs, all buffered logs + the error are output\n * ```\n *\n * @example Custom trigger level and buffer size\n * ```typescript\n * const sink = fingersCrossed(getConsoleSink(), {\n * triggerLevel: \"warning\", // Trigger on warning or higher\n * maxBufferSize: 500 // Keep last 500 records\n * });\n * ```\n *\n * @example Category isolation\n * ```typescript\n * const sink = fingersCrossed(getConsoleSink(), {\n * isolateByCategory: \"descendant\" // Separate buffers per category\n * });\n * // Error in [\"app\"] triggers flush of [\"app\"] and [\"app\", \"module\"] buffers\n * // But not [\"other\"] buffer\n * ```\n *\n * @param sink The sink to wrap. 
Buffered records are sent to this sink when\n * triggered.\n * @param options Configuration options for the fingers crossed behavior.\n * @returns A sink that buffers records until the trigger level is reached.\n * @since 1.1.0\n */\nexport function fingersCrossed(\n sink: Sink,\n options: FingersCrossedOptions = {},\n): Sink | (Sink & Disposable) {\n const triggerLevel = options.triggerLevel ?? \"error\";\n const maxBufferSize = Math.max(0, options.maxBufferSize ?? 1000);\n const isolateByCategory = options.isolateByCategory;\n const isolateByContext = options.isolateByContext;\n\n // TTL and LRU configuration\n const bufferTtlMs = isolateByContext?.bufferTtlMs;\n const cleanupIntervalMs = isolateByContext?.cleanupIntervalMs ?? 30000;\n const maxContexts = isolateByContext?.maxContexts;\n const hasTtl = bufferTtlMs != null && bufferTtlMs > 0;\n const hasLru = maxContexts != null && maxContexts > 0;\n\n // Validate trigger level early\n try {\n compareLogLevel(\"trace\", triggerLevel); // Test with any valid level\n } catch (error) {\n throw new TypeError(\n `Invalid triggerLevel: ${JSON.stringify(triggerLevel)}. ${\n error instanceof Error ? 
error.message : String(error)\n }`,\n );\n }\n\n // Helper functions for category matching\n function isDescendant(\n parent: readonly string[],\n child: readonly string[],\n ): boolean {\n if (parent.length === 0 || child.length === 0) return false; // Empty categories are isolated\n if (parent.length > child.length) return false;\n return parent.every((p, i) => p === child[i]);\n }\n\n function isAncestor(\n child: readonly string[],\n parent: readonly string[],\n ): boolean {\n if (child.length === 0 || parent.length === 0) return false; // Empty categories are isolated\n if (child.length < parent.length) return false;\n return parent.every((p, i) => p === child[i]);\n }\n\n // Determine matcher function based on isolation mode\n let shouldFlushBuffer:\n | ((\n triggerCategory: readonly string[],\n bufferedCategory: readonly string[],\n ) => boolean)\n | null = null;\n\n if (isolateByCategory) {\n if (typeof isolateByCategory === \"function\") {\n shouldFlushBuffer = isolateByCategory;\n } else {\n switch (isolateByCategory) {\n case \"descendant\":\n shouldFlushBuffer = (trigger, buffered) =>\n isDescendant(trigger, buffered);\n break;\n case \"ancestor\":\n shouldFlushBuffer = (trigger, buffered) =>\n isAncestor(trigger, buffered);\n break;\n case \"both\":\n shouldFlushBuffer = (trigger, buffered) =>\n isDescendant(trigger, buffered) || isAncestor(trigger, buffered);\n break;\n }\n }\n }\n\n // Helper functions for category serialization\n function getCategoryKey(category: readonly string[]): string {\n return JSON.stringify(category);\n }\n\n function parseCategoryKey(key: string): string[] {\n return JSON.parse(key);\n }\n\n // Helper function to extract context values from properties\n function getContextKey(properties: Record<string, unknown>): string {\n if (!isolateByContext || isolateByContext.keys.length === 0) {\n return \"\";\n }\n const contextValues: Record<string, unknown> = {};\n for (const key of isolateByContext.keys) {\n if (key in 
properties) {\n contextValues[key] = properties[key];\n }\n }\n return JSON.stringify(contextValues);\n }\n\n // Helper function to generate buffer key\n function getBufferKey(\n category: readonly string[],\n properties: Record<string, unknown>,\n ): string {\n const categoryKey = getCategoryKey(category);\n if (!isolateByContext) {\n return categoryKey;\n }\n const contextKey = getContextKey(properties);\n return `${categoryKey}:${contextKey}`;\n }\n\n // Helper function to parse buffer key\n function parseBufferKey(key: string): {\n category: string[];\n context: string;\n } {\n if (!isolateByContext) {\n return { category: parseCategoryKey(key), context: \"\" };\n }\n // Find the separator between category and context\n // The category part is JSON-encoded, so we need to find where it ends\n // We look for \"]:\" which indicates end of category array and start of context\n const separatorIndex = key.indexOf(\"]:\");\n if (separatorIndex === -1) {\n // No context part, entire key is category\n return { category: parseCategoryKey(key), context: \"\" };\n }\n const categoryPart = key.substring(0, separatorIndex + 1); // Include the ]\n const contextPart = key.substring(separatorIndex + 2); // Skip ]:\n return { category: parseCategoryKey(categoryPart), context: contextPart };\n }\n\n // TTL-based cleanup function\n function cleanupExpiredBuffers(buffers: Map<string, BufferMetadata>): void {\n if (!hasTtl) return;\n\n const now = Date.now();\n const expiredKeys: string[] = [];\n\n for (const [key, metadata] of buffers) {\n if (metadata.buffer.length === 0) continue;\n\n // Use the timestamp of the last (most recent) record in the buffer\n const lastRecordTimestamp =\n metadata.buffer[metadata.buffer.length - 1].timestamp;\n if (now - lastRecordTimestamp > bufferTtlMs!) 
{\n expiredKeys.push(key);\n }\n }\n\n // Remove expired buffers\n for (const key of expiredKeys) {\n buffers.delete(key);\n }\n }\n\n // LRU-based eviction function\n function evictLruBuffers(\n buffers: Map<string, BufferMetadata>,\n numToEvict?: number,\n ): void {\n if (!hasLru) return;\n\n // Use provided numToEvict or calculate based on current size vs limit\n const toEvict = numToEvict ?? Math.max(0, buffers.size - maxContexts!);\n if (toEvict <= 0) return;\n\n // Sort by lastAccess timestamp (oldest first)\n const sortedEntries = Array.from(buffers.entries())\n .sort(([, a], [, b]) => a.lastAccess - b.lastAccess);\n\n // Remove the oldest buffers\n for (let i = 0; i < toEvict; i++) {\n const [key] = sortedEntries[i];\n buffers.delete(key);\n }\n }\n\n // Buffer management\n if (!isolateByCategory && !isolateByContext) {\n // Single global buffer\n const buffer: LogRecord[] = [];\n let triggered = false;\n\n return (record: LogRecord) => {\n if (triggered) {\n // Already triggered, pass through directly\n sink(record);\n return;\n }\n\n // Check if this record triggers flush\n if (compareLogLevel(record.level, triggerLevel) >= 0) {\n triggered = true;\n\n // Flush buffer\n for (const bufferedRecord of buffer) {\n sink(bufferedRecord);\n }\n buffer.length = 0;\n\n // Send trigger record\n sink(record);\n } else {\n // Buffer the record\n buffer.push(record);\n\n // Enforce max buffer size\n while (buffer.length > maxBufferSize) {\n buffer.shift();\n }\n }\n };\n } else {\n // Category and/or context-isolated buffers\n const buffers = new Map<string, BufferMetadata>();\n const triggered = new Set<string>();\n\n // Set up TTL cleanup timer if enabled\n let cleanupTimer: ReturnType<typeof setInterval> | null = null;\n if (hasTtl) {\n cleanupTimer = setInterval(() => {\n cleanupExpiredBuffers(buffers);\n }, cleanupIntervalMs);\n }\n\n const fingersCrossedSink = (record: LogRecord) => {\n const bufferKey = getBufferKey(record.category, record.properties);\n\n // 
Check if this buffer is already triggered\n if (triggered.has(bufferKey)) {\n sink(record);\n return;\n }\n\n // Check if this record triggers flush\n if (compareLogLevel(record.level, triggerLevel) >= 0) {\n // Find all buffers that should be flushed\n const keysToFlush = new Set<string>();\n\n for (const [bufferedKey] of buffers) {\n if (bufferedKey === bufferKey) {\n keysToFlush.add(bufferedKey);\n } else {\n const { category: bufferedCategory, context: bufferedContext } =\n parseBufferKey(bufferedKey);\n const { context: triggerContext } = parseBufferKey(bufferKey);\n\n // Check context match\n let contextMatches = true;\n if (isolateByContext) {\n contextMatches = bufferedContext === triggerContext;\n }\n\n // Check category match\n let categoryMatches = false;\n if (!isolateByCategory) {\n // No category isolation, so all categories match if context matches\n categoryMatches = contextMatches;\n } else if (shouldFlushBuffer) {\n try {\n categoryMatches = shouldFlushBuffer(\n record.category,\n bufferedCategory,\n );\n } catch {\n // Ignore errors from custom matcher\n }\n } else {\n // Same category only\n categoryMatches = getCategoryKey(record.category) ===\n getCategoryKey(bufferedCategory);\n }\n\n // Both must match for the buffer to be flushed\n if (contextMatches && categoryMatches) {\n keysToFlush.add(bufferedKey);\n }\n }\n }\n\n // Flush matching buffers\n const allRecordsToFlush: LogRecord[] = [];\n for (const key of keysToFlush) {\n const metadata = buffers.get(key);\n if (metadata) {\n allRecordsToFlush.push(...metadata.buffer);\n buffers.delete(key);\n triggered.add(key);\n }\n }\n\n // Sort by timestamp to maintain chronological order\n allRecordsToFlush.sort((a, b) => a.timestamp - b.timestamp);\n\n // Flush all records\n for (const bufferedRecord of allRecordsToFlush) {\n sink(bufferedRecord);\n }\n\n // Mark trigger buffer as triggered and send trigger record\n triggered.add(bufferKey);\n sink(record);\n } else {\n // Buffer the record\n 
const now = Date.now();\n let metadata = buffers.get(bufferKey);\n if (!metadata) {\n // Apply LRU eviction if adding new buffer would exceed capacity\n if (hasLru && buffers.size >= maxContexts!) {\n // Calculate how many buffers to evict to make room for the new one\n const numToEvict = buffers.size - maxContexts! + 1;\n evictLruBuffers(buffers, numToEvict);\n }\n\n metadata = {\n buffer: [],\n lastAccess: now,\n };\n buffers.set(bufferKey, metadata);\n } else {\n // Update last access time for LRU\n metadata.lastAccess = now;\n }\n\n metadata.buffer.push(record);\n\n // Enforce max buffer size per buffer\n while (metadata.buffer.length > maxBufferSize) {\n metadata.buffer.shift();\n }\n }\n };\n\n // Add disposal functionality to clean up timer\n if (cleanupTimer !== null) {\n (fingersCrossedSink as Sink & Disposable)[Symbol.dispose] = () => {\n if (cleanupTimer !== null) {\n clearInterval(cleanupTimer);\n cleanupTimer = null;\n }\n };\n }\n\n return fingersCrossedSink;\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AA8CA,SAAgB,WAAWA,MAAYC,QAA0B;CAC/D,MAAM,aAAa,SAAS,OAAO;AACnC,QAAO,CAACC,WAAsB;AAC5B,MAAI,WAAW,OAAO,CAAE,MAAK,OAAO;CACrC;AACF;;;;;;;;;;;;;;;;;;;;;;;;;AA6ED,SAAgB,cACdC,QACAC,UAA6B,CAAE,GACP;CACxB,MAAM,YAAY,QAAQ,aAAa;CACvC,MAAM,UAAU,QAAQ,WAAW,IAAI;CACvC,MAAM,SAAS,OAAO,WAAW;AAEjC,MAAK,QAAQ,aAAa;EACxB,IAAI,cAAc,QAAQ,SAAS;EACnC,MAAMC,OAA+B,CAACH,WAAsB;GAC1D,MAAM,QAAQ,QAAQ,OAAO,UAAU,OAAO,CAAC;AAC/C,iBAAc,YACX,KAAK,MAAM,OAAO,MAAM,CACxB,KAAK,MAAM,OAAO,MAAM,MAAM,CAAC;EACnC;AACD,OAAK,OAAO,gBAAgB,YAAY;AACtC,SAAM;AACN,SAAM,OAAO,OAAO;EACrB;AACD,SAAO;CACR;CAGD,MAAM,oBAAoB,QAAQ,gBAAgB,OAC9C,CAAE,IACF,QAAQ;CACZ,MAAM,aAAa,kBAAkB,cAAc;CACnD,MAAM,gBAAgB,kBAAkB,iBAAiB;CAEzD,MAAMI,SAAsB,CAAE;CAC9B,IAAIC,aAAoD;CACxD,IAAI,WAAW;CACf,IAAIC,cAAoC;CACxC,MAAM,gBAAgB,aAAa;CAEnC,eAAe,QAAuB;AACpC,MAAI,OAAO,WAAW,EAAG;EAEzB,MAAM,UAAU,OAAO,OAAO,EAAE;AAChC,OAAK,MAAM,UAAU,QACnB,KAAI;GACF,MAAM,QAAQ,QAAQ,OAAO,UAAU,OAAO,CAAC;AAC/C,SAAM,OAAO;AACb,SAAM,OAAO,MAAM,MAAM;EAC1B,QAAO,CAEP;CAEJ;CAED,SA
AS,gBAAsB;AAC7B,MAAI,YAAa;AAEjB,gBAAc,OAAO,CAAC,QAAQ,MAAM;AAClC,iBAAc;EACf,EAAC;CACH;CAED,SAAS,kBAAwB;AAC/B,MAAI,eAAe,QAAQ,SAAU;AAErC,eAAa,YAAY,MAAM;AAC7B,kBAAe;EAChB,GAAE,cAAc;CAClB;CAED,MAAMC,kBAA0C,CAACP,WAAsB;AACrE,MAAI,SAAU;AAGd,MAAI,OAAO,UAAU,cACnB,QAAO,OAAO;AAGhB,SAAO,KAAK,OAAO;AAEnB,MAAI,OAAO,UAAU,WACnB,gBAAe;WACN,eAAe,KACxB,kBAAiB;CAEpB;AAED,iBAAgB,OAAO,gBAAgB,YAAY;AACjD,aAAW;AACX,MAAI,eAAe,MAAM;AACvB,iBAAc,WAAW;AACzB,gBAAa;EACd;AACD,QAAM,OAAO;AACb,MAAI;AACF,SAAM,OAAO,OAAO;EACrB,QAAO,CAEP;CACF;AAED,QAAO;AACR;;;;;;;;AAgFD,SAAgB,eACdQ,UAA8B,CAAE,GACJ;CAC5B,MAAM,YAAY,QAAQ,aAAa;CACvC,MAAMC,WAA4C;EAChD,OAAO;EACP,OAAO;EACP,MAAM;EACN,SAAS;EACT,OAAO;EACP,OAAO;EACP,GAAI,QAAQ,YAAY,CAAE;CAC3B;CACD,MAAM,UAAU,QAAQ,WAAW,WAAW;CAE9C,MAAM,WAAW,CAACT,WAAsB;EACtC,MAAM,OAAO,UAAU,OAAO;EAC9B,MAAM,SAAS,SAAS,OAAO;AAC/B,MAAI,kBACF,OAAM,IAAI,WAAW,qBAAqB,OAAO,MAAM;AAEzD,aAAW,SAAS,UAAU;GAC5B,MAAM,MAAM,KAAK,QAAQ,UAAU,GAAG;AACtC,WAAQ,QAAQ,IAAI;EACrB,MACC,SAAQ,QAAQ,GAAG,KAAK;CAE3B;AAED,MAAK,QAAQ,YACX,QAAO;CAIT,MAAM,oBAAoB,QAAQ,gBAAgB,OAC9C,CAAE,IACF,QAAQ;CACZ,MAAM,aAAa,kBAAkB,cAAc;CACnD,MAAM,gBAAgB,kBAAkB,iBAAiB;CAEzD,MAAMI,SAAsB,CAAE;CAC9B,IAAIC,aAAoD;CACxD,IAAI,WAAW;CACf,IAAI,iBAAiB;CACrB,MAAM,gBAAgB,aAAa;CAEnC,SAAS,QAAc;AACrB,MAAI,OAAO,WAAW,EAAG;EAEzB,MAAM,UAAU,OAAO,OAAO,EAAE;AAChC,OAAK,MAAM,UAAU,QACnB,KAAI;AACF,YAAS,OAAO;EACjB,QAAO,CAEP;CAEJ;CAED,SAAS,gBAAsB;AAC7B,MAAI,eAAgB;AAEpB,mBAAiB;AACjB,aAAW,MAAM;AACf,oBAAiB;AACjB,UAAO;EACR,GAAE,EAAE;CACN;CAED,SAAS,kBAAwB;AAC/B,MAAI,eAAe,QAAQ,SAAU;AAErC,eAAa,YAAY,MAAM;AAC7B,UAAO;EACR,GAAE,cAAc;CAClB;CAED,MAAMK,kBAAqC,CAACV,WAAsB;AAChE,MAAI,SAAU;AAGd,MAAI,OAAO,UAAU,cACnB,QAAO,OAAO;AAGhB,SAAO,KAAK,OAAO;AAEnB,MAAI,OAAO,UAAU,WACnB,gBAAe;WACN,eAAe,KACxB,kBAAiB;CAEpB;AAED,iBAAgB,OAAO,WAAW,MAAM;AACtC,aAAW;AACX,MAAI,eAAe,MAAM;AACvB,iBAAc,WAAW;AACzB,gBAAa;EACd;AACD,SAAO;CACR;AAED,QAAO;AACR;;;;;;;;;;;;;;;;;;;;;AAsBD,SAAgB,cAAcW,WAA8C;CAC1E,IAAI,cAAc,QAAQ,SAAS;CACnC,MAAMR,OAA+B,CAACH,WAAsB;AAC1D,gBAAc,YACX,KAAK,MAAM,UAAU,OAAO,CAAC,CAC7B,MAAM,MAAM,CAEZ,EA
AC;CACL;AACD,MAAK,OAAO,gBAAgB,YAAY;AACtC,QAAM;CACP;AACD,QAAO;AACR;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuLD,SAAgB,eACdF,MACAc,UAAiC,CAAE,GACP;CAC5B,MAAM,eAAe,QAAQ,gBAAgB;CAC7C,MAAM,gBAAgB,KAAK,IAAI,GAAG,QAAQ,iBAAiB,IAAK;CAChE,MAAM,oBAAoB,QAAQ;CAClC,MAAM,mBAAmB,QAAQ;CAGjC,MAAM,cAAc,kBAAkB;CACtC,MAAM,oBAAoB,kBAAkB,qBAAqB;CACjE,MAAM,cAAc,kBAAkB;CACtC,MAAM,SAAS,eAAe,QAAQ,cAAc;CACpD,MAAM,SAAS,eAAe,QAAQ,cAAc;AAGpD,KAAI;AACF,kBAAgB,SAAS,aAAa;CACvC,SAAQ,OAAO;AACd,QAAM,IAAI,WACP,wBAAwB,KAAK,UAAU,aAAa,CAAC,IACpD,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,CACvD;CAEJ;CAGD,SAAS,aACPC,QACAC,OACS;AACT,MAAI,OAAO,WAAW,KAAK,MAAM,WAAW,EAAG,QAAO;AACtD,MAAI,OAAO,SAAS,MAAM,OAAQ,QAAO;AACzC,SAAO,OAAO,MAAM,CAAC,GAAG,MAAM,MAAM,MAAM,GAAG;CAC9C;CAED,SAAS,WACPA,OACAD,QACS;AACT,MAAI,MAAM,WAAW,KAAK,OAAO,WAAW,EAAG,QAAO;AACtD,MAAI,MAAM,SAAS,OAAO,OAAQ,QAAO;AACzC,SAAO,OAAO,MAAM,CAAC,GAAG,MAAM,MAAM,MAAM,GAAG;CAC9C;CAGD,IAAIE,oBAKO;AAEX,KAAI,kBACF,YAAW,sBAAsB,WAC/B,qBAAoB;KAEpB,SAAQ,mBAAR;EACE,KAAK;AACH,uBAAoB,CAAC,SAAS,aAC5B,aAAa,SAAS,SAAS;AACjC;EACF,KAAK;AACH,uBAAoB,CAAC,SAAS,aAC5B,WAAW,SAAS,SAAS;AAC/B;EACF,KAAK;AACH,uBAAoB,CAAC,SAAS,aAC5B,aAAa,SAAS,SAAS,IAAI,WAAW,SAAS,SAAS;AAClE;CACH;CAKL,SAAS,eAAeC,UAAqC;AAC3D,SAAO,KAAK,UAAU,SAAS;CAChC;CAED,SAAS,iBAAiBC,KAAuB;AAC/C,SAAO,KAAK,MAAM,IAAI;CACvB;CAGD,SAAS,cAAcC,YAA6C;AAClE,OAAK,oBAAoB,iBAAiB,KAAK,WAAW,EACxD,QAAO;EAET,MAAMC,gBAAyC,CAAE;AACjD,OAAK,MAAM,OAAO,iBAAiB,KACjC,KAAI,OAAO,WACT,eAAc,OAAO,WAAW;AAGpC,SAAO,KAAK,UAAU,cAAc;CACrC;CAGD,SAAS,aACPH,UACAE,YACQ;EACR,MAAM,cAAc,eAAe,SAAS;AAC5C,OAAK,iBACH,QAAO;EAET,MAAM,aAAa,cAAc,WAAW;AAC5C,UAAQ,EAAE,YAAY,GAAG,WAAW;CACrC;CAGD,SAAS,eAAeD,KAGtB;AACA,OAAK,iBACH,QAAO;GAAE,UAAU,iBAAiB,IAAI;GAAE,SAAS;EAAI;EAKzD,MAAM,iBAAiB,IAAI,QAAQ,KAAK;AACxC,MAAI,mBAAmB,GAErB,QAAO;GAAE,UAAU,iBAAiB,IAAI;GAAE,SAAS;EAAI;EAEzD,MAAM,eAAe,IAAI,UAAU,GAAG,iBAAiB,EAAE;EACzD,MAAM,cAAc,IAAI,UAAU,iBAAiB,EAAE;AACrD,SAAO;GAAE,UAAU,iBAAiB,aAAa;GAAE,SAAS;EAAa;CAC1E;CAGD,SAAS,sBAAsBG,SAA4C;AACzE,OAAK,OAAQ;EAEb,MAAM,MAAM,KAAK,KAAK;EACtB,MAAMC,cAAwB,CAAE;AAEhC,OA
AK,MAAM,CAAC,KAAK,SAAS,IAAI,SAAS;AACrC,OAAI,SAAS,OAAO,WAAW,EAAG;GAGlC,MAAM,sBACJ,SAAS,OAAO,SAAS,OAAO,SAAS,GAAG;AAC9C,OAAI,MAAM,sBAAsB,YAC9B,aAAY,KAAK,IAAI;EAExB;AAGD,OAAK,MAAM,OAAO,YAChB,SAAQ,OAAO,IAAI;CAEtB;CAGD,SAAS,gBACPD,SACAE,YACM;AACN,OAAK,OAAQ;EAGb,MAAM,UAAU,cAAc,KAAK,IAAI,GAAG,QAAQ,OAAO,YAAa;AACtE,MAAI,WAAW,EAAG;EAGlB,MAAM,gBAAgB,MAAM,KAAK,QAAQ,SAAS,CAAC,CAChD,KAAK,CAAC,GAAG,EAAE,EAAE,GAAG,EAAE,KAAK,EAAE,aAAa,EAAE,WAAW;AAGtD,OAAK,IAAI,IAAI,GAAG,IAAI,SAAS,KAAK;GAChC,MAAM,CAAC,IAAI,GAAG,cAAc;AAC5B,WAAQ,OAAO,IAAI;EACpB;CACF;AAGD,MAAK,sBAAsB,kBAAkB;EAE3C,MAAMlB,SAAsB,CAAE;EAC9B,IAAI,YAAY;AAEhB,SAAO,CAACJ,WAAsB;AAC5B,OAAI,WAAW;AAEb,SAAK,OAAO;AACZ;GACD;AAGD,OAAI,gBAAgB,OAAO,OAAO,aAAa,IAAI,GAAG;AACpD,gBAAY;AAGZ,SAAK,MAAM,kBAAkB,OAC3B,MAAK,eAAe;AAEtB,WAAO,SAAS;AAGhB,SAAK,OAAO;GACb,OAAM;AAEL,WAAO,KAAK,OAAO;AAGnB,WAAO,OAAO,SAAS,cACrB,QAAO,OAAO;GAEjB;EACF;CACF,OAAM;EAEL,MAAM,0BAAU,IAAI;EACpB,MAAM,4BAAY,IAAI;EAGtB,IAAIuB,eAAsD;AAC1D,MAAI,OACF,gBAAe,YAAY,MAAM;AAC/B,yBAAsB,QAAQ;EAC/B,GAAE,kBAAkB;EAGvB,MAAM,qBAAqB,CAACvB,WAAsB;GAChD,MAAM,YAAY,aAAa,OAAO,UAAU,OAAO,WAAW;AAGlE,OAAI,UAAU,IAAI,UAAU,EAAE;AAC5B,SAAK,OAAO;AACZ;GACD;AAGD,OAAI,gBAAgB,OAAO,OAAO,aAAa,IAAI,GAAG;IAEpD,MAAM,8BAAc,IAAI;AAExB,SAAK,MAAM,CAAC,YAAY,IAAI,QAC1B,KAAI,gBAAgB,UAClB,aAAY,IAAI,YAAY;SACvB;KACL,MAAM,EAAE,UAAU,kBAAkB,SAAS,iBAAiB,GAC5D,eAAe,YAAY;KAC7B,MAAM,EAAE,SAAS,gBAAgB,GAAG,eAAe,UAAU;KAG7D,IAAI,iBAAiB;AACrB,SAAI,iBACF,kBAAiB,oBAAoB;KAIvC,IAAI,kBAAkB;AACtB,UAAK,kBAEH,mBAAkB;cACT,kBACT,KAAI;AACF,wBAAkB,kBAChB,OAAO,UACP,iBACD;KACF,QAAO,CAEP;SAGD,mBAAkB,eAAe,OAAO,SAAS,KAC/C,eAAe,iBAAiB;AAIpC,SAAI,kBAAkB,gBACpB,aAAY,IAAI,YAAY;IAE/B;IAIH,MAAMwB,oBAAiC,CAAE;AACzC,SAAK,MAAM,OAAO,aAAa;KAC7B,MAAM,WAAW,QAAQ,IAAI,IAAI;AACjC,SAAI,UAAU;AACZ,wBAAkB,KAAK,GAAG,SAAS,OAAO;AAC1C,cAAQ,OAAO,IAAI;AACnB,gBAAU,IAAI,IAAI;KACnB;IACF;AAGD,sBAAkB,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,UAAU;AAG3D,SAAK,MAAM,kBAAkB,kBAC3B,MAAK,eAAe;AAItB,cAAU,IAAI,UAAU;AACxB,SAAK,OAAO;GACb,OAAM;IAEL,MAAM,MAAM,KAAK,KAAK;IACtB,IAAI,WAAW,QAAQ,IAAI,UAAU;AA
CrC,SAAK,UAAU;AAEb,SAAI,UAAU,QAAQ,QAAQ,aAAc;MAE1C,MAAM,aAAa,QAAQ,OAAO,cAAe;AACjD,sBAAgB,SAAS,WAAW;KACrC;AAED,gBAAW;MACT,QAAQ,CAAE;MACV,YAAY;KACb;AACD,aAAQ,IAAI,WAAW,SAAS;IACjC,MAEC,UAAS,aAAa;AAGxB,aAAS,OAAO,KAAK,OAAO;AAG5B,WAAO,SAAS,OAAO,SAAS,cAC9B,UAAS,OAAO,OAAO;GAE1B;EACF;AAGD,MAAI,iBAAiB,KACnB,CAAC,mBAAyC,OAAO,WAAW,MAAM;AAChE,OAAI,iBAAiB,MAAM;AACzB,kBAAc,aAAa;AAC3B,mBAAe;GAChB;EACF;AAGH,SAAO;CACR;AACF"}
|
package/package.json
CHANGED
package/src/sink.test.ts
CHANGED
|
@@ -2207,3 +2207,384 @@ test("fingersCrossed() - context isolation after trigger", () => {
|
|
|
2207
2207
|
assertEquals(buffer[2], req2Debug);
|
|
2208
2208
|
assertEquals(buffer[3], req2Error);
|
|
2209
2209
|
});
|
|
2210
|
+
|
|
2211
|
+
test("fingersCrossed() - TTL-based buffer cleanup", async () => {
|
|
2212
|
+
const buffer: LogRecord[] = [];
|
|
2213
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2214
|
+
isolateByContext: {
|
|
2215
|
+
keys: ["requestId"],
|
|
2216
|
+
bufferTtlMs: 100, // 100ms TTL
|
|
2217
|
+
cleanupIntervalMs: 50, // cleanup every 50ms
|
|
2218
|
+
},
|
|
2219
|
+
}) as Sink & Disposable;
|
|
2220
|
+
|
|
2221
|
+
try {
|
|
2222
|
+
// Create records with different request IDs
|
|
2223
|
+
const req1Record: LogRecord = {
|
|
2224
|
+
...debug,
|
|
2225
|
+
properties: { requestId: "req-1" },
|
|
2226
|
+
timestamp: Date.now(),
|
|
2227
|
+
};
|
|
2228
|
+
const req2Record: LogRecord = {
|
|
2229
|
+
...debug,
|
|
2230
|
+
properties: { requestId: "req-2" },
|
|
2231
|
+
timestamp: Date.now(),
|
|
2232
|
+
};
|
|
2233
|
+
|
|
2234
|
+
// Add records to buffers
|
|
2235
|
+
sink(req1Record);
|
|
2236
|
+
sink(req2Record);
|
|
2237
|
+
|
|
2238
|
+
// Wait for TTL to expire and cleanup to run
|
|
2239
|
+
await new Promise((resolve) => setTimeout(resolve, 200));
|
|
2240
|
+
|
|
2241
|
+
// Add a new record after TTL expiry
|
|
2242
|
+
const req3Record: LogRecord = {
|
|
2243
|
+
...debug,
|
|
2244
|
+
properties: { requestId: "req-3" },
|
|
2245
|
+
timestamp: Date.now(),
|
|
2246
|
+
};
|
|
2247
|
+
sink(req3Record);
|
|
2248
|
+
|
|
2249
|
+
// Trigger an error for req-1 (should not flush expired req-1 buffer)
|
|
2250
|
+
const req1Error: LogRecord = {
|
|
2251
|
+
...error,
|
|
2252
|
+
properties: { requestId: "req-1" },
|
|
2253
|
+
timestamp: Date.now(),
|
|
2254
|
+
};
|
|
2255
|
+
sink(req1Error);
|
|
2256
|
+
|
|
2257
|
+
// Should only have req-1 error (req-1 debug was cleaned up by TTL)
|
|
2258
|
+
assertEquals(buffer.length, 1);
|
|
2259
|
+
assertEquals(buffer[0], req1Error);
|
|
2260
|
+
|
|
2261
|
+
// Trigger an error for req-3 (should flush req-3 buffer)
|
|
2262
|
+
buffer.length = 0; // Clear buffer
|
|
2263
|
+
const req3Error: LogRecord = {
|
|
2264
|
+
...error,
|
|
2265
|
+
properties: { requestId: "req-3" },
|
|
2266
|
+
timestamp: Date.now(),
|
|
2267
|
+
};
|
|
2268
|
+
sink(req3Error);
|
|
2269
|
+
|
|
2270
|
+
// Should have both req-3 debug and error
|
|
2271
|
+
assertEquals(buffer.length, 2);
|
|
2272
|
+
assertEquals(buffer[0], req3Record);
|
|
2273
|
+
assertEquals(buffer[1], req3Error);
|
|
2274
|
+
} finally {
|
|
2275
|
+
// Clean up timer
|
|
2276
|
+
sink[Symbol.dispose]();
|
|
2277
|
+
}
|
|
2278
|
+
});
|
|
2279
|
+
|
|
2280
|
+
test("fingersCrossed() - TTL disabled when bufferTtlMs is zero", () => {
|
|
2281
|
+
const buffer: LogRecord[] = [];
|
|
2282
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2283
|
+
isolateByContext: {
|
|
2284
|
+
keys: ["requestId"],
|
|
2285
|
+
bufferTtlMs: 0, // TTL disabled
|
|
2286
|
+
},
|
|
2287
|
+
});
|
|
2288
|
+
|
|
2289
|
+
// Should return a regular sink without disposal functionality
|
|
2290
|
+
assertEquals("dispose" in sink, false);
|
|
2291
|
+
|
|
2292
|
+
// Add a record
|
|
2293
|
+
const record: LogRecord = {
|
|
2294
|
+
...debug,
|
|
2295
|
+
properties: { requestId: "req-1" },
|
|
2296
|
+
};
|
|
2297
|
+
sink(record);
|
|
2298
|
+
|
|
2299
|
+
// Trigger should work normally
|
|
2300
|
+
const errorRecord: LogRecord = {
|
|
2301
|
+
...error,
|
|
2302
|
+
properties: { requestId: "req-1" },
|
|
2303
|
+
};
|
|
2304
|
+
sink(errorRecord);
|
|
2305
|
+
|
|
2306
|
+
assertEquals(buffer.length, 2);
|
|
2307
|
+
assertEquals(buffer[0], record);
|
|
2308
|
+
assertEquals(buffer[1], errorRecord);
|
|
2309
|
+
});
|
|
2310
|
+
|
|
2311
|
+
test("fingersCrossed() - TTL disabled when bufferTtlMs is undefined", () => {
|
|
2312
|
+
const buffer: LogRecord[] = [];
|
|
2313
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2314
|
+
isolateByContext: {
|
|
2315
|
+
keys: ["requestId"],
|
|
2316
|
+
// bufferTtlMs not specified
|
|
2317
|
+
},
|
|
2318
|
+
});
|
|
2319
|
+
|
|
2320
|
+
// Should return a regular sink without disposal functionality
|
|
2321
|
+
assertEquals("dispose" in sink, false);
|
|
2322
|
+
});
|
|
2323
|
+
|
|
2324
|
+
test("fingersCrossed() - LRU-based buffer eviction", () => {
|
|
2325
|
+
const buffer: LogRecord[] = [];
|
|
2326
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2327
|
+
isolateByContext: {
|
|
2328
|
+
keys: ["requestId"],
|
|
2329
|
+
maxContexts: 2, // Only keep 2 context buffers
|
|
2330
|
+
},
|
|
2331
|
+
});
|
|
2332
|
+
|
|
2333
|
+
// Step 1: Add req-1
|
|
2334
|
+
const req1Record: LogRecord = {
|
|
2335
|
+
...debug,
|
|
2336
|
+
properties: { requestId: "req-1" },
|
|
2337
|
+
};
|
|
2338
|
+
sink(req1Record);
|
|
2339
|
+
|
|
2340
|
+
// Step 2: Add req-2
|
|
2341
|
+
const req2Record: LogRecord = {
|
|
2342
|
+
...debug,
|
|
2343
|
+
properties: { requestId: "req-2" },
|
|
2344
|
+
};
|
|
2345
|
+
sink(req2Record);
|
|
2346
|
+
|
|
2347
|
+
// Step 3: Add req-3 (should evict req-1)
|
|
2348
|
+
const req3Record: LogRecord = {
|
|
2349
|
+
...debug,
|
|
2350
|
+
properties: { requestId: "req-3" },
|
|
2351
|
+
};
|
|
2352
|
+
sink(req3Record);
|
|
2353
|
+
|
|
2354
|
+
// Test req-1 was evicted by triggering error
|
|
2355
|
+
const req1Error: LogRecord = {
|
|
2356
|
+
...error,
|
|
2357
|
+
properties: { requestId: "req-1" },
|
|
2358
|
+
};
|
|
2359
|
+
sink(req1Error);
|
|
2360
|
+
|
|
2361
|
+
// If req-1 was evicted, should only have error (length=1)
|
|
2362
|
+
// If req-1 wasn't evicted, should have debug+error (length=2)
|
|
2363
|
+
assertEquals(buffer.length, 1, "req-1 should have been evicted by LRU");
|
|
2364
|
+
assertEquals(buffer[0], req1Error);
|
|
2365
|
+
});
|
|
2366
|
+
|
|
2367
|
+
test("fingersCrossed() - LRU eviction order with access updates", async () => {
|
|
2368
|
+
const buffer: LogRecord[] = [];
|
|
2369
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2370
|
+
isolateByContext: {
|
|
2371
|
+
keys: ["requestId"],
|
|
2372
|
+
maxContexts: 2,
|
|
2373
|
+
},
|
|
2374
|
+
});
|
|
2375
|
+
|
|
2376
|
+
// Add two contexts with time gap to ensure different timestamps
|
|
2377
|
+
const req1Record: LogRecord = {
|
|
2378
|
+
...debug,
|
|
2379
|
+
properties: { requestId: "req-1" },
|
|
2380
|
+
};
|
|
2381
|
+
sink(req1Record); // req-1 is oldest
|
|
2382
|
+
|
|
2383
|
+
// Small delay to ensure different lastAccess times
|
|
2384
|
+
await new Promise((resolve) => setTimeout(resolve, 1));
|
|
2385
|
+
|
|
2386
|
+
const req2Record: LogRecord = {
|
|
2387
|
+
...debug,
|
|
2388
|
+
properties: { requestId: "req-2" },
|
|
2389
|
+
};
|
|
2390
|
+
sink(req2Record); // req-2 is newest
|
|
2391
|
+
|
|
2392
|
+
// Access req-1 again after another delay to make it more recent
|
|
2393
|
+
await new Promise((resolve) => setTimeout(resolve, 1));
|
|
2394
|
+
|
|
2395
|
+
const req1Second: LogRecord = {
|
|
2396
|
+
...debug,
|
|
2397
|
+
properties: { requestId: "req-1" },
|
|
2398
|
+
};
|
|
2399
|
+
sink(req1Second); // Now req-2 is oldest, req-1 is newest
|
|
2400
|
+
|
|
2401
|
+
// Add third context - should evict req-2 (now the oldest)
|
|
2402
|
+
const req3Record: LogRecord = {
|
|
2403
|
+
...debug,
|
|
2404
|
+
properties: { requestId: "req-3" },
|
|
2405
|
+
};
|
|
2406
|
+
sink(req3Record);
|
|
2407
|
+
|
|
2408
|
+
// Verify req-2 was evicted
|
|
2409
|
+
const req2Error: LogRecord = {
|
|
2410
|
+
...error,
|
|
2411
|
+
properties: { requestId: "req-2" },
|
|
2412
|
+
};
|
|
2413
|
+
sink(req2Error);
|
|
2414
|
+
|
|
2415
|
+
// Should only have error record (no buffered records)
|
|
2416
|
+
assertEquals(buffer.length, 1, "req-2 should have been evicted");
|
|
2417
|
+
assertEquals(buffer[0], req2Error);
|
|
2418
|
+
});
|
|
2419
|
+
|
|
2420
|
+
test("fingersCrossed() - LRU disabled when maxContexts is zero", () => {
|
|
2421
|
+
const buffer: LogRecord[] = [];
|
|
2422
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2423
|
+
isolateByContext: {
|
|
2424
|
+
keys: ["requestId"],
|
|
2425
|
+
maxContexts: 0, // LRU disabled
|
|
2426
|
+
},
|
|
2427
|
+
});
|
|
2428
|
+
|
|
2429
|
+
// Create many contexts - should not be limited
|
|
2430
|
+
for (let i = 0; i < 100; i++) {
|
|
2431
|
+
const record: LogRecord = {
|
|
2432
|
+
...debug,
|
|
2433
|
+
properties: { requestId: `req-${i}` },
|
|
2434
|
+
};
|
|
2435
|
+
sink(record);
|
|
2436
|
+
}
|
|
2437
|
+
|
|
2438
|
+
// Trigger the last context
|
|
2439
|
+
const errorRecord: LogRecord = {
|
|
2440
|
+
...error,
|
|
2441
|
+
properties: { requestId: "req-99" },
|
|
2442
|
+
};
|
|
2443
|
+
sink(errorRecord);
|
|
2444
|
+
|
|
2445
|
+
// Should have both debug and error records
|
|
2446
|
+
assertEquals(buffer.length, 2);
|
|
2447
|
+
assertEquals(buffer[0].properties?.requestId, "req-99");
|
|
2448
|
+
assertEquals(buffer[1], errorRecord);
|
|
2449
|
+
});
|
|
2450
|
+
|
|
2451
|
+
test("fingersCrossed() - LRU disabled when maxContexts is undefined", () => {
|
|
2452
|
+
const buffer: LogRecord[] = [];
|
|
2453
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2454
|
+
isolateByContext: {
|
|
2455
|
+
keys: ["requestId"],
|
|
2456
|
+
// maxContexts not specified
|
|
2457
|
+
},
|
|
2458
|
+
});
|
|
2459
|
+
|
|
2460
|
+
// Should work normally without LRU limits
|
|
2461
|
+
const record: LogRecord = {
|
|
2462
|
+
...debug,
|
|
2463
|
+
properties: { requestId: "req-1" },
|
|
2464
|
+
};
|
|
2465
|
+
sink(record);
|
|
2466
|
+
|
|
2467
|
+
const errorRecord: LogRecord = {
|
|
2468
|
+
...error,
|
|
2469
|
+
properties: { requestId: "req-1" },
|
|
2470
|
+
};
|
|
2471
|
+
sink(errorRecord);
|
|
2472
|
+
|
|
2473
|
+
assertEquals(buffer.length, 2);
|
|
2474
|
+
assertEquals(buffer[0], record);
|
|
2475
|
+
assertEquals(buffer[1], errorRecord);
|
|
2476
|
+
});
|
|
2477
|
+
|
|
2478
|
+
test("fingersCrossed() - Combined TTL and LRU functionality", async () => {
  const buffer: LogRecord[] = [];
  // Both limits enabled at once: at most 2 contexts, each expiring 100 ms
  // after its newest record, with the cleanup sweep running every 50 ms.
  const sink = fingersCrossed(buffer.push.bind(buffer), {
    isolateByContext: {
      keys: ["requestId"],
      maxContexts: 2, // LRU limit
      bufferTtlMs: 100, // TTL limit
      cleanupIntervalMs: 50, // cleanup interval
    },
  }) as Sink & Disposable;

  try {
    // Create records for multiple contexts
    const req1Record: LogRecord = {
      ...debug,
      properties: { requestId: "req-1" },
      timestamp: Date.now(),
    };
    const req2Record: LogRecord = {
      ...debug,
      properties: { requestId: "req-2" },
      timestamp: Date.now(),
    };

    // Add two contexts (within LRU limit)
    sink(req1Record);
    sink(req2Record);

    // Wait for TTL to expire
    await new Promise((resolve) => setTimeout(resolve, 150));

    // Add a third context (should work because TTL cleaned up old ones)
    const req3Record: LogRecord = {
      ...debug,
      properties: { requestId: "req-3" },
      timestamp: Date.now(),
    };
    sink(req3Record);

    // Trigger req-1 (should not find buffered records due to TTL expiry)
    const req1Error: LogRecord = {
      ...error,
      properties: { requestId: "req-1" },
      timestamp: Date.now(),
    };
    sink(req1Error);

    // Should only have the error record
    assertEquals(buffer.length, 1);
    assertEquals(buffer[0], req1Error);

    // Clear buffer and trigger req-3 (should have recent record)
    buffer.length = 0;
    const req3Error: LogRecord = {
      ...error,
      properties: { requestId: "req-3" },
      timestamp: Date.now(),
    };
    sink(req3Error);

    // Should have both debug and error records
    assertEquals(buffer.length, 2);
    assertEquals(buffer[0], req3Record);
    assertEquals(buffer[1], req3Error);
  } finally {
    // Stop the cleanup interval so the test process can exit cleanly.
    sink[Symbol.dispose]();
  }
});
|
|
2546
|
+
|
|
2547
|
+
test("fingersCrossed() - LRU priority over TTL for active contexts", () => {
|
|
2548
|
+
const buffer: LogRecord[] = [];
|
|
2549
|
+
const sink = fingersCrossed(buffer.push.bind(buffer), {
|
|
2550
|
+
isolateByContext: {
|
|
2551
|
+
keys: ["requestId"],
|
|
2552
|
+
maxContexts: 2,
|
|
2553
|
+
bufferTtlMs: 10000, // Long TTL (10 seconds)
|
|
2554
|
+
},
|
|
2555
|
+
}) as Sink & Disposable;
|
|
2556
|
+
|
|
2557
|
+
try {
|
|
2558
|
+
// Create 3 contexts quickly (before TTL expires)
|
|
2559
|
+
const req1Record: LogRecord = {
|
|
2560
|
+
...debug,
|
|
2561
|
+
properties: { requestId: "req-1" },
|
|
2562
|
+
};
|
|
2563
|
+
const req2Record: LogRecord = {
|
|
2564
|
+
...debug,
|
|
2565
|
+
properties: { requestId: "req-2" },
|
|
2566
|
+
};
|
|
2567
|
+
const req3Record: LogRecord = {
|
|
2568
|
+
...debug,
|
|
2569
|
+
properties: { requestId: "req-3" },
|
|
2570
|
+
};
|
|
2571
|
+
|
|
2572
|
+
sink(req1Record); // LRU position: oldest
|
|
2573
|
+
sink(req2Record); // LRU position: middle
|
|
2574
|
+
sink(req3Record); // LRU position: newest, should evict req-1 due to LRU
|
|
2575
|
+
|
|
2576
|
+
// Now trigger req-2 (should have buffered record)
|
|
2577
|
+
const req2Error: LogRecord = {
|
|
2578
|
+
...error,
|
|
2579
|
+
properties: { requestId: "req-2" },
|
|
2580
|
+
};
|
|
2581
|
+
sink(req2Error);
|
|
2582
|
+
|
|
2583
|
+
// Should have both debug and error records
|
|
2584
|
+
assertEquals(buffer.length, 2);
|
|
2585
|
+
assertEquals(buffer[0], req2Record);
|
|
2586
|
+
assertEquals(buffer[1], req2Error);
|
|
2587
|
+
} finally {
|
|
2588
|
+
sink[Symbol.dispose]();
|
|
2589
|
+
}
|
|
2590
|
+
});
|
package/src/sink.ts
CHANGED
|
@@ -513,6 +513,17 @@ export interface FingersCrossedOptions {
|
|
|
513
513
|
* })
|
|
514
514
|
* ```
|
|
515
515
|
*
|
|
516
|
+
* @example With TTL-based buffer cleanup
|
|
517
|
+
* ```typescript
|
|
518
|
+
* fingersCrossed(sink, {
|
|
519
|
+
* isolateByContext: {
|
|
520
|
+
* keys: ['requestId'],
|
|
521
|
+
* bufferTtlMs: 30000, // 30 seconds
|
|
522
|
+
* cleanupIntervalMs: 10000 // cleanup every 10 seconds
|
|
523
|
+
* }
|
|
524
|
+
* })
|
|
525
|
+
* ```
|
|
526
|
+
*
|
|
516
527
|
* @default `undefined` (no context isolation)
|
|
517
528
|
* @since 1.2.0
|
|
518
529
|
*/
|
|
@@ -522,9 +533,65 @@ export interface FingersCrossedOptions {
|
|
|
522
533
|
* Buffers will be separate for different combinations of these context values.
|
|
523
534
|
*/
|
|
524
535
|
readonly keys: readonly string[];
|
|
536
|
+
|
|
537
|
+
/**
|
|
538
|
+
* Maximum number of context buffers to maintain simultaneously.
|
|
539
|
+
* When this limit is exceeded, the least recently used (LRU) buffers
|
|
540
|
+
* will be evicted to make room for new ones.
|
|
541
|
+
*
|
|
542
|
+
* This provides memory protection in high-concurrency scenarios where
|
|
543
|
+
* many different context values might be active simultaneously.
|
|
544
|
+
*
|
|
545
|
+
* When set to 0 or undefined, no limit is enforced.
|
|
546
|
+
*
|
|
547
|
+
* @default `undefined` (no limit)
|
|
548
|
+
* @since 1.2.0
|
|
549
|
+
*/
|
|
550
|
+
readonly maxContexts?: number;
|
|
551
|
+
|
|
552
|
+
/**
|
|
553
|
+
* Time-to-live for context buffers in milliseconds.
|
|
554
|
+
* Buffers that haven't been accessed for this duration will be automatically
|
|
555
|
+
* cleaned up to prevent memory leaks in long-running applications.
|
|
556
|
+
*
|
|
557
|
+
* When set to 0 or undefined, buffers will never expire based on time.
|
|
558
|
+
*
|
|
559
|
+
* @default `undefined` (no TTL)
|
|
560
|
+
* @since 1.2.0
|
|
561
|
+
*/
|
|
562
|
+
readonly bufferTtlMs?: number;
|
|
563
|
+
|
|
564
|
+
/**
|
|
565
|
+
* Interval in milliseconds for running cleanup operations.
|
|
566
|
+
* The cleanup process removes expired buffers based on {@link bufferTtlMs}.
|
|
567
|
+
*
|
|
568
|
+
* This option is ignored if {@link bufferTtlMs} is not set.
|
|
569
|
+
*
|
|
570
|
+
* @default `30000` (30 seconds)
|
|
571
|
+
* @since 1.2.0
|
|
572
|
+
*/
|
|
573
|
+
readonly cleanupIntervalMs?: number;
|
|
525
574
|
};
|
|
526
575
|
}
|
|
527
576
|
|
|
577
|
+
/**
 * Metadata for context-based buffer tracking.
 * Used internally by {@link fingersCrossed} to manage buffer lifecycle with LRU support.
 * @since 1.2.0
 */
interface BufferMetadata {
  /**
   * The actual log records buffer.  Records are appended in arrival order,
   * and the oldest are shifted off when the per-buffer size limit is hit.
   */
  readonly buffer: LogRecord[];

  /**
   * Timestamp of the last access to this buffer (in milliseconds since the
   * Unix epoch, as produced by `Date.now()`).
   * Used for LRU-based eviction when the `maxContexts` option of
   * {@link FingersCrossedOptions.isolateByContext} is set.
   */
  lastAccess: number;
}
|
|
594
|
+
|
|
528
595
|
/**
|
|
529
596
|
* Creates a sink that buffers log records until a trigger level is reached.
|
|
530
597
|
* This pattern, known as "fingers crossed" logging, keeps detailed debug logs
|
|
@@ -563,12 +630,19 @@ export interface FingersCrossedOptions {
|
|
|
563
630
|
export function fingersCrossed(
|
|
564
631
|
sink: Sink,
|
|
565
632
|
options: FingersCrossedOptions = {},
|
|
566
|
-
): Sink {
|
|
633
|
+
): Sink | (Sink & Disposable) {
|
|
567
634
|
const triggerLevel = options.triggerLevel ?? "error";
|
|
568
635
|
const maxBufferSize = Math.max(0, options.maxBufferSize ?? 1000);
|
|
569
636
|
const isolateByCategory = options.isolateByCategory;
|
|
570
637
|
const isolateByContext = options.isolateByContext;
|
|
571
638
|
|
|
639
|
+
// TTL and LRU configuration
|
|
640
|
+
const bufferTtlMs = isolateByContext?.bufferTtlMs;
|
|
641
|
+
const cleanupIntervalMs = isolateByContext?.cleanupIntervalMs ?? 30000;
|
|
642
|
+
const maxContexts = isolateByContext?.maxContexts;
|
|
643
|
+
const hasTtl = bufferTtlMs != null && bufferTtlMs > 0;
|
|
644
|
+
const hasLru = maxContexts != null && maxContexts > 0;
|
|
645
|
+
|
|
572
646
|
// Validate trigger level early
|
|
573
647
|
try {
|
|
574
648
|
compareLogLevel("trace", triggerLevel); // Test with any valid level
|
|
@@ -685,6 +759,52 @@ export function fingersCrossed(
|
|
|
685
759
|
return { category: parseCategoryKey(categoryPart), context: contextPart };
|
|
686
760
|
}
|
|
687
761
|
|
|
762
|
+
// TTL-based cleanup function
|
|
763
|
+
function cleanupExpiredBuffers(buffers: Map<string, BufferMetadata>): void {
|
|
764
|
+
if (!hasTtl) return;
|
|
765
|
+
|
|
766
|
+
const now = Date.now();
|
|
767
|
+
const expiredKeys: string[] = [];
|
|
768
|
+
|
|
769
|
+
for (const [key, metadata] of buffers) {
|
|
770
|
+
if (metadata.buffer.length === 0) continue;
|
|
771
|
+
|
|
772
|
+
// Use the timestamp of the last (most recent) record in the buffer
|
|
773
|
+
const lastRecordTimestamp =
|
|
774
|
+
metadata.buffer[metadata.buffer.length - 1].timestamp;
|
|
775
|
+
if (now - lastRecordTimestamp > bufferTtlMs!) {
|
|
776
|
+
expiredKeys.push(key);
|
|
777
|
+
}
|
|
778
|
+
}
|
|
779
|
+
|
|
780
|
+
// Remove expired buffers
|
|
781
|
+
for (const key of expiredKeys) {
|
|
782
|
+
buffers.delete(key);
|
|
783
|
+
}
|
|
784
|
+
}
|
|
785
|
+
|
|
786
|
+
// LRU-based eviction function
|
|
787
|
+
function evictLruBuffers(
|
|
788
|
+
buffers: Map<string, BufferMetadata>,
|
|
789
|
+
numToEvict?: number,
|
|
790
|
+
): void {
|
|
791
|
+
if (!hasLru) return;
|
|
792
|
+
|
|
793
|
+
// Use provided numToEvict or calculate based on current size vs limit
|
|
794
|
+
const toEvict = numToEvict ?? Math.max(0, buffers.size - maxContexts!);
|
|
795
|
+
if (toEvict <= 0) return;
|
|
796
|
+
|
|
797
|
+
// Sort by lastAccess timestamp (oldest first)
|
|
798
|
+
const sortedEntries = Array.from(buffers.entries())
|
|
799
|
+
.sort(([, a], [, b]) => a.lastAccess - b.lastAccess);
|
|
800
|
+
|
|
801
|
+
// Remove the oldest buffers
|
|
802
|
+
for (let i = 0; i < toEvict; i++) {
|
|
803
|
+
const [key] = sortedEntries[i];
|
|
804
|
+
buffers.delete(key);
|
|
805
|
+
}
|
|
806
|
+
}
|
|
807
|
+
|
|
688
808
|
// Buffer management
|
|
689
809
|
if (!isolateByCategory && !isolateByContext) {
|
|
690
810
|
// Single global buffer
|
|
@@ -722,10 +842,18 @@ export function fingersCrossed(
|
|
|
722
842
|
};
|
|
723
843
|
} else {
|
|
724
844
|
// Category and/or context-isolated buffers
|
|
725
|
-
const buffers = new Map<string,
|
|
845
|
+
const buffers = new Map<string, BufferMetadata>();
|
|
726
846
|
const triggered = new Set<string>();
|
|
727
847
|
|
|
728
|
-
|
|
848
|
+
// Set up TTL cleanup timer if enabled
|
|
849
|
+
let cleanupTimer: ReturnType<typeof setInterval> | null = null;
|
|
850
|
+
if (hasTtl) {
|
|
851
|
+
cleanupTimer = setInterval(() => {
|
|
852
|
+
cleanupExpiredBuffers(buffers);
|
|
853
|
+
}, cleanupIntervalMs);
|
|
854
|
+
}
|
|
855
|
+
|
|
856
|
+
const fingersCrossedSink = (record: LogRecord) => {
|
|
729
857
|
const bufferKey = getBufferKey(record.category, record.properties);
|
|
730
858
|
|
|
731
859
|
// Check if this buffer is already triggered
|
|
@@ -783,9 +911,9 @@ export function fingersCrossed(
|
|
|
783
911
|
// Flush matching buffers
|
|
784
912
|
const allRecordsToFlush: LogRecord[] = [];
|
|
785
913
|
for (const key of keysToFlush) {
|
|
786
|
-
const
|
|
787
|
-
if (
|
|
788
|
-
allRecordsToFlush.push(...buffer);
|
|
914
|
+
const metadata = buffers.get(key);
|
|
915
|
+
if (metadata) {
|
|
916
|
+
allRecordsToFlush.push(...metadata.buffer);
|
|
789
917
|
buffers.delete(key);
|
|
790
918
|
triggered.add(key);
|
|
791
919
|
}
|
|
@@ -804,19 +932,45 @@ export function fingersCrossed(
|
|
|
804
932
|
sink(record);
|
|
805
933
|
} else {
|
|
806
934
|
// Buffer the record
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
935
|
+
const now = Date.now();
|
|
936
|
+
let metadata = buffers.get(bufferKey);
|
|
937
|
+
if (!metadata) {
|
|
938
|
+
// Apply LRU eviction if adding new buffer would exceed capacity
|
|
939
|
+
if (hasLru && buffers.size >= maxContexts!) {
|
|
940
|
+
// Calculate how many buffers to evict to make room for the new one
|
|
941
|
+
const numToEvict = buffers.size - maxContexts! + 1;
|
|
942
|
+
evictLruBuffers(buffers, numToEvict);
|
|
943
|
+
}
|
|
944
|
+
|
|
945
|
+
metadata = {
|
|
946
|
+
buffer: [],
|
|
947
|
+
lastAccess: now,
|
|
948
|
+
};
|
|
949
|
+
buffers.set(bufferKey, metadata);
|
|
950
|
+
} else {
|
|
951
|
+
// Update last access time for LRU
|
|
952
|
+
metadata.lastAccess = now;
|
|
811
953
|
}
|
|
812
954
|
|
|
813
|
-
buffer.push(record);
|
|
955
|
+
metadata.buffer.push(record);
|
|
814
956
|
|
|
815
957
|
// Enforce max buffer size per buffer
|
|
816
|
-
while (buffer.length > maxBufferSize) {
|
|
817
|
-
buffer.shift();
|
|
958
|
+
while (metadata.buffer.length > maxBufferSize) {
|
|
959
|
+
metadata.buffer.shift();
|
|
818
960
|
}
|
|
819
961
|
}
|
|
820
962
|
};
|
|
963
|
+
|
|
964
|
+
// Add disposal functionality to clean up timer
|
|
965
|
+
if (cleanupTimer !== null) {
|
|
966
|
+
(fingersCrossedSink as Sink & Disposable)[Symbol.dispose] = () => {
|
|
967
|
+
if (cleanupTimer !== null) {
|
|
968
|
+
clearInterval(cleanupTimer);
|
|
969
|
+
cleanupTimer = null;
|
|
970
|
+
}
|
|
971
|
+
};
|
|
972
|
+
}
|
|
973
|
+
|
|
974
|
+
return fingersCrossedSink;
|
|
821
975
|
}
|
|
822
976
|
}
|