web-csv-toolbox 0.13.0-next-bd865d6ddb1cf9691d7b9a83d0790651f074dd47 → 0.13.0-next-7d51d5285be9cffa5103de58469d8de0c98959d7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -357,8 +357,134 @@ ideal for developers looking for in-depth control and flexibility.
 
  - **`class CSVLexerTransformer`**: [📑](https://kamiazya.github.io/web-csv-toolbox/classes/CSVLexerTransformer.html)
    - A TransformStream class for lexical analysis of CSV data.
+   - Supports custom queuing strategies for controlling backpressure and memory usage.
  - **`class CSVRecordAssemblerTransformer`**: [📑](https://kamiazya.github.io/web-csv-toolbox/classes/CSVRecordAssemblerTransformer.html)
    - Handles the assembly of parsed data into records.
+   - Supports custom queuing strategies for controlling backpressure and memory usage.
+
+ #### Customizing Queuing Strategies
+
+ Both `CSVLexerTransformer` and `CSVRecordAssemblerTransformer` support custom queuing strategies following the Web Streams API pattern. Strategies are passed as constructor arguments with **data-type-aware size counting** and **configurable backpressure handling**.
+
+ **Constructor signature:**
+ ```typescript
+ new CSVLexerTransformer(options?, writableStrategy?, readableStrategy?)
+ new CSVRecordAssemblerTransformer(options?, writableStrategy?, readableStrategy?)
+ ```
+
+ **Default queuing strategies (starting points, not benchmarked):**
+ ```typescript
+ // CSVLexerTransformer defaults
+ writableStrategy: {
+   highWaterMark: 65536,          // 64KB of characters
+   size: (chunk) => chunk.length, // Count by string length
+   checkInterval: 100             // Check backpressure every 100 tokens
+ }
+ readableStrategy: {
+   highWaterMark: 1024,             // 1024 tokens
+   size: (tokens) => tokens.length, // Count by number of tokens
+   checkInterval: 100               // Check backpressure every 100 tokens
+ }
+
+ // CSVRecordAssemblerTransformer defaults
+ writableStrategy: {
+   highWaterMark: 1024,             // 1024 tokens
+   size: (tokens) => tokens.length, // Count by number of tokens
+   checkInterval: 10                // Check backpressure every 10 records
+ }
+ readableStrategy: {
+   highWaterMark: 256, // 256 records
+   size: () => 1,      // Each record counts as 1
+   checkInterval: 10   // Check backpressure every 10 records
+ }
+ ```
+
+ **Key Features:**
+
+ 🎯 **Smart Size Counting:**
+ - Character-based counting for string inputs (accurate memory tracking)
+ - Token-based counting between transformers (smooth pipeline flow)
+ - Record-based counting for output (intuitive and predictable)
+
+ ⚡ **Cooperative Backpressure:**
+ - Monitors `controller.desiredSize` during processing
+ - Yields to event loop when backpressure detected
+ - Prevents blocking the main thread
+ - Critical for browser UI responsiveness
+
+ 🔧 **Tunable Check Interval:**
+ - `checkInterval`: How often to check for backpressure
+ - Lower values (5-25): More responsive, slight overhead
+ - Higher values (100-500): Less overhead, slower response
+ - Customize based on downstream consumer speed
+
+ > ⚠️ **Important**: These defaults are theoretical starting points based on data flow characteristics, **not empirical benchmarks**. Optimal values vary by runtime (browser/Node.js/Deno), file size, memory constraints, and CPU performance. **Profile your specific use case** to find the best values.
+
+ **When to customize:**
+ - 🚀 **High-throughput servers**: Higher `highWaterMark` (128KB+, 2048+ tokens), higher `checkInterval` (200-500)
+ - 📱 **Memory-constrained environments**: Lower `highWaterMark` (16KB, 256 tokens), lower `checkInterval` (10-25)
+ - 🐌 **Slow consumers** (DB writes, API calls): Lower `highWaterMark`, lower `checkInterval` for responsive backpressure
+ - 🏃 **Fast processing**: Higher values to reduce overhead
+
+ **Example - High-throughput server:**
+ ```typescript
+ import { CSVLexerTransformer, CSVRecordAssemblerTransformer } from 'web-csv-toolbox';
+
+ const response = await fetch('large-dataset.csv');
+ await response.body
+   .pipeThrough(new TextDecoderStream())
+   .pipeThrough(new CSVLexerTransformer(
+     {},
+     {
+       highWaterMark: 131072, // 128KB
+       size: (chunk) => chunk.length,
+       checkInterval: 200     // Less frequent checks
+     },
+     {
+       highWaterMark: 2048,   // 2048 tokens
+       size: (tokens) => tokens.length,
+       checkInterval: 100
+     }
+   ))
+   .pipeThrough(new CSVRecordAssemblerTransformer(
+     {},
+     {
+       highWaterMark: 2048,   // 2048 tokens
+       size: (tokens) => tokens.length,
+       checkInterval: 20
+     },
+     {
+       highWaterMark: 512,    // 512 records
+       size: () => 1,
+       checkInterval: 10
+     }
+   ))
+   .pipeTo(yourRecordProcessor);
+ ```
+
+ **Example - Slow consumer (API writes):**
+ ```typescript
+ await csvStream
+   .pipeThrough(new CSVLexerTransformer()) // Use defaults
+   .pipeThrough(new CSVRecordAssemblerTransformer(
+     {},
+     { highWaterMark: 512, size: (t) => t.length, checkInterval: 5 },
+     { highWaterMark: 64, size: () => 1, checkInterval: 2 } // Very responsive
+   ))
+   .pipeTo(new WritableStream({
+     async write(record) {
+       await fetch('/api/save', { method: 'POST', body: JSON.stringify(record) });
+     }
+   }));
+ ```
+
+ **Benchmarking:**
+ Use the provided benchmark tool to find optimal values for your use case:
+ ```bash
+ pnpm --filter web-csv-toolbox-benchmark queuing-strategy
+ ```
+
+ See `benchmark/queuing-strategy.bench.ts` for implementation details.
 
  ### Experimental APIs 🧪
 
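The README changes above name a memory-constrained configuration among the customization scenarios but only show high-throughput and slow-consumer examples. Below is a minimal sketch of that third scenario using the constructor signature added in this version; the input stream variable and the specific numbers are illustrative assumptions, not benchmarked recommendations.

```typescript
import { CSVLexerTransformer, CSVRecordAssemblerTransformer } from 'web-csv-toolbox';

// Hypothetical input: any ReadableStream<string> of CSV text.
declare const csvTextStream: ReadableStream<string>;

await csvTextStream
  .pipeThrough(new CSVLexerTransformer(
    {},
    // Small buffers and frequent checks keep peak memory low.
    { highWaterMark: 16384, size: (chunk) => chunk.length, checkInterval: 25 }, // ~16KB of characters
    { highWaterMark: 256, size: (tokens) => tokens.length, checkInterval: 25 }
  ))
  .pipeThrough(new CSVRecordAssemblerTransformer(
    {},
    { highWaterMark: 256, size: (tokens) => tokens.length, checkInterval: 10 },
    { highWaterMark: 64, size: () => 1, checkInterval: 10 }
  ))
  .pipeTo(new WritableStream({
    write(record) {
      // Consume each record as it arrives; replace with your own handler.
      console.log(record);
    }
  }));
```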
@@ -1,12 +1,32 @@
  import { CSVLexer } from './CSVLexer.ts';
- import { CSVLexerTransformerOptions, Token } from './common/types.ts';
+ import { CSVLexerTransformerOptions, ExtendedQueuingStrategy, Token } from './common/types.ts';
  import { DEFAULT_DELIMITER, DEFAULT_QUOTATION } from './constants.ts';
  /**
  * A transform stream that converts a stream of strings into a stream of tokens.
  *
  * @category Low-level API
  *
- * @example Parse a CSV with headers by data
+ * @param options - CSV-specific options (delimiter, quotation, etc.)
+ * @param writableStrategy - Strategy for the writable side (default: `{ highWaterMark: 65536, size: chunk => chunk.length, checkInterval: 100 }`)
+ * @param readableStrategy - Strategy for the readable side (default: `{ highWaterMark: 1024, size: tokens => tokens.length, checkInterval: 100 }`)
+ *
+ * @remarks
+ * Follows the Web Streams API pattern where queuing strategies are passed as
+ * constructor arguments, similar to the standard `TransformStream`.
+ *
+ * **Default Queuing Strategy:**
+ * - Writable side: Counts by string length (characters). Default highWaterMark is 65536 characters (≈64KB).
+ * - Readable side: Counts by number of tokens in each array. Default highWaterMark is 1024 tokens.
+ *
+ * **Backpressure Handling:**
+ * The transformer monitors `controller.desiredSize` and yields to the event loop when backpressure
+ * is detected (desiredSize ≤ 0). This prevents blocking the main thread during heavy processing
+ * and allows the downstream consumer to catch up.
+ *
+ * These defaults are starting points based on data flow characteristics, not empirical benchmarks.
+ * Optimal values depend on your runtime environment, data size, and performance requirements.
+ *
+ * @example Basic usage
  * ```ts
  * new ReadableStream({
  *   start(controller) {
@@ -30,8 +50,37 @@ import { DEFAULT_DELIMITER, DEFAULT_QUOTATION } from './constants.ts';
  * // { type: Field, value: "20" }
  * // { type: RecordDelimiter, value: "\r\n", location: {...} }
  * ```
+ *
+ * @example Custom queuing strategies with backpressure tuning
+ * ```ts
+ * const transformer = new CSVLexerTransformer(
+ *   { delimiter: ',' },
+ *   {
+ *     highWaterMark: 131072,         // 128KB of characters
+ *     size: (chunk) => chunk.length, // Count by character length
+ *     checkInterval: 200             // Check backpressure every 200 tokens
+ *   },
+ *   {
+ *     highWaterMark: 2048,             // 2048 tokens
+ *     size: (tokens) => tokens.length, // Count by token count
+ *     checkInterval: 50                // Check backpressure every 50 tokens
+ *   }
+ * );
+ *
+ * const response = await fetch('large-file.csv');
+ * await response.body
+ *   .pipeThrough(new TextDecoderStream())
+ *   .pipeThrough(transformer)
+ *   .pipeTo(yourProcessor);
+ * ```
  */
  export declare class CSVLexerTransformer<Delimiter extends string = DEFAULT_DELIMITER, Quotation extends string = DEFAULT_QUOTATION> extends TransformStream<string, Token[]> {
    readonly lexer: CSVLexer<Delimiter, Quotation>;
-   constructor(options?: CSVLexerTransformerOptions<Delimiter, Quotation>);
+   /**
+    * Yields to the event loop to allow backpressure handling.
+    * Can be overridden for testing purposes.
+    * @internal
+    */
+   protected yieldToEventLoop(): Promise<void>;
+   constructor(options?: CSVLexerTransformerOptions<Delimiter, Quotation>, writableStrategy?: ExtendedQueuingStrategy<string>, readableStrategy?: ExtendedQueuingStrategy<Token[]>);
  }
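The declaration above adds a protected `yieldToEventLoop()` hook and notes that it can be overridden for testing. A minimal sketch of what such an override might look like; the subclass, counter, and strategy values are hypothetical test scaffolding, not part of the package.

```typescript
import { CSVLexerTransformer } from 'web-csv-toolbox';

// Hypothetical test double: counts how often the transformer yielded
// to the event loop because downstream backpressure was detected.
class CountingLexerTransformer extends CSVLexerTransformer {
  yieldCount = 0;

  protected override async yieldToEventLoop(): Promise<void> {
    this.yieldCount++;
    await super.yieldToEventLoop();
  }
}

// Small highWaterMark and checkInterval values make backpressure easy to trigger in a test.
const transformer = new CountingLexerTransformer(
  {},
  { highWaterMark: 64, size: (chunk) => chunk.length, checkInterval: 10 },
  { highWaterMark: 16, size: (tokens) => tokens.length, checkInterval: 10 }
);
// ...pipe CSV text through `transformer`, then assert on `transformer.yieldCount`.
```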
@@ -2,26 +2,71 @@ import { CSVLexer } from './CSVLexer.js';
 
  class CSVLexerTransformer extends TransformStream {
    lexer;
-   constructor(options = {}) {
+   /**
+    * Yields to the event loop to allow backpressure handling.
+    * Can be overridden for testing purposes.
+    * @internal
+    */
+   async yieldToEventLoop() {
+     await new Promise((resolve) => setTimeout(resolve, 0));
+   }
+   constructor(options = {}, writableStrategy = {
+     highWaterMark: 65536,
+     // 64KB worth of characters
+     size: (chunk) => chunk.length,
+     // Count by string length (character count)
+     checkInterval: 100
+     // Check backpressure every 100 tokens
+   }, readableStrategy = {
+     highWaterMark: 1024,
+     // 1024 tokens
+     size: (tokens) => tokens.length,
+     // Count by number of tokens in array
+     checkInterval: 100
+     // Check backpressure every 100 tokens
+   }) {
      const lexer = new CSVLexer(options);
-     super({
-       transform: (chunk, controller) => {
-         if (chunk.length !== 0) {
+     const checkInterval = writableStrategy.checkInterval ?? readableStrategy.checkInterval ?? 100;
+     super(
+       {
+         transform: async (chunk, controller) => {
+           if (chunk.length !== 0) {
+             try {
+               const tokens = [];
+               for (const token of lexer.lex(chunk, { stream: true })) {
+                 tokens.push(token);
+                 if (tokens.length % checkInterval === 0 && controller.desiredSize !== null && controller.desiredSize <= 0) {
+                   await this.yieldToEventLoop();
+                 }
+               }
+               if (tokens.length > 0) {
+                 controller.enqueue(tokens);
+               }
+             } catch (error) {
+               controller.error(error);
+             }
+           }
+         },
+         flush: async (controller) => {
            try {
-             controller.enqueue([...lexer.lex(chunk, { stream: true })]);
+             const tokens = [];
+             for (const token of lexer.lex()) {
+               tokens.push(token);
+               if (tokens.length % checkInterval === 0 && controller.desiredSize !== null && controller.desiredSize <= 0) {
+                 await this.yieldToEventLoop();
+               }
+             }
+             if (tokens.length > 0) {
+               controller.enqueue(tokens);
+             }
            } catch (error) {
              controller.error(error);
            }
          }
        },
-       flush: (controller) => {
-         try {
-           controller.enqueue([...lexer.lex()]);
-         } catch (error) {
-           controller.error(error);
-         }
-       }
-     });
+       writableStrategy,
+       readableStrategy
+     );
      this.lexer = lexer;
    }
  }
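One detail worth noting in the compiled output above: a single `checkInterval` is resolved for both `transform` and `flush` as `writableStrategy.checkInterval ?? readableStrategy.checkInterval ?? 100`, so a value set on either side applies to the whole transformer. A small illustrative sketch (the values are arbitrary):

```typescript
import { CSVLexerTransformer } from 'web-csv-toolbox';

// Only the writable side sets checkInterval, so 50 is used for both
// transform and flush; if neither side set it, 100 would be used.
const lexer = new CSVLexerTransformer(
  {},
  { highWaterMark: 65536, size: (chunk) => chunk.length, checkInterval: 50 },
  { highWaterMark: 1024, size: (tokens) => tokens.length } // no checkInterval here
);
```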
@@ -1 +1 @@
- {"version":3,"file":"CSVLexerTransformer.js","sources":["../src/CSVLexerTransformer.ts"],"sourcesContent":["import { CSVLexer } from \"./CSVLexer.ts\";\nimport type { CSVLexerTransformerOptions, Token } from \"./common/types.ts\";\nimport type { DEFAULT_DELIMITER, DEFAULT_QUOTATION } from \"./constants.ts\";\n\n/**\n * A transform stream that converts a stream of strings into a stream of tokens.\n *\n * @category Low-level API\n *\n * @example Parse a CSV with headers by data\n * ```ts\n * new ReadableStream({\n * start(controller) {\n * controller.enqueue(\"name,age\\r\\n\");\n * controller.enqueue(\"Alice,20\\r\\n\");\n * controller.close();\n * }\n * })\n * .pipeThrough(new CSVLexerTransformer())\n * .pipeTo(new WritableStream({ write(tokens) {\n * for (const token of tokens) {\n * console.log(token);\n * }\n * }}));\n * // { type: Field, value: \"name\", location: {...} }\n * // { type: FieldDelimiter, value: \",\", location: {...} }\n * // { type: Field, value: \"age\", location: {...} }\n * // { type: RecordDelimiter, value: \"\\r\\n\", location: {...} }\n * // { type: Field, value: \"Alice\", location: {...} }\n * // { type: FieldDelimiter, value: \",\", location: {...} }\n * // { type: Field, value: \"20\" }\n * // { type: RecordDelimiter, value: \"\\r\\n\", location: {...} }\n * ```\n */\nexport class CSVLexerTransformer<\n Delimiter extends string = DEFAULT_DELIMITER,\n Quotation extends string = DEFAULT_QUOTATION,\n> extends TransformStream<string, Token[]> {\n public readonly lexer: CSVLexer<Delimiter, Quotation>;\n constructor(options: CSVLexerTransformerOptions<Delimiter, Quotation> = {}) {\n const lexer = new CSVLexer(options);\n super({\n transform: (chunk, controller) => {\n if (chunk.length !== 0) {\n try {\n controller.enqueue([...lexer.lex(chunk, { stream: true })]);\n } catch (error) {\n controller.error(error);\n }\n }\n },\n flush: (controller) => {\n try {\n controller.enqueue([...lexer.lex()]);\n } catch (error) {\n controller.error(error);\n }\n },\n });\n this.lexer = lexer;\n }\n}\n"],"names":[],"mappings":";;AAkCO,MAAM,4BAGH,eAAiC,CAAA;AAAA,EACzB,KAAA;AAAA,EAChB,WAAA,CAAY,OAA4D,GAAA,EAAI,EAAA;AAC1E,IAAM,MAAA,KAAA,GAAQ,IAAI,QAAA,CAAS,OAAO,CAAA;AAClC,IAAM,KAAA,CAAA;AAAA,MACJ,SAAA,EAAW,CAAC,KAAA,EAAO,UAAe,KAAA;AAChC,QAAI,IAAA,KAAA,CAAM,WAAW,CAAG,EAAA;AACtB,UAAI,IAAA;AACF,YAAW,UAAA,CAAA,OAAA,CAAQ,CAAC,GAAG,KAAM,CAAA,GAAA,CAAI,KAAO,EAAA,EAAE,MAAQ,EAAA,IAAA,EAAM,CAAC,CAAC,CAAA;AAAA,mBACnD,KAAO,EAAA;AACd,YAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB;AACF,OACF;AAAA,MACA,KAAA,EAAO,CAAC,UAAe,KAAA;AACrB,QAAI,IAAA;AACF,UAAA,UAAA,CAAW,QAAQ,CAAC,GAAG,KAAM,CAAA,GAAA,EAAK,CAAC,CAAA;AAAA,iBAC5B,KAAO,EAAA;AACd,UAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB;AACF,KACD,CAAA;AACD,IAAA,IAAA,CAAK,KAAQ,GAAA,KAAA;AAAA;AAEjB;;;;"}
+ {"version":3,"file":"CSVLexerTransformer.js","sources":["../src/CSVLexerTransformer.ts"],"sourcesContent":["import { CSVLexer } from \"./CSVLexer.ts\";\nimport type {\n CSVLexerTransformerOptions,\n ExtendedQueuingStrategy,\n Token,\n} from \"./common/types.ts\";\nimport type { DEFAULT_DELIMITER, DEFAULT_QUOTATION } from \"./constants.ts\";\n\n/**\n * A transform stream that converts a stream of strings into a stream of tokens.\n *\n * @category Low-level API\n *\n * @param options - CSV-specific options (delimiter, quotation, etc.)\n * @param writableStrategy - Strategy for the writable side (default: `{ highWaterMark: 65536, size: chunk => chunk.length, checkInterval: 100 }`)\n * @param readableStrategy - Strategy for the readable side (default: `{ highWaterMark: 1024, size: tokens => tokens.length, checkInterval: 100 }`)\n *\n * @remarks\n * Follows the Web Streams API pattern where queuing strategies are passed as\n * constructor arguments, similar to the standard `TransformStream`.\n *\n * **Default Queuing Strategy:**\n * - Writable side: Counts by string length (characters). Default highWaterMark is 65536 characters (≈64KB).\n * - Readable side: Counts by number of tokens in each array. Default highWaterMark is 1024 tokens.\n *\n * **Backpressure Handling:**\n * The transformer monitors `controller.desiredSize` and yields to the event loop when backpressure\n * is detected (desiredSize ≤ 0). This prevents blocking the main thread during heavy processing\n * and allows the downstream consumer to catch up.\n *\n * These defaults are starting points based on data flow characteristics, not empirical benchmarks.\n * Optimal values depend on your runtime environment, data size, and performance requirements.\n *\n * @example Basic usage\n * ```ts\n * new ReadableStream({\n * start(controller) {\n * controller.enqueue(\"name,age\\r\\n\");\n * controller.enqueue(\"Alice,20\\r\\n\");\n * controller.close();\n * }\n * })\n * .pipeThrough(new CSVLexerTransformer())\n * .pipeTo(new WritableStream({ write(tokens) {\n * for (const token of tokens) {\n * console.log(token);\n * }\n * }}));\n * // { type: Field, value: \"name\", location: {...} }\n * // { type: FieldDelimiter, value: \",\", location: {...} }\n * // { type: Field, value: \"age\", location: {...} }\n * // { type: RecordDelimiter, value: \"\\r\\n\", location: {...} }\n * // { type: Field, value: \"Alice\", location: {...} }\n * // { type: FieldDelimiter, value: \",\", location: {...} }\n * // { type: Field, value: \"20\" }\n * // { type: RecordDelimiter, value: \"\\r\\n\", location: {...} }\n * ```\n *\n * @example Custom queuing strategies with backpressure tuning\n * ```ts\n * const transformer = new CSVLexerTransformer(\n * { delimiter: ',' },\n * {\n * highWaterMark: 131072, // 128KB of characters\n * size: (chunk) => chunk.length, // Count by character length\n * checkInterval: 200 // Check backpressure every 200 tokens\n * },\n * {\n * highWaterMark: 2048, // 2048 tokens\n * size: (tokens) => tokens.length, // Count by token count\n * checkInterval: 50 // Check backpressure every 50 tokens\n * }\n * );\n *\n * await fetch('large-file.csv')\n * .then(res => res.body)\n * .pipeThrough(new TextDecoderStream())\n * .pipeThrough(transformer)\n * .pipeTo(yourProcessor);\n * ```\n */\nexport class CSVLexerTransformer<\n Delimiter extends string = DEFAULT_DELIMITER,\n Quotation extends string = DEFAULT_QUOTATION,\n> extends TransformStream<string, Token[]> {\n public readonly lexer: CSVLexer<Delimiter, Quotation>;\n\n /**\n * Yields to 
the event loop to allow backpressure handling.\n * Can be overridden for testing purposes.\n * @internal\n */\n protected async yieldToEventLoop(): Promise<void> {\n await new Promise((resolve) => setTimeout(resolve, 0));\n }\n\n constructor(\n options: CSVLexerTransformerOptions<Delimiter, Quotation> = {},\n writableStrategy: ExtendedQueuingStrategy<string> = {\n highWaterMark: 65536, // 64KB worth of characters\n size: (chunk) => chunk.length, // Count by string length (character count)\n checkInterval: 100, // Check backpressure every 100 tokens\n },\n readableStrategy: ExtendedQueuingStrategy<Token[]> = {\n highWaterMark: 1024, // 1024 tokens\n size: (tokens) => tokens.length, // Count by number of tokens in array\n checkInterval: 100, // Check backpressure every 100 tokens\n },\n ) {\n const lexer = new CSVLexer(options);\n const checkInterval =\n writableStrategy.checkInterval ?? readableStrategy.checkInterval ?? 100;\n\n super(\n {\n transform: async (chunk, controller) => {\n if (chunk.length !== 0) {\n try {\n const tokens: Token[] = [];\n for (const token of lexer.lex(chunk, { stream: true })) {\n tokens.push(token);\n\n // Check backpressure periodically based on checkInterval\n if (\n tokens.length % checkInterval === 0 &&\n controller.desiredSize !== null &&\n controller.desiredSize <= 0\n ) {\n // Yield to event loop when backpressure is detected\n await this.yieldToEventLoop();\n }\n }\n\n if (tokens.length > 0) {\n controller.enqueue(tokens);\n }\n } catch (error) {\n controller.error(error);\n }\n }\n },\n flush: async (controller) => {\n try {\n const tokens: Token[] = [];\n for (const token of lexer.lex()) {\n tokens.push(token);\n\n // Check backpressure periodically based on checkInterval\n if (\n tokens.length % checkInterval === 0 &&\n controller.desiredSize !== null &&\n controller.desiredSize <= 0\n ) {\n await this.yieldToEventLoop();\n }\n }\n\n if (tokens.length > 0) {\n controller.enqueue(tokens);\n }\n } catch (error) {\n controller.error(error);\n }\n },\n },\n writableStrategy,\n readableStrategy,\n );\n this.lexer = lexer;\n 
}\n}\n"],"names":[],"mappings":";;AAiFO,MAAM,4BAGH,eAAiC,CAAA;AAAA,EACzB,KAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOhB,MAAgB,gBAAkC,GAAA;AAChD,IAAA,MAAM,IAAI,OAAQ,CAAA,CAAC,YAAY,UAAW,CAAA,OAAA,EAAS,CAAC,CAAC,CAAA;AAAA;AACvD,EAEA,WACE,CAAA,OAAA,GAA4D,EAAC,EAC7D,gBAAoD,GAAA;AAAA,IAClD,aAAe,EAAA,KAAA;AAAA;AAAA,IACf,IAAA,EAAM,CAAC,KAAA,KAAU,KAAM,CAAA,MAAA;AAAA;AAAA,IACvB,aAAe,EAAA;AAAA;AAAA,KAEjB,gBAAqD,GAAA;AAAA,IACnD,aAAe,EAAA,IAAA;AAAA;AAAA,IACf,IAAA,EAAM,CAAC,MAAA,KAAW,MAAO,CAAA,MAAA;AAAA;AAAA,IACzB,aAAe,EAAA;AAAA;AAAA,GAEjB,EAAA;AACA,IAAM,MAAA,KAAA,GAAQ,IAAI,QAAA,CAAS,OAAO,CAAA;AAClC,IAAA,MAAM,aACJ,GAAA,gBAAA,CAAiB,aAAiB,IAAA,gBAAA,CAAiB,aAAiB,IAAA,GAAA;AAEtE,IAAA,KAAA;AAAA,MACE;AAAA,QACE,SAAA,EAAW,OAAO,KAAA,EAAO,UAAe,KAAA;AACtC,UAAI,IAAA,KAAA,CAAM,WAAW,CAAG,EAAA;AACtB,YAAI,IAAA;AACF,cAAA,MAAM,SAAkB,EAAC;AACzB,cAAW,KAAA,MAAA,KAAA,IAAS,MAAM,GAAI,CAAA,KAAA,EAAO,EAAE,MAAQ,EAAA,IAAA,EAAM,CAAG,EAAA;AACtD,gBAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAGjB,gBACE,IAAA,MAAA,CAAO,SAAS,aAAkB,KAAA,CAAA,IAClC,WAAW,WAAgB,KAAA,IAAA,IAC3B,UAAW,CAAA,WAAA,IAAe,CAC1B,EAAA;AAEA,kBAAA,MAAM,KAAK,gBAAiB,EAAA;AAAA;AAC9B;AAGF,cAAI,IAAA,MAAA,CAAO,SAAS,CAAG,EAAA;AACrB,gBAAA,UAAA,CAAW,QAAQ,MAAM,CAAA;AAAA;AAC3B,qBACO,KAAO,EAAA;AACd,cAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB;AACF,SACF;AAAA,QACA,KAAA,EAAO,OAAO,UAAe,KAAA;AAC3B,UAAI,IAAA;AACF,YAAA,MAAM,SAAkB,EAAC;AACzB,YAAW,KAAA,MAAA,KAAA,IAAS,KAAM,CAAA,GAAA,EAAO,EAAA;AAC/B,cAAA,MAAA,CAAO,KAAK,KAAK,CAAA;AAGjB,cACE,IAAA,MAAA,CAAO,SAAS,aAAkB,KAAA,CAAA,IAClC,WAAW,WAAgB,KAAA,IAAA,IAC3B,UAAW,CAAA,WAAA,IAAe,CAC1B,EAAA;AACA,gBAAA,MAAM,KAAK,gBAAiB,EAAA;AAAA;AAC9B;AAGF,YAAI,IAAA,MAAA,CAAO,SAAS,CAAG,EAAA;AACrB,cAAA,UAAA,CAAW,QAAQ,MAAM,CAAA;AAAA;AAC3B,mBACO,KAAO,EAAA;AACd,YAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB;AACF,OACF;AAAA,MACA,gBAAA;AAAA,MACA;AAAA,KACF;AACA,IAAA,IAAA,CAAK,KAAQ,GAAA,KAAA;AAAA;AAEjB;;;;"}
@@ -1,12 +1,31 @@
  import { CSVRecordAssembler } from './CSVRecordAssembler.ts';
- import { CSVRecord, CSVRecordAssemblerOptions, Token } from './common/types.ts';
+ import { CSVRecord, CSVRecordAssemblerOptions, ExtendedQueuingStrategy, Token } from './common/types.ts';
  /**
  * A transform stream that converts a stream of tokens into a stream of CSV records.
+ *
  * @template Header The type of the header row.
- * @param options The options for the parser.
+ * @param options - CSV-specific options (header, maxFieldCount, etc.)
+ * @param writableStrategy - Strategy for the writable side (default: `{ highWaterMark: 1024, size: tokens => tokens.length, checkInterval: 10 }`)
+ * @param readableStrategy - Strategy for the readable side (default: `{ highWaterMark: 256, size: () => 1, checkInterval: 10 }`)
  *
  * @category Low-level API
  *
+ * @remarks
+ * Follows the Web Streams API pattern where queuing strategies are passed as
+ * constructor arguments, similar to the standard `TransformStream`.
+ *
+ * **Default Queuing Strategy:**
+ * - Writable side: Counts by number of tokens in each array. Default highWaterMark is 1024 tokens.
+ * - Readable side: Counts each record as 1. Default highWaterMark is 256 records.
+ *
+ * **Backpressure Handling:**
+ * The transformer monitors `controller.desiredSize` and yields to the event loop when backpressure
+ * is detected (desiredSize ≤ 0). This prevents blocking the main thread during heavy processing
+ * and allows the downstream consumer to catch up.
+ *
+ * These defaults are starting points based on data flow characteristics, not empirical benchmarks.
+ * Optimal values depend on your runtime environment, data size, and performance requirements.
+ *
  * @example Parse a CSV with headers by data
  * ```ts
  * new ReadableStream({
@@ -42,8 +61,35 @@ import { CSVRecord, CSVRecordAssemblerOptions, Token } from './common/types.ts';
  * // { name: "Bob", age: "25" }
  * // { name: "Charlie", age: "30" }
  * ```
+ *
+ * @example Custom queuing strategies with backpressure tuning
+ * ```ts
+ * const transformer = new CSVRecordAssemblerTransformer(
+ *   {},
+ *   {
+ *     highWaterMark: 2048,             // 2048 tokens
+ *     size: (tokens) => tokens.length, // Count by token count
+ *     checkInterval: 20                // Check backpressure every 20 records
+ *   },
+ *   {
+ *     highWaterMark: 512, // 512 records
+ *     size: () => 1,      // Each record counts as 1
+ *     checkInterval: 5    // Check backpressure every 5 records
+ *   }
+ * );
+ *
+ * await tokenStream
+ *   .pipeThrough(transformer)
+ *   .pipeTo(yourRecordProcessor);
+ * ```
  */
  export declare class CSVRecordAssemblerTransformer<Header extends ReadonlyArray<string>> extends TransformStream<Token[], CSVRecord<Header>> {
    readonly assembler: CSVRecordAssembler<Header>;
-   constructor(options?: CSVRecordAssemblerOptions<Header>);
+   /**
+    * Yields to the event loop to allow backpressure handling.
+    * Can be overridden for testing purposes.
+    * @internal
+    */
+   protected yieldToEventLoop(): Promise<void>;
+   constructor(options?: CSVRecordAssemblerOptions<Header>, writableStrategy?: ExtendedQueuingStrategy<Token[]>, readableStrategy?: ExtendedQueuingStrategy<CSVRecord<Header>>);
  }
@@ -2,28 +2,65 @@ import { CSVRecordAssembler } from './CSVRecordAssembler.js';
 
  class CSVRecordAssemblerTransformer extends TransformStream {
    assembler;
-   constructor(options = {}) {
+   /**
+    * Yields to the event loop to allow backpressure handling.
+    * Can be overridden for testing purposes.
+    * @internal
+    */
+   async yieldToEventLoop() {
+     await new Promise((resolve) => setTimeout(resolve, 0));
+   }
+   constructor(options = {}, writableStrategy = {
+     highWaterMark: 1024,
+     // 1024 tokens
+     size: (tokens) => tokens.length,
+     // Count by number of tokens in array
+     checkInterval: 10
+     // Check backpressure every 10 records
+   }, readableStrategy = {
+     highWaterMark: 256,
+     // 256 records
+     size: () => 1,
+     // Each record counts as 1
+     checkInterval: 10
+     // Check backpressure every 10 records
+   }) {
      const assembler = new CSVRecordAssembler(options);
-     super({
-       transform: (tokens, controller) => {
-         try {
-           for (const token of assembler.assemble(tokens, { stream: true })) {
-             controller.enqueue(token);
+     const checkInterval = writableStrategy.checkInterval ?? readableStrategy.checkInterval ?? 10;
+     super(
+       {
+         transform: async (tokens, controller) => {
+           try {
+             let recordCount = 0;
+             for (const record of assembler.assemble(tokens, { stream: true })) {
+               controller.enqueue(record);
+               recordCount++;
+               if (recordCount % checkInterval === 0 && controller.desiredSize !== null && controller.desiredSize <= 0) {
+                 await this.yieldToEventLoop();
+               }
+             }
+           } catch (error) {
+             controller.error(error);
            }
-         } catch (error) {
-           controller.error(error);
-         }
-       },
-       flush: (controller) => {
-         try {
-           for (const token of assembler.assemble()) {
-             controller.enqueue(token);
+         },
+         flush: async (controller) => {
+           try {
+             let recordCount = 0;
+             for (const record of assembler.assemble()) {
+               controller.enqueue(record);
+               recordCount++;
+               if (recordCount % checkInterval === 0 && controller.desiredSize !== null && controller.desiredSize <= 0) {
+                 await this.yieldToEventLoop();
+               }
+             }
+           } catch (error) {
+             controller.error(error);
            }
-         } catch (error) {
-           controller.error(error);
          }
-       }
-     });
+       },
+       writableStrategy,
+       readableStrategy
+     );
      this.assembler = assembler;
    }
  }
@@ -1 +1 @@
- {"version":3,"file":"CSVRecordAssemblerTransformer.js","sources":["../src/CSVRecordAssemblerTransformer.ts"],"sourcesContent":["import { CSVRecordAssembler } from \"./CSVRecordAssembler.ts\";\nimport type {\n CSVRecord,\n CSVRecordAssemblerOptions,\n Token,\n} from \"./common/types.ts\";\n\n/**\n * A transform stream that converts a stream of tokens into a stream of CSV records.\n * @template Header The type of the header row.\n * @param options The options for the parser.\n *\n * @category Low-level API\n *\n * @example Parse a CSV with headers by data\n * ```ts\n * new ReadableStream({\n * start(controller) {\n * controller.enqueue(\"name,age\\r\\n\");\n * controller.enqueue(\"Alice,20\\r\\n\");\n * controller.enqueue(\"Bob,25\\r\\n\");\n * controller.enqueue(\"Charlie,30\\r\\n\");\n * controller.close();\n * })\n * .pipeThrough(new CSVLexerTransformer())\n * .pipeThrough(new CSVRecordAssemblerTransformer())\n * .pipeTo(new WritableStream({ write(row) { console.log(row); }}));\n * // { name: \"Alice\", age: \"20\" }\n * // { name: \"Bob\", age: \"25\" }\n * // { name: \"Charlie\", age: \"30\" }\n * ```\n *\n * @example Parse a CSV with headers by options\n * ```ts\n * new ReadableStream({\n * start(controller) {\n * controller.enqueue(\"Alice,20\\r\\n\");\n * controller.enqueue(\"Bob,25\\r\\n\");\n * controller.enqueue(\"Charlie,30\\r\\n\");\n * controller.close();\n * }\n * })\n * .pipeThrough(new CSVLexerTransformer())\n * .pipeThrough(new CSVRecordAssemblerTransformer({ header: [\"name\", \"age\"] }))\n * .pipeTo(new WritableStream({ write(row) { console.log(row); }}));\n * // { name: \"Alice\", age: \"20\" }\n * // { name: \"Bob\", age: \"25\" }\n * // { name: \"Charlie\", age: \"30\" }\n * ```\n */\nexport class CSVRecordAssemblerTransformer<\n Header extends ReadonlyArray<string>,\n> extends TransformStream<Token[], CSVRecord<Header>> {\n public readonly assembler: CSVRecordAssembler<Header>;\n\n constructor(options: CSVRecordAssemblerOptions<Header> = {}) {\n const assembler = new CSVRecordAssembler(options);\n super({\n transform: (tokens, controller) => {\n try {\n for (const token of assembler.assemble(tokens, { stream: true })) {\n controller.enqueue(token);\n }\n } catch (error) {\n controller.error(error);\n }\n },\n flush: (controller) => {\n try {\n for (const token of assembler.assemble()) {\n controller.enqueue(token);\n }\n } catch (error) {\n controller.error(error);\n }\n },\n });\n this.assembler = assembler;\n }\n}\n"],"names":[],"mappings":";;AAkDO,MAAM,sCAEH,eAA4C,CAAA;AAAA,EACpC,SAAA;AAAA,EAEhB,WAAA,CAAY,OAA6C,GAAA,EAAI,EAAA;AAC3D,IAAM,MAAA,SAAA,GAAY,IAAI,kBAAA,CAAmB,OAAO,CAAA;AAChD,IAAM,KAAA,CAAA;AAAA,MACJ,SAAA,EAAW,CAAC,MAAA,EAAQ,UAAe,KAAA;AACjC,QAAI,IAAA;AACF,UAAW,KAAA,MAAA,KAAA,IAAS,UAAU,QAAS,CAAA,MAAA,EAAQ,EAAE,MAAQ,EAAA,IAAA,EAAM,CAAG,EAAA;AAChE,YAAA,UAAA,CAAW,QAAQ,KAAK,CAAA;AAAA;AAC1B,iBACO,KAAO,EAAA;AACd,UAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB,OACF;AAAA,MACA,KAAA,EAAO,CAAC,UAAe,KAAA;AACrB,QAAI,IAAA;AACF,UAAW,KAAA,MAAA,KAAA,IAAS,SAAU,CAAA,QAAA,EAAY,EAAA;AACxC,YAAA,UAAA,CAAW,QAAQ,KAAK,CAAA;AAAA;AAC1B,iBACO,KAAO,EAAA;AACd,UAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB;AACF,KACD,CAAA;AACD,IAAA,IAAA,CAAK,SAAY,GAAA,SAAA;AAAA;AAErB;;;;"}
+ {"version":3,"file":"CSVRecordAssemblerTransformer.js","sources":["../src/CSVRecordAssemblerTransformer.ts"],"sourcesContent":["import { CSVRecordAssembler } from \"./CSVRecordAssembler.ts\";\nimport type {\n CSVRecord,\n CSVRecordAssemblerOptions,\n ExtendedQueuingStrategy,\n Token,\n} from \"./common/types.ts\";\n\n/**\n * A transform stream that converts a stream of tokens into a stream of CSV records.\n *\n * @template Header The type of the header row.\n * @param options - CSV-specific options (header, maxFieldCount, etc.)\n * @param writableStrategy - Strategy for the writable side (default: `{ highWaterMark: 1024, size: tokens => tokens.length, checkInterval: 10 }`)\n * @param readableStrategy - Strategy for the readable side (default: `{ highWaterMark: 256, size: () => 1, checkInterval: 10 }`)\n *\n * @category Low-level API\n *\n * @remarks\n * Follows the Web Streams API pattern where queuing strategies are passed as\n * constructor arguments, similar to the standard `TransformStream`.\n *\n * **Default Queuing Strategy:**\n * - Writable side: Counts by number of tokens in each array. Default highWaterMark is 1024 tokens.\n * - Readable side: Counts each record as 1. Default highWaterMark is 256 records.\n *\n * **Backpressure Handling:**\n * The transformer monitors `controller.desiredSize` and yields to the event loop when backpressure\n * is detected (desiredSize ≤ 0). This prevents blocking the main thread during heavy processing\n * and allows the downstream consumer to catch up.\n *\n * These defaults are starting points based on data flow characteristics, not empirical benchmarks.\n * Optimal values depend on your runtime environment, data size, and performance requirements.\n *\n * @example Parse a CSV with headers by data\n * ```ts\n * new ReadableStream({\n * start(controller) {\n * controller.enqueue(\"name,age\\r\\n\");\n * controller.enqueue(\"Alice,20\\r\\n\");\n * controller.enqueue(\"Bob,25\\r\\n\");\n * controller.enqueue(\"Charlie,30\\r\\n\");\n * controller.close();\n * })\n * .pipeThrough(new CSVLexerTransformer())\n * .pipeThrough(new CSVRecordAssemblerTransformer())\n * .pipeTo(new WritableStream({ write(row) { console.log(row); }}));\n * // { name: \"Alice\", age: \"20\" }\n * // { name: \"Bob\", age: \"25\" }\n * // { name: \"Charlie\", age: \"30\" }\n * ```\n *\n * @example Parse a CSV with headers by options\n * ```ts\n * new ReadableStream({\n * start(controller) {\n * controller.enqueue(\"Alice,20\\r\\n\");\n * controller.enqueue(\"Bob,25\\r\\n\");\n * controller.enqueue(\"Charlie,30\\r\\n\");\n * controller.close();\n * }\n * })\n * .pipeThrough(new CSVLexerTransformer())\n * .pipeThrough(new CSVRecordAssemblerTransformer({ header: [\"name\", \"age\"] }))\n * .pipeTo(new WritableStream({ write(row) { console.log(row); }}));\n * // { name: \"Alice\", age: \"20\" }\n * // { name: \"Bob\", age: \"25\" }\n * // { name: \"Charlie\", age: \"30\" }\n * ```\n *\n * @example Custom queuing strategies with backpressure tuning\n * ```ts\n * const transformer = new CSVRecordAssemblerTransformer(\n * {},\n * {\n * highWaterMark: 2048, // 2048 tokens\n * size: (tokens) => tokens.length, // Count by token count\n * checkInterval: 20 // Check backpressure every 20 records\n * },\n * {\n * highWaterMark: 512, // 512 records\n * size: () => 1, // Each record counts as 1\n * checkInterval: 5 // Check backpressure every 5 records\n * }\n * );\n *\n * await tokenStream\n * .pipeThrough(transformer)\n * .pipeTo(yourRecordProcessor);\n * ```\n */\nexport class 
CSVRecordAssemblerTransformer<\n Header extends ReadonlyArray<string>,\n> extends TransformStream<Token[], CSVRecord<Header>> {\n public readonly assembler: CSVRecordAssembler<Header>;\n\n /**\n * Yields to the event loop to allow backpressure handling.\n * Can be overridden for testing purposes.\n * @internal\n */\n protected async yieldToEventLoop(): Promise<void> {\n await new Promise((resolve) => setTimeout(resolve, 0));\n }\n\n constructor(\n options: CSVRecordAssemblerOptions<Header> = {},\n writableStrategy: ExtendedQueuingStrategy<Token[]> = {\n highWaterMark: 1024, // 1024 tokens\n size: (tokens) => tokens.length, // Count by number of tokens in array\n checkInterval: 10, // Check backpressure every 10 records\n },\n readableStrategy: ExtendedQueuingStrategy<CSVRecord<Header>> = {\n highWaterMark: 256, // 256 records\n size: () => 1, // Each record counts as 1\n checkInterval: 10, // Check backpressure every 10 records\n },\n ) {\n const assembler = new CSVRecordAssembler(options);\n const checkInterval =\n writableStrategy.checkInterval ?? readableStrategy.checkInterval ?? 10;\n\n super(\n {\n transform: async (tokens, controller) => {\n try {\n let recordCount = 0;\n for (const record of assembler.assemble(tokens, { stream: true })) {\n controller.enqueue(record);\n recordCount++;\n\n // Check backpressure periodically based on checkInterval\n if (\n recordCount % checkInterval === 0 &&\n controller.desiredSize !== null &&\n controller.desiredSize <= 0\n ) {\n // Yield to event loop when backpressure is detected\n await this.yieldToEventLoop();\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n flush: async (controller) => {\n try {\n let recordCount = 0;\n for (const record of assembler.assemble()) {\n controller.enqueue(record);\n recordCount++;\n\n // Check backpressure periodically based on checkInterval\n if (\n recordCount % checkInterval === 0 &&\n controller.desiredSize !== null &&\n controller.desiredSize <= 0\n ) {\n await this.yieldToEventLoop();\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n },\n writableStrategy,\n readableStrategy,\n );\n this.assembler = assembler;\n 
}\n}\n"],"names":[],"mappings":";;AA2FO,MAAM,sCAEH,eAA4C,CAAA;AAAA,EACpC,SAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOhB,MAAgB,gBAAkC,GAAA;AAChD,IAAA,MAAM,IAAI,OAAQ,CAAA,CAAC,YAAY,UAAW,CAAA,OAAA,EAAS,CAAC,CAAC,CAAA;AAAA;AACvD,EAEA,WACE,CAAA,OAAA,GAA6C,EAAC,EAC9C,gBAAqD,GAAA;AAAA,IACnD,aAAe,EAAA,IAAA;AAAA;AAAA,IACf,IAAA,EAAM,CAAC,MAAA,KAAW,MAAO,CAAA,MAAA;AAAA;AAAA,IACzB,aAAe,EAAA;AAAA;AAAA,KAEjB,gBAA+D,GAAA;AAAA,IAC7D,aAAe,EAAA,GAAA;AAAA;AAAA,IACf,MAAM,MAAM,CAAA;AAAA;AAAA,IACZ,aAAe,EAAA;AAAA;AAAA,GAEjB,EAAA;AACA,IAAM,MAAA,SAAA,GAAY,IAAI,kBAAA,CAAmB,OAAO,CAAA;AAChD,IAAA,MAAM,aACJ,GAAA,gBAAA,CAAiB,aAAiB,IAAA,gBAAA,CAAiB,aAAiB,IAAA,EAAA;AAEtE,IAAA,KAAA;AAAA,MACE;AAAA,QACE,SAAA,EAAW,OAAO,MAAA,EAAQ,UAAe,KAAA;AACvC,UAAI,IAAA;AACF,YAAA,IAAI,WAAc,GAAA,CAAA;AAClB,YAAW,KAAA,MAAA,MAAA,IAAU,UAAU,QAAS,CAAA,MAAA,EAAQ,EAAE,MAAQ,EAAA,IAAA,EAAM,CAAG,EAAA;AACjE,cAAA,UAAA,CAAW,QAAQ,MAAM,CAAA;AACzB,cAAA,WAAA,EAAA;AAGA,cACE,IAAA,WAAA,GAAc,kBAAkB,CAChC,IAAA,UAAA,CAAW,gBAAgB,IAC3B,IAAA,UAAA,CAAW,eAAe,CAC1B,EAAA;AAEA,gBAAA,MAAM,KAAK,gBAAiB,EAAA;AAAA;AAC9B;AACF,mBACO,KAAO,EAAA;AACd,YAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB,SACF;AAAA,QACA,KAAA,EAAO,OAAO,UAAe,KAAA;AAC3B,UAAI,IAAA;AACF,YAAA,IAAI,WAAc,GAAA,CAAA;AAClB,YAAW,KAAA,MAAA,MAAA,IAAU,SAAU,CAAA,QAAA,EAAY,EAAA;AACzC,cAAA,UAAA,CAAW,QAAQ,MAAM,CAAA;AACzB,cAAA,WAAA,EAAA;AAGA,cACE,IAAA,WAAA,GAAc,kBAAkB,CAChC,IAAA,UAAA,CAAW,gBAAgB,IAC3B,IAAA,UAAA,CAAW,eAAe,CAC1B,EAAA;AACA,gBAAA,MAAM,KAAK,gBAAiB,EAAA;AAAA;AAC9B;AACF,mBACO,KAAO,EAAA;AACd,YAAA,UAAA,CAAW,MAAM,KAAK,CAAA;AAAA;AACxB;AACF,OACF;AAAA,MACA,gBAAA;AAAA,MACA;AAAA,KACF;AACA,IAAA,IAAA,CAAK,SAAY,GAAA,SAAA;AAAA;AAErB;;;;"}
@@ -356,6 +356,31 @@ export type CSVString<Header extends ReadonlyArray<string> = [], Delimiter exten
  * @category Types
  */
  export type CSVBinary = ReadableStream<Uint8Array> | Response | ArrayBuffer | Uint8Array;
+ /**
+  * Backpressure monitoring options.
+  *
+  * @category Types
+  */
+ export interface BackpressureOptions {
+   /**
+    * How often to check for backpressure (in number of items processed).
+    *
+    * Lower values = more responsive to backpressure but slight performance overhead.
+    * Higher values = less overhead but slower backpressure response.
+    *
+    * Default:
+    * - CSVLexerTransformer: 100 tokens
+    * - CSVRecordAssemblerTransformer: 10 records
+    */
+   checkInterval?: number;
+ }
+ /**
+  * Extended queuing strategy with backpressure monitoring options.
+  *
+  * @category Types
+  */
+ export interface ExtendedQueuingStrategy<T> extends QueuingStrategy<T>, BackpressureOptions {
+ }
  /**
  * CSV.
  *
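The new `ExtendedQueuingStrategy<T>` added above is simply a standard `QueuingStrategy<T>` plus the optional `checkInterval` from `BackpressureOptions`. Below is a minimal sketch of building such strategies and handing them to a transformer; the values are illustrative, and because this diff does not show whether the interface itself is re-exported from the package entry point, the objects are typed structurally here.

```typescript
import { CSVRecordAssemblerTransformer } from 'web-csv-toolbox';

// Structurally matches ExtendedQueuingStrategy<Token[]>: a QueuingStrategy
// plus the optional checkInterval field introduced above.
const tokenSideStrategy = {
  highWaterMark: 2048,                        // up to ~2048 buffered tokens
  size: (tokens: unknown[]) => tokens.length, // count by tokens per chunk
  checkInterval: 20,                          // check desiredSize every 20 records
};

// Structurally matches ExtendedQueuingStrategy<CSVRecord<Header>>.
const recordSideStrategy = {
  highWaterMark: 128, // up to 128 buffered records
  size: () => 1,      // each record counts as 1
  checkInterval: 5,
};

const assembler = new CSVRecordAssemblerTransformer(
  { header: ['name', 'age'] },
  tokenSideStrategy,
  recordSideStrategy
);
```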
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "web-csv-toolbox",
3
- "version": "0.13.0-next-bd865d6ddb1cf9691d7b9a83d0790651f074dd47",
3
+ "version": "0.13.0-next-7d51d5285be9cffa5103de58469d8de0c98959d7",
4
4
  "description": "A CSV Toolbox utilizing Web Standard APIs.",
5
5
  "type": "module",
6
6
  "module": "dist/web-csv-toolbox.js",