@cj-tech-master/excelts 6.1.0 → 6.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/modules/csv/worker/worker-script.generated.d.ts +1 -1
- package/dist/browser/modules/csv/worker/worker-script.generated.js +1 -1
- package/dist/browser/modules/excel/stream/sheet-rels-writer.d.ts +2 -1
- package/dist/browser/modules/excel/stream/sheet-rels-writer.js +10 -1
- package/dist/browser/modules/excel/stream/workbook-writer.browser.js +2 -1
- package/dist/browser/modules/excel/stream/worksheet-reader.d.ts +2 -1
- package/dist/browser/modules/excel/stream/worksheet-reader.js +4 -1
- package/dist/browser/modules/excel/workbook.browser.js +2 -1
- package/dist/browser/modules/excel/worksheet.js +2 -1
- package/dist/browser/modules/excel/xlsx/xform/sheet/hyperlink-xform.d.ts +8 -3
- package/dist/browser/modules/excel/xlsx/xform/sheet/hyperlink-xform.js +20 -10
- package/dist/browser/modules/excel/xlsx/xform/sheet/worksheet-xform.js +11 -1
- package/dist/cjs/modules/csv/worker/worker-script.generated.js +1 -1
- package/dist/cjs/modules/excel/stream/sheet-rels-writer.js +10 -1
- package/dist/cjs/modules/excel/stream/workbook-writer.browser.js +2 -1
- package/dist/cjs/modules/excel/stream/worksheet-reader.js +4 -1
- package/dist/cjs/modules/excel/workbook.browser.js +2 -1
- package/dist/cjs/modules/excel/worksheet.js +2 -1
- package/dist/cjs/modules/excel/xlsx/xform/sheet/hyperlink-xform.js +20 -9
- package/dist/cjs/modules/excel/xlsx/xform/sheet/worksheet-xform.js +11 -1
- package/dist/esm/modules/csv/worker/worker-script.generated.js +1 -1
- package/dist/esm/modules/excel/stream/sheet-rels-writer.js +10 -1
- package/dist/esm/modules/excel/stream/workbook-writer.browser.js +2 -1
- package/dist/esm/modules/excel/stream/worksheet-reader.js +4 -1
- package/dist/esm/modules/excel/workbook.browser.js +2 -1
- package/dist/esm/modules/excel/worksheet.js +2 -1
- package/dist/esm/modules/excel/xlsx/xform/sheet/hyperlink-xform.js +20 -10
- package/dist/esm/modules/excel/xlsx/xform/sheet/worksheet-xform.js +11 -1
- package/dist/iife/excelts.iife.js +77 -72
- package/dist/iife/excelts.iife.js.map +1 -1
- package/dist/iife/excelts.iife.min.js +34 -34
- package/dist/types/modules/csv/worker/worker-script.generated.d.ts +1 -1
- package/dist/types/modules/excel/stream/sheet-rels-writer.d.ts +2 -1
- package/dist/types/modules/excel/stream/worksheet-reader.d.ts +2 -1
- package/dist/types/modules/excel/xlsx/xform/sheet/hyperlink-xform.d.ts +8 -3
- package/package.json +13 -13
|
@@ -3,4 +3,4 @@
|
|
|
3
3
|
*
|
|
4
4
|
* Regenerate with: npm run generate:csv-worker
|
|
5
5
|
*/
|
|
6
|
-
export declare const CSV_WORKER_SCRIPT = "(function() {\n\t//#region src/modules/csv/utils/row.ts\n\t/**\n\t* Check if a row is a RowHashArray (array of [key, value] tuples)\n\t*/\n\tfunction isRowHashArray(row) {\n\t\tif (!Array.isArray(row) || row.length === 0) return false;\n\t\tconst first = row[0];\n\t\treturn Array.isArray(first) && first.length === 2 && typeof first[0] === \"string\";\n\t}\n\t/**\n\t* Convert RowHashArray to RowMap\n\t* Note: Manual loop is ~4x faster than Object.fromEntries\n\t*/\n\tfunction rowHashArrayToMap(row) {\n\t\tconst obj = Object.create(null);\n\t\tfor (const [key, value] of row) if (key !== \"__proto__\") obj[key] = value;\n\t\treturn obj;\n\t}\n\t/**\n\t* Convert RowHashArray to values array (preserving order)\n\t*/\n\tfunction rowHashArrayToValues(row) {\n\t\treturn row.map(([, value]) => value);\n\t}\n\t/**\n\t* Get headers from RowHashArray\n\t*/\n\tfunction rowHashArrayToHeaders(row) {\n\t\treturn row.map(([key]) => key);\n\t}\n\t/**\n\t* Get value by key from RowHashArray (returns undefined if not found)\n\t* More efficient than creating a full map when you need only specific values\n\t*/\n\tfunction rowHashArrayGet(row, key) {\n\t\tfor (const [k, v] of row) if (k === key) return v;\n\t}\n\t/**\n\t* Map RowHashArray values according to header order\n\t* Optimized: builds values array in single pass without intermediate object\n\t*/\n\tfunction rowHashArrayMapByHeaders(row, headers) {\n\t\tif (headers.length <= 10) return headers.map((h) => rowHashArrayGet(row, h));\n\t\tconst map = rowHashArrayToMap(row);\n\t\treturn headers.map((h) => map[h]);\n\t}\n\t/**\n\t* Deduplicate headers by appending suffix to duplicates.\n\t* Example: [\"A\", \"B\", \"A\", \"A\"] \u2192 [\"A\", \"B\", \"A_1\", \"A_2\"]\n\t*\n\t* @param headers - Original header array\n\t* @returns New array with unique header names\n\t*/\n\tfunction deduplicateHeaders(headers) {\n\t\treturn deduplicateHeadersWithRenames(headers).headers;\n\t}\n\tfunction 
deduplicateHeadersWithRenames(headers) {\n\t\tconst headerCount = /* @__PURE__ */ new Map();\n\t\tconst usedHeaders = /* @__PURE__ */ new Set();\n\t\tconst reservedHeaders = /* @__PURE__ */ new Set();\n\t\tconst result = [];\n\t\tconst renamedHeaders = {};\n\t\tlet hasRenames = false;\n\t\tlet emptyHeaderCount = 0;\n\t\tfor (const header of headers) if (header !== null && header !== void 0 && header !== \"\") reservedHeaders.add(header);\n\t\tfor (let i = 0; i < headers.length; i++) {\n\t\t\tconst header = headers[i];\n\t\t\tif (header === null || header === void 0) {\n\t\t\t\tresult.push(header);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (header === \"\") {\n\t\t\t\tlet placeholder = `_column_${i}`;\n\t\t\t\twhile (usedHeaders.has(placeholder) || reservedHeaders.has(placeholder)) placeholder = `_column_${i}_${emptyHeaderCount++}`;\n\t\t\t\tusedHeaders.add(placeholder);\n\t\t\t\tresult.push(placeholder);\n\t\t\t\trenamedHeaders[placeholder] = \"\";\n\t\t\t\thasRenames = true;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (!usedHeaders.has(header)) {\n\t\t\t\tusedHeaders.add(header);\n\t\t\t\theaderCount.set(header, 1);\n\t\t\t\tresult.push(header);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tlet suffix = headerCount.get(header) ?? 1;\n\t\t\tlet candidate = `${header}_${suffix}`;\n\t\t\twhile (usedHeaders.has(candidate) || reservedHeaders.has(candidate)) {\n\t\t\t\tsuffix++;\n\t\t\t\tcandidate = `${header}_${suffix}`;\n\t\t\t}\n\t\t\theaderCount.set(header, suffix + 1);\n\t\t\tusedHeaders.add(candidate);\n\t\t\tresult.push(candidate);\n\t\t\trenamedHeaders[candidate] = header;\n\t\t\thasRenames = true;\n\t\t}\n\t\treturn {\n\t\t\theaders: result,\n\t\t\trenamedHeaders: hasRenames ? 
renamedHeaders : null\n\t\t};\n\t}\n\t/**\n\t* Process columns configuration to extract keys and headers.\n\t* Returns null if columns is empty or undefined.\n\t*\n\t* This function is used by both formatCsv (batch) and CsvFormatterStream (streaming)\n\t* to normalize column configuration into separate key/header arrays.\n\t*\n\t* @param columns - Column configuration array (string names or ColumnConfig objects)\n\t* @returns Object with keys (data access) and headers (output names), or null if empty\n\t*\n\t* @example\n\t* ```ts\n\t* processColumns(['name', { key: 'age', header: 'Age (years)' }])\n\t* // { keys: ['name', 'age'], headers: ['name', 'Age (years)'] }\n\t* ```\n\t*/\n\tfunction processColumns(columns) {\n\t\tif (!columns || columns.length === 0) return null;\n\t\treturn {\n\t\t\tkeys: columns.map((c) => typeof c === \"string\" ? c : c.key),\n\t\t\theaders: columns.map((c) => typeof c === \"string\" ? c : c.header ?? c.key)\n\t\t};\n\t}\n\t/** Pre-compiled regex for non-whitespace detection */\n\tconst NON_WHITESPACE_REGEX = /\\S/;\n\t/**\n\t* Check if a row should be skipped as empty.\n\t* When `shouldSkipEmpty` is \"greedy\", whitespace-only rows also count as empty.\n\t*\n\t* @param row - The row to check\n\t* @param shouldSkipEmpty - true, false, or \"greedy\"\n\t* @returns true if the row should be skipped\n\t*/\n\tfunction isEmptyRow(row, shouldSkipEmpty) {\n\t\tif (!shouldSkipEmpty) return false;\n\t\tif (shouldSkipEmpty === \"greedy\") {\n\t\t\tfor (const field of row) if (NON_WHITESPACE_REGEX.test(field)) return false;\n\t\t} else for (const field of row) if (field !== \"\") return false;\n\t\treturn true;\n\t}\n\t/**\n\t* Check if all values in a row are empty strings.\n\t* Used by skipRecordsWithEmptyValues option.\n\t*\n\t* @param row - The row to check\n\t* @returns true if all fields are empty strings\n\t*/\n\tfunction hasAllEmptyValues(row) {\n\t\treturn isEmptyRow(row, true);\n\t}\n\t//#endregion\n\t//#region 
src/utils/errors.ts\n\t/**\n\t* Base class for all library errors.\n\t* Module-specific errors should extend this class.\n\t*\n\t* Features:\n\t* - Supports ES2022 error cause for error chaining\n\t* - Properly captures stack trace\n\t* - Sets correct prototype for instanceof checks\n\t* - JSON serialization support for logging\n\t*/\n\tvar BaseError = class extends Error {\n\t\tconstructor(message, options) {\n\t\t\tsuper(message, options);\n\t\t\tthis.name = \"BaseError\";\n\t\t\tObject.setPrototypeOf(this, new.target.prototype);\n\t\t\tif (Error.captureStackTrace) Error.captureStackTrace(this, this.constructor);\n\t\t}\n\t\t/**\n\t\t* Serialize error for logging/transmission.\n\t\t* Includes cause chain for debugging.\n\t\t*/\n\t\ttoJSON() {\n\t\t\treturn {\n\t\t\t\tname: this.name,\n\t\t\t\tmessage: this.message,\n\t\t\t\tstack: this.stack,\n\t\t\t\tcause: this.cause instanceof Error ? errorToJSON(this.cause) : this.cause\n\t\t\t};\n\t\t}\n\t};\n\t/**\n\t* Serialize any Error to a plain object for logging/transmission.\n\t* Handles both BaseError and native Error instances.\n\t*/\n\tfunction errorToJSON(err) {\n\t\tif (err instanceof BaseError) return err.toJSON();\n\t\treturn {\n\t\t\tname: err.name,\n\t\t\tmessage: err.message,\n\t\t\tstack: err.stack,\n\t\t\tcause: err.cause instanceof Error ? 
errorToJSON(err.cause) : err.cause\n\t\t};\n\t}\n\t//#endregion\n\t//#region src/modules/csv/errors.ts\n\t/**\n\t* CSV module error types.\n\t*/\n\t/**\n\t* Base class for all CSV-related errors.\n\t*/\n\tvar CsvError = class extends BaseError {\n\t\tconstructor(..._args) {\n\t\t\tsuper(..._args);\n\t\t\tthis.name = \"CsvError\";\n\t\t}\n\t};\n\t//#endregion\n\t//#region src/modules/csv/parse/helpers.ts\n\t/**\n\t* CSV Parse Utilities\n\t*\n\t* Shared parsing helpers used by both sync (parseCsv) and streaming (CsvParserStream)\n\t* parsers to ensure consistent behavior:\n\t*\n\t* - Header processing: Handle headers option (true/array/transform)\n\t* - Column validation: Check row length against expected column count\n\t* - Row-to-object conversion: Transform string[] to Record<string, any>\n\t* - Dynamic typing: Apply type coercion based on configuration\n\t*\n\t* These utilities are extracted to avoid code duplication between\n\t* the batch parser (parse.ts) and the streaming parser (csv-stream.ts).\n\t*/\n\t/**\n\t* Process headers from first row or configuration.\n\t* Shared logic between parseCsv and CsvParserStream.\n\t*\n\t* @param row - The current row being processed\n\t* @param options - Header processing options\n\t* @param existingHeaders - Already configured headers (for array case)\n\t* @returns Processing result or null if headers not applicable\n\t*/\n\tfunction processHeaders(row, options, existingHeaders) {\n\t\tconst { headers, groupColumnsByName = false } = options;\n\t\tif (existingHeaders !== null && Array.isArray(headers)) return null;\n\t\tlet rawHeaders;\n\t\tlet skipCurrentRow;\n\t\tif (typeof headers === \"function\") {\n\t\t\trawHeaders = headers(row);\n\t\t\tif (rawHeaders.length !== row.length) throw new CsvError(`Header function returned ${rawHeaders.length} headers but row has ${row.length} columns. 
The header function must return an array with the same length as the input row.`);\n\t\t\tskipCurrentRow = true;\n\t\t} else if (Array.isArray(headers)) {\n\t\t\trawHeaders = headers;\n\t\t\tskipCurrentRow = false;\n\t\t} else if (headers === true) {\n\t\t\trawHeaders = row;\n\t\t\tskipCurrentRow = true;\n\t\t} else return null;\n\t\tconst { headers: dedupedHeaders, renamedHeaders } = deduplicateHeadersWithRenames(rawHeaders);\n\t\treturn {\n\t\t\theaders: dedupedHeaders,\n\t\t\toriginalHeaders: groupColumnsByName ? rawHeaders.map((h) => h === null || h === void 0 ? null : String(h)) : null,\n\t\t\trenamedHeaders,\n\t\t\tskipCurrentRow\n\t\t};\n\t}\n\t/**\n\t* Validate and adjust row column count against expected headers.\n\t* Shared logic between parseCsv and CsvParserStream.\n\t*\n\t* @param row - The row to validate (will be modified in place if needed)\n\t* @param expectedCols - Expected number of columns (from headers)\n\t* @param options - Validation options\n\t* @returns Validation result\n\t*/\n\tfunction validateAndAdjustColumns(row, expectedCols, options) {\n\t\tconst { columnLess, columnMore } = options;\n\t\tconst actualCols = row.length;\n\t\tif (actualCols === expectedCols) return {\n\t\t\tisValid: true,\n\t\t\tmodified: false\n\t\t};\n\t\tif (actualCols > expectedCols) switch (columnMore) {\n\t\t\tcase \"error\": return {\n\t\t\t\tisValid: false,\n\t\t\t\terrorCode: \"TooManyFields\",\n\t\t\t\treason: `expected ${expectedCols} columns, got ${actualCols}`,\n\t\t\t\tmodified: false\n\t\t\t};\n\t\t\tcase \"truncate\":\n\t\t\t\trow.length = expectedCols;\n\t\t\t\treturn {\n\t\t\t\t\tisValid: true,\n\t\t\t\t\terrorCode: \"TooManyFields\",\n\t\t\t\t\tmodified: true\n\t\t\t\t};\n\t\t\tcase \"keep\": return {\n\t\t\t\tisValid: true,\n\t\t\t\terrorCode: \"TooManyFields\",\n\t\t\t\tmodified: true,\n\t\t\t\textras: row.splice(expectedCols)\n\t\t\t};\n\t\t\tdefault: {\n\t\t\t\tconst _never = columnMore;\n\t\t\t\tthrow new Error(`Unknown columnMore strategy: 
${_never}`);\n\t\t\t}\n\t\t}\n\t\tswitch (columnLess) {\n\t\t\tcase \"error\": return {\n\t\t\t\tisValid: false,\n\t\t\t\terrorCode: \"TooFewFields\",\n\t\t\t\treason: `expected ${expectedCols} columns, got ${actualCols}`,\n\t\t\t\tmodified: false\n\t\t\t};\n\t\t\tcase \"pad\":\n\t\t\t\twhile (row.length < expectedCols) row.push(\"\");\n\t\t\t\treturn {\n\t\t\t\t\tisValid: true,\n\t\t\t\t\terrorCode: \"TooFewFields\",\n\t\t\t\t\tmodified: true\n\t\t\t\t};\n\t\t\tdefault: {\n\t\t\t\tconst _never = columnLess;\n\t\t\t\tthrow new Error(`Unknown columnLess strategy: ${_never}`);\n\t\t\t}\n\t\t}\n\t}\n\t/**\n\t* Create a safe onSkip handler that catches errors from user callback.\n\t*\n\t* The onSkip callback is user-provided and may throw errors. We wrap it\n\t* to prevent callback errors from interrupting parsing. Errors in the\n\t* callback are silently ignored since there's no good way to surface them\n\t* in the sync parsing context.\n\t*\n\t* For better error visibility in async/streaming contexts, consider\n\t* emitting a warning event on the stream instead.\n\t*/\n\tfunction createOnSkipHandler(onSkip) {\n\t\tif (!onSkip) return null;\n\t\treturn (error, record) => {\n\t\t\ttry {\n\t\t\t\tonSkip(error, record);\n\t\t\t} catch (callbackError) {}\n\t\t};\n\t}\n\t/**\n\t* Convert a row array to an object using headers.\n\t* Internal helper for convertRowToObject.\n\t*/\n\tfunction rowToObject(row, headers) {\n\t\tconst obj = Object.create(null);\n\t\tfor (let i = 0; i < headers.length; i++) {\n\t\t\tconst header = headers[i];\n\t\t\tif (header !== null && header !== void 0 && header !== \"__proto__\") obj[header] = row[i] ?? 
\"\";\n\t\t}\n\t\treturn obj;\n\t}\n\t/**\n\t* Convert a row array to an object, optionally grouping duplicate column names.\n\t* Unified function that handles both normal and grouped modes.\n\t*\n\t* @param row - The row values as an array\n\t* @param headers - The deduplicated header names\n\t* @param originalHeaders - The original (non-deduplicated) headers for grouping\n\t* @param groupColumnsByName - Whether to group duplicate column names\n\t* @returns Object with header keys and row values\n\t*/\n\tfunction convertRowToObject(row, headers, originalHeaders, groupColumnsByName) {\n\t\tif (groupColumnsByName && originalHeaders) return rowToObjectGrouped(row, originalHeaders);\n\t\treturn rowToObject(row, headers);\n\t}\n\t/**\n\t* Convert a row array to an object, grouping duplicate column names.\n\t* Internal helper for convertRowToObject.\n\t*/\n\tfunction rowToObjectGrouped(row, headers) {\n\t\tconst obj = Object.create(null);\n\t\tfor (let i = 0; i < headers.length; i++) {\n\t\t\tconst header = headers[i];\n\t\t\tif (header !== null && header !== void 0 && header !== \"__proto__\") {\n\t\t\t\tconst value = row[i] ?? 
\"\";\n\t\t\t\tif (header in obj) {\n\t\t\t\t\tconst existing = obj[header];\n\t\t\t\t\tif (Array.isArray(existing)) existing.push(value);\n\t\t\t\t\telse obj[header] = [existing, value];\n\t\t\t\t} else obj[header] = value;\n\t\t\t}\n\t\t}\n\t\treturn obj;\n\t}\n\t/**\n\t* Filter out null/undefined values from a header array.\n\t* Returns only the valid string headers.\n\t*\n\t* @param headers - Header array that may contain null/undefined values\n\t* @returns Array of valid string headers (null/undefined removed)\n\t*/\n\tfunction filterValidHeaders(headers) {\n\t\treturn headers.filter((h) => h !== null && h !== void 0);\n\t}\n\t//#endregion\n\t//#region src/modules/csv/utils/detect.ts\n\t/**\n\t* CSV Detection Utilities\n\t*\n\t* Auto-detection of CSV characteristics:\n\t* - Delimiter detection (comma, tab, semicolon, pipe, etc.)\n\t* - Line ending detection (LF, CRLF, CR)\n\t* - Quote character normalization\n\t*\n\t* This module is part of the csv/utils subsystem:\n\t* - detect.ts: Auto-detection of CSV format\n\t* - row.ts: Row format conversions (RowHashArray, headers)\n\t* - dynamic-typing.ts: Type coercion (string -> number/boolean/date)\n\t* - number.ts: Number parsing utilities\n\t* - generate.ts: Test data generation\n\t*/\n\t/**\n\t* Escape special regex characters\n\t*/\n\tfunction escapeRegex(str) {\n\t\treturn str.replace(/[.*+?^${}()|[\\]\\\\]/g, \"\\\\$&\");\n\t}\n\t/**\n\t* Normalize quote option to { enabled, char } form.\n\t* Centralizes the quote/false/null handling logic.\n\t*/\n\tfunction normalizeQuoteOption(option) {\n\t\tif (option === false || option === null) return {\n\t\t\tenabled: false,\n\t\t\tchar: \"\"\n\t\t};\n\t\treturn {\n\t\t\tenabled: true,\n\t\t\tchar: option ?? 
\"\\\"\"\n\t\t};\n\t}\n\t/**\n\t* Normalize escape option to { enabled, char } form.\n\t* Consistent with normalizeQuoteOption API design.\n\t*\n\t* @param escapeOption - User's escape option (string, false, null, or undefined)\n\t* @param quoteChar - The quote character (used as default when escape is undefined)\n\t* @returns { enabled: boolean, char: string }\n\t* - enabled=false, char=\"\" when explicitly disabled (false/null)\n\t* - enabled=true, char=quoteChar when undefined (default behavior)\n\t* - enabled=true, char=escapeOption when string provided\n\t*/\n\tfunction normalizeEscapeOption(escapeOption, quoteChar) {\n\t\tif (escapeOption === false || escapeOption === null) return {\n\t\t\tenabled: false,\n\t\t\tchar: \"\"\n\t\t};\n\t\treturn {\n\t\t\tenabled: true,\n\t\t\tchar: escapeOption ?? quoteChar\n\t\t};\n\t}\n\t/**\n\t* Common CSV delimiters to try during auto-detection\n\t* Order matters - comma is most common, then semicolon (European), tab, pipe\n\t*/\n\tconst AUTO_DETECT_DELIMITERS = [\n\t\t\",\",\n\t\t\";\",\n\t\t\"\t\",\n\t\t\"|\"\n\t];\n\t/**\n\t* Default delimiter when auto-detection fails\n\t*/\n\tconst DEFAULT_DELIMITER = \",\";\n\t/**\n\t* Characters that trigger formula escaping (CSV injection prevention).\n\t* Per OWASP recommendations, these characters at the start of a field\n\t* could be interpreted as formulas by spreadsheet applications.\n\t*\n\t* @see https://owasp.org/www-community/attacks/CSV_Injection\n\t*/\n\tconst FORMULA_ESCAPE_CHARS = new Set([\n\t\t\"=\",\n\t\t\"+\",\n\t\t\"-\",\n\t\t\"@\",\n\t\t\"\t\",\n\t\t\"\\r\",\n\t\t\"\\n\",\n\t\t\"\uFF1D\",\n\t\t\"\uFF0B\",\n\t\t\"\uFF0D\",\n\t\t\"\uFF20\"\n\t]);\n\t/**\n\t* Strip UTF-8 BOM (Byte Order Mark) from start of string if present.\n\t* Excel exports UTF-8 CSV files with BOM (\\ufeff).\n\t*\n\t* @param input - String to process\n\t* @returns String without BOM\n\t*/\n\tfunction stripBom(input) {\n\t\treturn input.charCodeAt(0) === 65279 ? 
input.slice(1) : input;\n\t}\n\t/**\n\t* Check if a string starts with a formula escape character.\n\t* Used for CSV injection prevention.\n\t*/\n\tfunction startsWithFormulaChar(str) {\n\t\treturn str.length > 0 && FORMULA_ESCAPE_CHARS.has(str[0]);\n\t}\n\t/**\n\t* Detect the line terminator used in a string.\n\t* Uses quote-aware detection to avoid detecting newlines inside quoted fields.\n\t*\n\t* @param input - String to analyze\n\t* @param quote - Quote character (default: '\"')\n\t* @returns Detected line terminator or '\\n' as default\n\t*\n\t* @example\n\t* detectLinebreak('a,b\\r\\nc,d') // '\\r\\n'\n\t* detectLinebreak('a,b\\nc,d') // '\\n'\n\t* detectLinebreak('a,b\\rc,d') // '\\r'\n\t* detectLinebreak('a,b,c') // '\\n' (default)\n\t* detectLinebreak('\"a\\nb\",c\\r\\nd') // '\\r\\n' (ignores newline in quotes)\n\t*/\n\tfunction detectLinebreak(input, quote = \"\\\"\") {\n\t\tlet inQuote = false;\n\t\tfor (let i = 0; i < input.length; i++) {\n\t\t\tconst char = input[i];\n\t\t\tif (char === quote) {\n\t\t\t\tif (inQuote && input[i + 1] === quote) {\n\t\t\t\t\ti++;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tinQuote = !inQuote;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (inQuote) continue;\n\t\t\tif (char === \"\\r\") return input[i + 1] === \"\\n\" ? \"\\r\\n\" : \"\\r\";\n\t\t\tif (char === \"\\n\") return \"\\n\";\n\t\t}\n\t\treturn \"\\n\";\n\t}\n\t/**\n\t* Auto-detect the delimiter used in a CSV string\n\t*\n\t* Algorithm:\n\t* 1. Sample the first few lines (up to 10) for analysis\n\t* 2. For each candidate delimiter:\n\t* - Count occurrences per line (respecting quotes)\n\t* - Check consistency: all lines should have the same count\n\t* - Higher count = more fields = better delimiter candidate\n\t* 3. Choose the delimiter with highest consistent field count\n\t*\n\t* Tie-breaking rules (in priority order):\n\t* 1. Lowest delta (variance) wins - more consistent field counts across lines\n\t* 2. 
On delta tie, highest avgFieldCount wins - more fields per row\n\t* 3. On complete tie, array order wins - first delimiter in delimitersToGuess\n\t* (default order: comma, semicolon, tab, pipe)\n\t*\n\t* @param input - CSV string to analyze\n\t* @param quote - Quote character (default: '\"')\n\t* @param delimitersToGuess - Custom list of delimiters to try (default: [\",\", \";\", \"\\t\", \"|\"])\n\t* @returns Detected delimiter or first delimiter in list\n\t*\n\t* @example\n\t* detectDelimiter('a,b,c\\n1,2,3') // ','\n\t* detectDelimiter('a;b;c\\n1;2;3') // ';'\n\t* detectDelimiter('a\\tb\\tc\\n1\\t2\\t3') // '\\t'\n\t* detectDelimiter('a:b:c\\n1:2:3', '\"', [':']) // ':'\n\t*/\n\tfunction detectDelimiter(input, quote = \"\\\"\", delimitersToGuess, comment, skipEmptyLines) {\n\t\tconst delimiters = delimitersToGuess ?? AUTO_DETECT_DELIMITERS;\n\t\tconst defaultDelimiter = delimiters[0] ?? DEFAULT_DELIMITER;\n\t\tconst lines = getSampleLines(input, 10, quote, comment, skipEmptyLines);\n\t\tif (lines.length === 0) return defaultDelimiter;\n\t\tlet bestDelimiter = defaultDelimiter;\n\t\tlet bestDelta;\n\t\tlet bestAvgFieldCount;\n\t\tfor (const delimiter of delimiters) {\n\t\t\tconst { avgFieldCount, delta } = scoreDelimiter(lines, delimiter, quote);\n\t\t\tif (avgFieldCount <= 1.99) continue;\n\t\t\tif (bestDelta === void 0 || delta < bestDelta || delta === bestDelta && (bestAvgFieldCount === void 0 || avgFieldCount > bestAvgFieldCount)) {\n\t\t\t\tbestDelta = delta;\n\t\t\t\tbestAvgFieldCount = avgFieldCount;\n\t\t\t\tbestDelimiter = delimiter;\n\t\t\t}\n\t\t}\n\t\treturn bestDelimiter;\n\t}\n\t/**\n\t* Get sample lines from input, skipping empty lines\n\t*/\n\tfunction getSampleLines(input, maxLines, quote, comment, skipEmptyLines) {\n\t\tconst lines = [];\n\t\tlet start = 0;\n\t\tlet inQuotes = false;\n\t\tconst len = input.length;\n\t\tfor (let i = 0; i < len && lines.length < maxLines; i++) {\n\t\t\tconst char = input[i];\n\t\t\tif (quote && char === quote) if 
(inQuotes && input[i + 1] === quote) i++;\n\t\t\telse inQuotes = !inQuotes;\n\t\t\telse if (!inQuotes && (char === \"\\n\" || char === \"\\r\")) {\n\t\t\t\tconst line = input.slice(start, i);\n\t\t\t\tif (comment && line.startsWith(comment)) {} else {\n\t\t\t\t\tconst trimmed = line.trim();\n\t\t\t\t\tif (!(line.length === 0 || skipEmptyLines && trimmed === \"\") && trimmed !== \"\") lines.push(line);\n\t\t\t\t}\n\t\t\t\tif (char === \"\\r\" && input[i + 1] === \"\\n\") i++;\n\t\t\t\tstart = i + 1;\n\t\t\t}\n\t\t}\n\t\tif (start < len && lines.length < maxLines) {\n\t\t\tconst line = input.slice(start);\n\t\t\tif (!comment || !line.startsWith(comment)) {\n\t\t\t\tconst trimmed = line.trim();\n\t\t\t\tif (!(line.length === 0 || skipEmptyLines && trimmed === \"\") && trimmed !== \"\") lines.push(line);\n\t\t\t}\n\t\t}\n\t\treturn lines;\n\t}\n\t/**\n\t* Score a delimiter candidate based on consistency and field count\n\t*\n\t* Returns 0 if:\n\t* - Delimiter not found in any line\n\t* - Field counts are inconsistent across lines\n\t*\n\t* Higher score = more fields per row with consistent counts\n\t*/\n\tfunction scoreDelimiter(lines, delimiter, quote) {\n\t\tif (lines.length === 0) return {\n\t\t\tavgFieldCount: 0,\n\t\t\tdelta: Number.POSITIVE_INFINITY\n\t\t};\n\t\tlet delta = 0;\n\t\tlet avgFieldCount = 0;\n\t\tlet prevFieldCount;\n\t\tfor (const line of lines) {\n\t\t\tconst fieldCount = countDelimiters(line, delimiter, quote) + 1;\n\t\t\tavgFieldCount += fieldCount;\n\t\t\tif (prevFieldCount === void 0) {\n\t\t\t\tprevFieldCount = fieldCount;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tdelta += Math.abs(fieldCount - prevFieldCount);\n\t\t\tprevFieldCount = fieldCount;\n\t\t}\n\t\tavgFieldCount /= lines.length;\n\t\treturn {\n\t\t\tavgFieldCount,\n\t\t\tdelta\n\t\t};\n\t}\n\t/**\n\t* Count delimiters in a line, respecting quoted fields\n\t*/\n\tfunction countDelimiters(line, delimiter, quote) {\n\t\tlet count = 0;\n\t\tlet inQuotes = false;\n\t\tconst len = 
line.length;\n\t\tconst delimLen = delimiter.length;\n\t\tfor (let i = 0; i < len; i++) if (quote && line[i] === quote) if (inQuotes && line[i + 1] === quote) i++;\n\t\telse inQuotes = !inQuotes;\n\t\telse if (!inQuotes) {\n\t\t\tif (delimLen === 1) {\n\t\t\t\tif (line[i] === delimiter) count++;\n\t\t\t} else if (line.startsWith(delimiter, i)) {\n\t\t\t\tcount++;\n\t\t\t\ti += delimLen - 1;\n\t\t\t}\n\t\t}\n\t\treturn count;\n\t}\n\t//#endregion\n\t//#region src/modules/csv/constants.ts\n\t/**\n\t* CSV Module Constants\n\t*\n\t* Shared constants used across the CSV module.\n\t* Extracted to avoid circular dependencies between parse-core and utils/parse.\n\t*/\n\t/**\n\t* Pre-compiled regex for line splitting (matches CR, LF, or CRLF)\n\t*/\n\tconst DEFAULT_LINEBREAK_REGEX = /\\r\\n|\\r|\\n/;\n\t/**\n\t* Lazily initialized TextEncoder + buffers for byte length calculations.\n\t* Avoids eager allocation at module load time.\n\t*/\n\tlet sharedTextEncoder = null;\n\tlet singleCharBuffer = null;\n\tlet encodeBuffer = null;\n\tfunction getEncoder() {\n\t\tif (!sharedTextEncoder) {\n\t\t\tsharedTextEncoder = new TextEncoder();\n\t\t\tsingleCharBuffer = new Uint8Array(4);\n\t\t\tencodeBuffer = new Uint8Array(4096);\n\t\t}\n\t\treturn sharedTextEncoder;\n\t}\n\t/**\n\t* Get UTF-8 byte length of a string efficiently.\n\t* Uses fast path for ASCII-only strings and encodeInto for mixed content.\n\t*\n\t* @param text - String to measure\n\t* @returns UTF-8 byte length\n\t*/\n\tfunction getUtf8ByteLength(text) {\n\t\tconst len = text.length;\n\t\tif (len === 0) return 0;\n\t\tif (len === 1) {\n\t\t\tif (text.charCodeAt(0) < 128) return 1;\n\t\t\treturn getEncoder().encodeInto(text, singleCharBuffer).written;\n\t\t}\n\t\tlet isAllAscii = true;\n\t\tfor (let i = 0; i < len; i++) if (text.charCodeAt(i) >= 128) {\n\t\t\tisAllAscii = false;\n\t\t\tbreak;\n\t\t}\n\t\tif (isAllAscii) return len;\n\t\tconst encoder = getEncoder();\n\t\tif (len * 3 > encodeBuffer.length) encodeBuffer = 
new Uint8Array(len * 3);\n\t\treturn encoder.encodeInto(text, encodeBuffer).written;\n\t}\n\t//#endregion\n\t//#region src/modules/csv/parse/config.ts\n\t/**\n\t* Create a normalized ParseConfig from options.\n\t* This is the single source of truth for configuration normalization,\n\t* used by both sync and streaming parsers.\n\t*\n\t* @example Batch parsing\n\t* ```ts\n\t* const { config, processedInput } = createParseConfig({ input: csvString, options });\n\t* ```\n\t*\n\t* @example Streaming parsing\n\t* ```ts\n\t* const { config } = createParseConfig({ options });\n\t* // Later, after delimiter detection:\n\t* config.delimiter = detectedDelimiter;\n\t* ```\n\t*/\n\tfunction createParseConfig(opts) {\n\t\tconst { input, options, detectedDelimiter } = opts;\n\t\tconst { delimiter: delimiterOption = \",\", delimitersToGuess, lineEnding: lineEndingOption = \"\", quote: quoteOption = \"\\\"\", escape: escapeOption, skipEmptyLines = false, trim = false, ltrim = false, rtrim = false, headers = false, comment, maxRows, toLine, skipLines = 0, skipRows = 0, columnMismatch, groupColumnsByName = false, fastMode = false, dynamicTyping, castDate, beforeFirstChunk, info: infoOption = false, raw: rawOption = false, relaxQuotes = false, skipRecordsWithError = false, skipRecordsWithEmptyValues = false, onSkip, maxRowBytes } = options;\n\t\tconst columnLess = columnMismatch?.less ?? \"error\";\n\t\tconst columnMore = columnMismatch?.more ?? 
\"error\";\n\t\tlet processedInput;\n\t\tif (input !== void 0) {\n\t\t\tprocessedInput = input;\n\t\t\tif (beforeFirstChunk) {\n\t\t\t\tconst result = beforeFirstChunk(processedInput);\n\t\t\t\tif (typeof result === \"string\") processedInput = result;\n\t\t\t\telse if (result !== void 0 && result !== null) throw new CsvError(`beforeFirstChunk must return a string or undefined, got ${typeof result}`);\n\t\t\t}\n\t\t\tprocessedInput = stripBom(processedInput);\n\t\t}\n\t\tconst shouldSkipEmpty = skipEmptyLines;\n\t\tconst { enabled: quoteEnabled, char: quote } = normalizeQuoteOption(quoteOption);\n\t\tconst escapeNormalized = normalizeEscapeOption(escapeOption, quote);\n\t\tconst escape = escapeNormalized.enabled ? escapeNormalized.char || quote : \"\";\n\t\tlet delimiter;\n\t\tif (detectedDelimiter !== void 0) delimiter = detectedDelimiter;\n\t\telse if (delimiterOption === \"\" && processedInput !== void 0) delimiter = detectDelimiter(processedInput, quote || \"\\\"\", delimitersToGuess, comment, shouldSkipEmpty);\n\t\telse if (delimiterOption === \"\") delimiter = \",\";\n\t\telse delimiter = delimiterOption;\n\t\tconst linebreak = lineEndingOption || (processedInput !== void 0 ? detectLinebreak(processedInput) : \"\\n\");\n\t\treturn {\n\t\t\tconfig: {\n\t\t\t\tdelimiter,\n\t\t\t\tlinebreak,\n\t\t\t\tlinebreakRegex: linebreak && linebreak !== \"\\n\" && linebreak !== \"\\r\\n\" && linebreak !== \"\\r\" ? 
linebreak : DEFAULT_LINEBREAK_REGEX,\n\t\t\t\tquote,\n\t\t\t\tescape,\n\t\t\t\tquoteEnabled,\n\t\t\t\ttrimField: makeTrimField(trim, ltrim, rtrim),\n\t\t\t\ttrimFieldIsIdentity: !trim && !ltrim && !rtrim,\n\t\t\t\tshouldSkipEmpty,\n\t\t\t\tskipLines,\n\t\t\t\tskipRows,\n\t\t\t\tmaxRows,\n\t\t\t\ttoLine,\n\t\t\t\tmaxRowBytes,\n\t\t\t\tcomment,\n\t\t\t\tfastMode,\n\t\t\t\trelaxQuotes,\n\t\t\t\tcolumnLess,\n\t\t\t\tcolumnMore,\n\t\t\t\tgroupColumnsByName,\n\t\t\t\tskipRecordsWithError,\n\t\t\t\tskipRecordsWithEmptyValues,\n\t\t\t\tinfoOption,\n\t\t\t\trawOption,\n\t\t\t\tdynamicTyping,\n\t\t\t\tcastDate,\n\t\t\t\tinvokeOnSkip: createOnSkipHandler(onSkip),\n\t\t\t\theaders\n\t\t\t},\n\t\t\tprocessedInput\n\t\t};\n\t}\n\t/**\n\t* Resolve options into a normalized config object.\n\t* Convenience wrapper around createParseConfig that ensures processedInput is non-null.\n\t*/\n\tfunction resolveParseConfig(input, options) {\n\t\tconst result = createParseConfig({\n\t\t\tinput,\n\t\t\toptions\n\t\t});\n\t\treturn {\n\t\t\tconfig: result.config,\n\t\t\tprocessedInput: result.processedInput\n\t\t};\n\t}\n\t/**\n\t* Convert ParseConfig to ScannerConfig\n\t*/\n\tfunction toScannerConfig(config) {\n\t\treturn {\n\t\t\tdelimiter: config.delimiter,\n\t\t\tquote: config.quote,\n\t\t\tescape: config.escape,\n\t\t\tquoteEnabled: config.quoteEnabled,\n\t\t\trelaxQuotes: config.relaxQuotes\n\t\t};\n\t}\n\t/**\n\t* Create a trim function based on options\n\t*/\n\tfunction makeTrimField(trim, ltrim, rtrim) {\n\t\tif (trim || ltrim && rtrim) return (s) => s.trim();\n\t\tif (ltrim) return (s) => s.trimStart();\n\t\tif (rtrim) return (s) => s.trimEnd();\n\t\treturn (s) => s;\n\t}\n\t//#endregion\n\t//#region src/modules/csv/parse/state.ts\n\t/**\n\t* Create initial parse state with optional header configuration\n\t*/\n\tfunction createParseState(config) {\n\t\tconst state = {\n\t\t\tlineNumber: 0,\n\t\t\tdataRowCount: 0,\n\t\t\tskippedDataRows: 0,\n\t\t\ttruncated: false,\n\t\t\theaderRow: 
null,\n\t\t\toriginalHeaders: null,\n\t\t\tuseHeaders: false,\n\t\t\theaderRowProcessed: false,\n\t\t\trenamedHeadersForMeta: null,\n\t\t\tcurrentRowStartLine: config.infoOption ? 1 : 0,\n\t\t\tcurrentRowStartOffset: 0,\n\t\t\tcurrentRowQuoted: [],\n\t\t\tcurrentRawRow: \"\"\n\t\t};\n\t\tconst { headers, groupColumnsByName } = config;\n\t\tif (headers === true) state.useHeaders = true;\n\t\telse if (Array.isArray(headers)) {\n\t\t\tconst result = processHeaders([], {\n\t\t\t\theaders,\n\t\t\t\tgroupColumnsByName\n\t\t\t}, null);\n\t\t\tif (result) {\n\t\t\t\tstate.headerRow = result.headers;\n\t\t\t\tstate.originalHeaders = result.originalHeaders;\n\t\t\t\tstate.renamedHeadersForMeta = result.renamedHeaders;\n\t\t\t}\n\t\t\tstate.useHeaders = true;\n\t\t\tstate.headerRowProcessed = true;\n\t\t} else if (typeof headers === \"function\") state.useHeaders = true;\n\t\treturn state;\n\t}\n\t/**\n\t* Reset info state for next row\n\t*/\n\tfunction resetInfoState(state, trackInfo, trackRaw, nextLine, nextOffset) {\n\t\tif (trackInfo) {\n\t\t\tstate.currentRowQuoted = [];\n\t\t\tstate.currentRowStartLine = nextLine;\n\t\t\tstate.currentRowStartOffset = nextOffset;\n\t\t}\n\t\tif (trackRaw) state.currentRawRow = \"\";\n\t}\n\t/**\n\t* Pre-allocated frozen array of false values for fast mode quoted tracking.\n\t* In fast mode (no quote detection), all fields are unquoted, so we can\n\t* return a shared reference instead of allocating per row.\n\t*\n\t* IMPORTANT: This array is frozen and must NOT be modified.\n\t* Callers should copy if they need to store/modify the values.\n\t*/\n\tconst SHARED_FALSE_ARRAY_SIZE = 256;\n\tconst SHARED_FALSE_ARRAY = Object.freeze(new Array(SHARED_FALSE_ARRAY_SIZE).fill(false));\n\t/**\n\t* Get a shared array of false values for unquoted field tracking.\n\t* Returns a frozen shared reference for common cases to avoid per-row allocation.\n\t*\n\t* IMPORTANT: The returned array must NOT be modified. 
If you need to store\n\t* the values, make a copy: `[...getUnquotedArray(n)]` or `.slice(0, n)`.\n\t*\n\t* @param length - Number of fields in the row\n\t* @returns Shared frozen array (for length <= 256) or new array (for larger rows)\n\t*/\n\tfunction getUnquotedArray(length) {\n\t\tif (length <= SHARED_FALSE_ARRAY_SIZE) return SHARED_FALSE_ARRAY;\n\t\treturn new Array(length).fill(false);\n\t}\n\tArray.from({ length: 60 }, (_, i) => i < 10 ? `0${i}` : `${i}`);\n\tconst C_0 = 48;\n\tconst C_DASH = 45;\n\tconst C_SLASH = 47;\n\tconst C_COLON = 58;\n\tconst C_T = 84;\n\tconst C_SPACE = 32;\n\tconst C_Z = 90;\n\tconst C_PLUS = 43;\n\tconst C_DOT = 46;\n\tconst digit2 = (s, i) => (s.charCodeAt(i) - C_0) * 10 + s.charCodeAt(i + 1) - C_0 | 0;\n\tconst digit4 = (s, i) => (s.charCodeAt(i) - C_0) * 1e3 + (s.charCodeAt(i + 1) - C_0) * 100 + (s.charCodeAt(i + 2) - C_0) * 10 + s.charCodeAt(i + 3) - C_0 | 0;\n\tconst DAYS_IN_MONTH = [\n\t\t0,\n\t\t31,\n\t\t29,\n\t\t31,\n\t\t30,\n\t\t31,\n\t\t30,\n\t\t31,\n\t\t31,\n\t\t30,\n\t\t31,\n\t\t30,\n\t\t31\n\t];\n\tfunction validateDate(y, m, d) {\n\t\tif (m < 1 || m > 12 || d < 1 || d > DAYS_IN_MONTH[m]) return null;\n\t\tconst date = new Date(y, m - 1, d);\n\t\treturn date.getMonth() === m - 1 ? 
date : null;\n\t}\n\tfunction validateDateTime(y, m, d, h, min, s) {\n\t\tif (m < 1 || m > 12 || d < 1 || d > DAYS_IN_MONTH[m]) return null;\n\t\tif (h > 23 || min > 59 || s > 59) return null;\n\t\treturn new Date(y, m - 1, d, h, min, s);\n\t}\n\tfunction parseISO(s) {\n\t\tif (s.charCodeAt(4) !== C_DASH || s.charCodeAt(7) !== C_DASH) return null;\n\t\treturn validateDate(digit4(s, 0), digit2(s, 5), digit2(s, 8));\n\t}\n\tfunction parseISOT(s) {\n\t\tif (s.charCodeAt(4) !== C_DASH || s.charCodeAt(7) !== C_DASH || s.charCodeAt(10) !== C_T || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;\n\t\treturn validateDateTime(digit4(s, 0), digit2(s, 5), digit2(s, 8), digit2(s, 11), digit2(s, 14), digit2(s, 17));\n\t}\n\tfunction parseISOSpace(s) {\n\t\tif (s.charCodeAt(4) !== C_DASH || s.charCodeAt(7) !== C_DASH || s.charCodeAt(10) !== C_SPACE || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;\n\t\treturn validateDateTime(digit4(s, 0), digit2(s, 5), digit2(s, 8), digit2(s, 11), digit2(s, 14), digit2(s, 17));\n\t}\n\tfunction parseISOZ(s) {\n\t\tif (s.charCodeAt(19) !== C_Z) return null;\n\t\tconst d = new Date(s);\n\t\treturn isNaN(d.getTime()) ? null : d;\n\t}\n\tfunction parseISOMsZ(s) {\n\t\tif (s.charCodeAt(19) !== C_DOT || s.charCodeAt(23) !== C_Z) return null;\n\t\tconst d = new Date(s);\n\t\treturn isNaN(d.getTime()) ? null : d;\n\t}\n\tfunction parseISOOffset(s) {\n\t\tconst c = s.charCodeAt(19);\n\t\tif (c !== C_PLUS && c !== C_DASH) return null;\n\t\tconst d = new Date(s);\n\t\treturn isNaN(d.getTime()) ? null : d;\n\t}\n\tfunction parseISOMsOffset(s) {\n\t\tif (s.charCodeAt(19) !== C_DOT) return null;\n\t\tconst c = s.charCodeAt(23);\n\t\tif (c !== C_PLUS && c !== C_DASH) return null;\n\t\tconst d = new Date(s);\n\t\treturn isNaN(d.getTime()) ? 
null : d;\n\t}\n\tfunction parseUS(s) {\n\t\tconst sep = s.charCodeAt(2);\n\t\tif (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;\n\t\treturn validateDate(digit4(s, 6), digit2(s, 0), digit2(s, 3));\n\t}\n\tfunction parseEU(s) {\n\t\tconst sep = s.charCodeAt(2);\n\t\tif (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;\n\t\treturn validateDate(digit4(s, 6), digit2(s, 3), digit2(s, 0));\n\t}\n\tfunction parseUSTime(s) {\n\t\tconst sep = s.charCodeAt(2);\n\t\tif (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;\n\t\tif (s.charCodeAt(10) !== C_SPACE || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;\n\t\treturn validateDateTime(digit4(s, 6), digit2(s, 0), digit2(s, 3), digit2(s, 11), digit2(s, 14), digit2(s, 17));\n\t}\n\tfunction parseEUTime(s) {\n\t\tconst sep = s.charCodeAt(2);\n\t\tif (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;\n\t\tif (s.charCodeAt(10) !== C_SPACE || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;\n\t\treturn validateDateTime(digit4(s, 6), digit2(s, 3), digit2(s, 0), digit2(s, 11), digit2(s, 14), digit2(s, 17));\n\t}\n\tconst PARSERS = {\n\t\t\"YYYY-MM-DD\": parseISO,\n\t\t\"YYYY-MM-DD[T]HH:mm:ss\": parseISOT,\n\t\t\"YYYY-MM-DD HH:mm:ss\": parseISOSpace,\n\t\t\"YYYY-MM-DD[T]HH:mm:ssZ\": (s) => s.length === 20 ? parseISOZ(s) : s.length === 25 ? parseISOOffset(s) : null,\n\t\t\"YYYY-MM-DD[T]HH:mm:ss.SSSZ\": (s) => s.length === 24 ? parseISOMsZ(s) : s.length === 29 ? 
parseISOMsOffset(s) : null,\n\t\t\"MM-DD-YYYY\": parseUS,\n\t\t\"MM-DD-YYYY HH:mm:ss\": parseUSTime,\n\t\t\"MM/DD/YYYY HH:mm:ss\": parseUSTime,\n\t\t\"DD-MM-YYYY\": parseEU,\n\t\t\"DD-MM-YYYY HH:mm:ss\": parseEUTime,\n\t\t\"DD/MM/YYYY HH:mm:ss\": parseEUTime\n\t};\n\tconst AUTO_DETECT = [\n\t\t[10, [parseISO]],\n\t\t[19, [parseISOT, parseISOSpace]],\n\t\t[20, [parseISOZ]],\n\t\t[24, [parseISOMsZ]],\n\t\t[25, [parseISOOffset]],\n\t\t[29, [parseISOMsOffset]]\n\t];\n\t/**\n\t* Optimized date parser for batch processing\n\t*\n\t* @example\n\t* const parser = DateParser.create([\"YYYY-MM-DD\"]);\n\t* const dates = parser.parseAll(csvStrings);\n\t*/\n\tvar DateParser = class DateParser {\n\t\tconstructor(fns) {\n\t\t\tthis.parse = (value) => {\n\t\t\t\tif (!value) return null;\n\t\t\t\tconst s = value.trim();\n\t\t\t\tif (!s) return null;\n\t\t\t\tif (this.single) return this.fn0(s);\n\t\t\t\tfor (let i = 0, len = this.fns.length; i < len; i++) {\n\t\t\t\t\tconst r = this.fns[i](s);\n\t\t\t\t\tif (r) return r;\n\t\t\t\t}\n\t\t\t\treturn null;\n\t\t\t};\n\t\t\tthis.fns = fns;\n\t\t\tthis.single = fns.length === 1;\n\t\t\tthis.fn0 = fns[0];\n\t\t}\n\t\t/** Create parser for specific formats */\n\t\tstatic create(formats) {\n\t\t\treturn new DateParser(formats.map((f) => PARSERS[f]).filter(Boolean));\n\t\t}\n\t\t/** Create parser for auto-detecting ISO formats */\n\t\tstatic iso() {\n\t\t\tconst fns = [];\n\t\t\tfor (const [, parsers] of AUTO_DETECT) fns.push(...parsers);\n\t\t\treturn new DateParser(fns);\n\t\t}\n\t\t/** Parse array of values */\n\t\tparseAll(values) {\n\t\t\tconst len = values.length;\n\t\t\tconst out = new Array(len);\n\t\t\tconst parse = this.parse;\n\t\t\tfor (let i = 0; i < len; i++) out[i] = parse(values[i]);\n\t\t\treturn out;\n\t\t}\n\t\t/** Parse and filter valid dates */\n\t\tparseValid(values) {\n\t\t\tconst out = [];\n\t\t\tconst parse = this.parse;\n\t\t\tfor (let i = 0, len = values.length; i < len; i++) {\n\t\t\t\tconst d = 
parse(values[i]);\n\t\t\t\tif (d) out.push(d);\n\t\t\t}\n\t\t\treturn out;\n\t\t}\n\t};\n\t//#endregion\n\t//#region src/modules/csv/utils/dynamic-typing.ts\n\t/**\n\t* CSV Dynamic Typing - Automatic Type Conversion\n\t*\n\t* Functions for converting CSV string values to appropriate JavaScript types.\n\t* Supports boolean, number, null detection with customizable per-column config.\n\t*/\n\t/**\n\t* Pre-compiled regex for valid number format detection.\n\t* Matches integers, decimals, and scientific notation.\n\t* Pre-compiling avoids regex compilation overhead in the hot path.\n\t*/\n\tconst NUMERIC_REGEX = /^-?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][+-]?\\d+)?$/;\n\tlet isoDateParser = null;\n\t/**\n\t* Get or create the ISO date parser singleton\n\t*/\n\tfunction getIsoDateParser() {\n\t\tif (!isoDateParser) isoDateParser = DateParser.iso();\n\t\treturn isoDateParser;\n\t}\n\t/**\n\t* Try to parse a string as an ISO date.\n\t* Returns the Date if successful, or null if not a valid date.\n\t*\n\t* Supported formats:\n\t* - YYYY-MM-DD\n\t* - YYYY-MM-DDTHH:mm:ss\n\t* - YYYY-MM-DD HH:mm:ss\n\t* - YYYY-MM-DDTHH:mm:ssZ\n\t* - YYYY-MM-DDTHH:mm:ss.SSSZ\n\t* - YYYY-MM-DDTHH:mm:ss+HH:mm\n\t*/\n\tfunction tryParseDate(value) {\n\t\tif (!value || value.length < 10) return null;\n\t\treturn getIsoDateParser().parse(value);\n\t}\n\t/**\n\t* Check if castDate config enables date parsing for a column\n\t*/\n\tfunction shouldCastDate(castDate, columnName) {\n\t\tif (!castDate) return false;\n\t\tif (castDate === true) return true;\n\t\tif (Array.isArray(castDate) && typeof columnName === \"string\") return castDate.includes(columnName);\n\t\treturn false;\n\t}\n\t/**\n\t* Check if a charCode matches a lowercase letter (case-insensitive).\n\t* @param code - The charCode to check\n\t* @param lowercaseCode - The lowercase letter's charCode to match against\n\t* @returns true if code matches (case-insensitive)\n\t*/\n\tfunction isCharEqualIgnoreCase(code, lowercaseCode) {\n\t\treturn 
code === lowercaseCode || code === lowercaseCode - 32;\n\t}\n\t/**\n\t* Convert a string value to its appropriate JavaScript type.\n\t* Used internally by dynamicTyping feature.\n\t*\n\t* Conversion rules:\n\t* - Empty string \u2192 \"\" (unchanged)\n\t* - \"true\"/\"TRUE\"/\"True\" \u2192 true\n\t* - \"false\"/\"FALSE\"/\"False\" \u2192 false\n\t* - \"null\"/\"NULL\" \u2192 null\n\t* - Numeric strings \u2192 number (int or float)\n\t* - Everything else \u2192 original string\n\t*\n\t* Special cases:\n\t* - Leading zeros (e.g., \"007\") \u2192 preserved as string (for zip codes, IDs)\n\t* - \"Infinity\", \"-Infinity\", \"NaN\" \u2192 corresponding number values\n\t*/\n\tfunction convertValue(value) {\n\t\tconst len = value.length;\n\t\tif (len === 0) return \"\";\n\t\tconst firstChar = value.charCodeAt(0);\n\t\tif (len === 4) {\n\t\t\tif ((firstChar === 116 || firstChar === 84) && isCharEqualIgnoreCase(value.charCodeAt(1), 114) && isCharEqualIgnoreCase(value.charCodeAt(2), 117) && isCharEqualIgnoreCase(value.charCodeAt(3), 101)) return true;\n\t\t\tif ((firstChar === 110 || firstChar === 78) && isCharEqualIgnoreCase(value.charCodeAt(1), 117) && isCharEqualIgnoreCase(value.charCodeAt(2), 108) && isCharEqualIgnoreCase(value.charCodeAt(3), 108)) return null;\n\t\t} else if (len === 5 && (firstChar === 102 || firstChar === 70) && isCharEqualIgnoreCase(value.charCodeAt(1), 97) && isCharEqualIgnoreCase(value.charCodeAt(2), 108) && isCharEqualIgnoreCase(value.charCodeAt(3), 115) && isCharEqualIgnoreCase(value.charCodeAt(4), 101)) return false;\n\t\tif (firstChar >= 48 && firstChar <= 57 || firstChar === 45 || firstChar === 46 || firstChar === 73 || firstChar === 78) {\n\t\t\tif (value.charCodeAt(len - 1) <= 32) return value;\n\t\t\tif (value === \"Infinity\") return Infinity;\n\t\t\tif (value === \"-Infinity\") return -Infinity;\n\t\t\tif (value === \"NaN\") return NaN;\n\t\t\tif (firstChar === 48 && len > 1) {\n\t\t\t\tconst secondChar = value.charCodeAt(1);\n\t\t\t\tif 
(secondChar >= 48 && secondChar <= 57) return value;\n\t\t\t}\n\t\t\tif (firstChar === 45 && len > 2 && value.charCodeAt(1) === 48) {\n\t\t\t\tconst thirdChar = value.charCodeAt(2);\n\t\t\t\tif (thirdChar >= 48 && thirdChar <= 57) return value;\n\t\t\t}\n\t\t\tif (NUMERIC_REGEX.test(value)) {\n\t\t\t\tconst num = Number(value);\n\t\t\t\tif (!isNaN(num)) return num;\n\t\t\t}\n\t\t}\n\t\treturn value;\n\t}\n\t/**\n\t* Type guard to check if dynamicTyping config has custom converter function\n\t*/\n\tfunction isCustomConverter(config) {\n\t\treturn typeof config === \"function\";\n\t}\n\t/**\n\t* Apply dynamic typing to a single field value\n\t*\n\t* @param value - The string value to convert\n\t* @param columnConfig - Column-specific config (true, false, or custom function)\n\t* @returns Converted value\n\t*/\n\tfunction applyDynamicTyping(value, columnConfig) {\n\t\tif (columnConfig === false) return value;\n\t\tif (isCustomConverter(columnConfig)) return columnConfig(value);\n\t\treturn convertValue(value);\n\t}\n\t/**\n\t* Apply dynamic typing and/or date casting to a single value.\n\t* Unified helper used by both object and array row processing.\n\t*\n\t* @param value - The string value to convert\n\t* @param columnName - Column identifier (string for objects, can be used for per-column config)\n\t* @param dynamicTyping - DynamicTyping configuration\n\t* @param castDate - CastDate configuration\n\t* @returns Converted value\n\t*/\n\tfunction convertSingleValue(value, columnName, dynamicTyping, castDate) {\n\t\tif (shouldCastDate(castDate, columnName)) {\n\t\t\tconst dateValue = tryParseDate(value);\n\t\t\tif (dateValue !== null) return dateValue;\n\t\t}\n\t\tif (dynamicTyping === true) return convertValue(value);\n\t\tif (dynamicTyping === false) return value;\n\t\tif (columnName === void 0) return value;\n\t\tconst config = dynamicTyping[columnName];\n\t\tif (config === void 0) return value;\n\t\treturn applyDynamicTyping(value, config);\n\t}\n\t/**\n\t* Apply 
dynamic typing to an entire row (object form).\n\t*\n\t* Performance: Converts values IN PLACE to avoid allocating a new object.\n\t* The input object is mutated and returned with converted values.\n\t*\n\t* @param row - Row object with string values (will be mutated)\n\t* @param dynamicTyping - DynamicTyping configuration\n\t* @param castDate - CastDate configuration for date parsing\n\t* @returns The same row object with converted values\n\t*/\n\tfunction applyDynamicTypingToRow(row, dynamicTyping, castDate) {\n\t\tif (dynamicTyping === false && !castDate) return row;\n\t\tfor (const key in row) if (Object.hasOwn(row, key)) row[key] = convertSingleValue(row[key], key, dynamicTyping, castDate);\n\t\treturn row;\n\t}\n\t/**\n\t* Apply dynamic typing to an array row\n\t*\n\t* @param row - Row array with string values\n\t* @param headers - Header names (for per-column config lookup)\n\t* @param dynamicTyping - DynamicTyping configuration\n\t* @param castDate - CastDate configuration for date parsing\n\t* @returns New row array with converted values\n\t*/\n\tfunction applyDynamicTypingToArrayRow(row, headers, dynamicTyping, castDate) {\n\t\tif (dynamicTyping === false && !castDate) return row;\n\t\tif (dynamicTyping !== true && dynamicTyping !== false && !headers) return row;\n\t\treturn row.map((value, index) => {\n\t\t\tconst columnName = headers?.[index];\n\t\t\treturn convertSingleValue(value, columnName, dynamicTyping, castDate);\n\t\t});\n\t}\n\t//#endregion\n\t//#region src/modules/csv/parse/row-processor.ts\n\t/**\n\t* Process headers from a row (first data row or configured headers)\n\t* Returns true if the row should be skipped (was used as headers)\n\t*/\n\tfunction processHeaderRow(row, state, config) {\n\t\tconst result = processHeaders(row, {\n\t\t\theaders: config.headers,\n\t\t\tgroupColumnsByName: config.groupColumnsByName\n\t\t}, state.headerRow);\n\t\tif (result) {\n\t\t\tstate.headerRow = result.headers;\n\t\t\tstate.originalHeaders = 
result.originalHeaders;\n\t\t\tstate.renamedHeadersForMeta = result.renamedHeaders;\n\t\t\tstate.headerRowProcessed = true;\n\t\t\treturn result.skipCurrentRow;\n\t\t}\n\t\tstate.headerRowProcessed = true;\n\t\treturn false;\n\t}\n\t/**\n\t* Validate row column count against headers\n\t* Returns error info if validation fails, null otherwise\n\t*/\n\tfunction validateRowColumns(row, state, config) {\n\t\tif (!state.headerRow || state.headerRow.length === 0) return null;\n\t\tconst expectedCols = state.headerRow.length;\n\t\tconst actualCols = row.length;\n\t\tif (actualCols === expectedCols) return null;\n\t\tconst validation = validateAndAdjustColumns(row, expectedCols, {\n\t\t\tcolumnLess: config.columnLess,\n\t\t\tcolumnMore: config.columnMore\n\t\t});\n\t\tif (validation.errorCode) return {\n\t\t\terrorCode: validation.errorCode,\n\t\t\tmessage: validation.errorCode === \"TooManyFields\" ? `Too many fields: expected ${expectedCols}, found ${actualCols}` : `Too few fields: expected ${expectedCols}, found ${actualCols}`,\n\t\t\tisValid: validation.isValid,\n\t\t\treason: validation.reason,\n\t\t\textras: validation.extras\n\t\t};\n\t\treturn null;\n\t}\n\t/**\n\t* Build record info for a completed row\n\t*/\n\tfunction buildRecordInfo(state, dataRowIndex, includeRaw, fieldCount) {\n\t\tconst info = {\n\t\t\tindex: dataRowIndex,\n\t\t\tline: state.currentRowStartLine,\n\t\t\toffset: state.currentRowStartOffset,\n\t\t\tquoted: state.currentRowQuoted.slice(0, fieldCount)\n\t\t};\n\t\tif (includeRaw) info.raw = state.currentRawRow;\n\t\treturn info;\n\t}\n\t/**\n\t* Convert a raw row to an object record with optional dynamic typing\n\t*/\n\tfunction rowToRecord(row, state, config) {\n\t\tif (state.headerRow) {\n\t\t\tlet record = convertRowToObject(row, state.headerRow, state.originalHeaders, config.groupColumnsByName);\n\t\t\tif (config.dynamicTyping || config.castDate) record = applyDynamicTypingToRow(record, config.dynamicTyping || false, 
config.castDate);\n\t\t\treturn record;\n\t\t}\n\t\tconst result = {};\n\t\tfor (let i = 0; i < row.length; i++) result[i] = row[i];\n\t\treturn result;\n\t}\n\t/**\n\t* Process a completed row through headers, validation, etc.\n\t* This is the core row processing logic shared between sync and streaming parsers.\n\t*/\n\tfunction processCompletedRow(row, state, config, errors, lineNumber) {\n\t\tif (state.useHeaders && !state.headerRowProcessed) {\n\t\t\tif (processHeaderRow(row, state, config)) return {\n\t\t\t\tstop: false,\n\t\t\t\tskipped: true\n\t\t\t};\n\t\t}\n\t\tif (state.skippedDataRows < config.skipRows) {\n\t\t\tstate.skippedDataRows++;\n\t\t\treturn {\n\t\t\t\tstop: false,\n\t\t\t\tskipped: true\n\t\t\t};\n\t\t}\n\t\tconst validationError = validateRowColumns(row, state, config);\n\t\tlet extras;\n\t\tif (validationError) {\n\t\t\tconst errorObj = {\n\t\t\t\tcode: validationError.errorCode,\n\t\t\t\tmessage: validationError.message,\n\t\t\t\tline: lineNumber\n\t\t\t};\n\t\t\terrors.push(errorObj);\n\t\t\tif (!validationError.isValid) {\n\t\t\t\tif (config.skipRecordsWithError) {\n\t\t\t\t\tconfig.invokeOnSkip?.({\n\t\t\t\t\t\tcode: validationError.errorCode,\n\t\t\t\t\t\tmessage: validationError.reason || \"Column mismatch\",\n\t\t\t\t\t\tline: lineNumber\n\t\t\t\t\t}, row);\n\t\t\t\t\treturn {\n\t\t\t\t\t\tstop: false,\n\t\t\t\t\t\tskipped: true,\n\t\t\t\t\t\trow,\n\t\t\t\t\t\terror: {\n\t\t\t\t\t\t\tcode: validationError.errorCode,\n\t\t\t\t\t\t\tmessage: validationError.reason || \"Column mismatch\",\n\t\t\t\t\t\t\tline: lineNumber\n\t\t\t\t\t\t},\n\t\t\t\t\t\treason: validationError.reason || \"Column mismatch\"\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t\treturn {\n\t\t\t\t\tstop: false,\n\t\t\t\t\tskipped: true,\n\t\t\t\t\trow,\n\t\t\t\t\terror: errorObj,\n\t\t\t\t\treason: validationError.reason || \"Column mismatch\"\n\t\t\t\t};\n\t\t\t}\n\t\t\textras = validationError.extras;\n\t\t}\n\t\tif (config.skipRecordsWithEmptyValues && hasAllEmptyValues(row)) 
return {\n\t\t\tstop: false,\n\t\t\tskipped: true\n\t\t};\n\t\tif (config.maxRows !== void 0 && state.dataRowCount >= config.maxRows) {\n\t\t\tstate.truncated = true;\n\t\t\treturn {\n\t\t\t\tstop: true,\n\t\t\t\tskipped: false\n\t\t\t};\n\t\t}\n\t\tstate.dataRowCount++;\n\t\tlet info;\n\t\tif (config.infoOption) info = buildRecordInfo(state, state.dataRowCount - 1, config.rawOption, row.length);\n\t\treturn {\n\t\t\tstop: false,\n\t\t\tskipped: false,\n\t\t\trow,\n\t\t\tinfo,\n\t\t\textras\n\t\t};\n\t}\n\t//#endregion\n\t//#region src/modules/csv/parse/scanner/scanner.ts\n\t/**\n\t* Find the next newline position and determine its type.\n\t*\n\t* @returns [position, length] where length is 1 for \\n/\\r, 2 for \\r\\n, or [-1, 0] if not found\n\t*/\n\tfunction findNewline(input, start) {\n\t\tconst len = input.length;\n\t\tconst lfPos = input.indexOf(\"\\n\", start);\n\t\tconst crPos = input.indexOf(\"\\r\", start);\n\t\tif (lfPos === -1 && crPos === -1) return [-1, 0];\n\t\tif (crPos === -1 || lfPos !== -1 && lfPos < crPos) return [lfPos, 1];\n\t\tif (crPos + 1 < len) return input[crPos + 1] === \"\\n\" ? 
[crPos, 2] : [crPos, 1];
		// CR is the final character of the chunk: can't tell CR from CRLF yet,
		// so report length -1 (callers treat this as "need more input" unless EOF).
		return [crPos, -1];
	}
	/**
	* Check if position is at a delimiter (supports multi-character delimiters).
	*/
	function isAtDelimiter(input, pos, delimiter) {
		if (delimiter.length === 1) return input[pos] === delimiter;
		return input.startsWith(delimiter, pos);
	}
	/**
	* Find the next delimiter position (supports multi-character delimiters).
	*/
	function findDelimiter(input, start, delimiter) {
		return input.indexOf(delimiter, start);
	}
	/**
	* Scan a quoted field starting at the opening quote.
	*
	* Handles:
	* - Escaped quotes (RFC 4180: "" -> ")
	* - Backslash escapes when escape !== quote
	* - CRLF normalization inside quoted fields (CRLF -> LF)
	* - relaxQuotes mode (allow unescaped quotes mid-field)
	*
	* Performance optimization: Uses array to collect segments instead of
	* string concatenation to avoid O(n²) string building in fields with
	* many escaped quotes or embedded newlines.
	*
	* @param input - Input string
	* @param start - Position of opening quote
	* @param config - Scanner configuration
	* @param isEof - Whether this is the end of input
	* @returns Field scan result
	*/
	function scanQuotedField(input, start, config, isEof) {
		const { quote, escape, delimiter, relaxQuotes } = config;
		const len = input.length;
		let pos = start + 1;
		let segments = null;
		let segmentStart = pos;
		// Assemble the field value; the common case (no escapes/newlines seen,
		// segments still null) is a single slice with no joining.
		const buildValue = (endPos) => {
			const lastSegment = endPos > segmentStart ? input.slice(segmentStart, endPos) : "";
			if (segments === null) return lastSegment;
			if (lastSegment) segments.push(lastSegment);
			return segments.length === 1 ? segments[0] : segments.join("");
		};
		while (pos < len) {
			const char = input[pos];
			if (escape && char === escape) {
				// escape + quote: emit a literal quote character.
				if (pos + 1 < len && input[pos + 1] === quote) {
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push(quote);
					pos += 2;
					segmentStart = pos;
					continue;
				}
				// escape + escape (only meaningful when escape differs from quote).
				if (escape !== quote && pos + 1 < len && input[pos + 1] === escape) {
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push(escape);
					pos += 2;
					segmentStart = pos;
					continue;
				}
				// RFC 4180 style: the escape char IS the quote char, so a single
				// occurrence may be the closing quote — decide by lookahead.
				if (escape === quote) {
					if (pos + 1 >= len) {
						// Quote at chunk end: ask for more input unless at EOF,
						// where it closes the field.
						if (!isEof) return {
							value: buildValue(pos),
							quoted: true,
							endPos: pos,
							needMore: true,
							resumePos: start
						};
						return {
							value: buildValue(pos),
							quoted: true,
							endPos: pos + 1,
							needMore: false
						};
					}
					const nextChar = input[pos + 1];
					// Quote followed by delimiter or newline: field is closed.
					if ((delimiter.length === 1 ? nextChar === delimiter : isAtDelimiter(input, pos + 1, delimiter)) || nextChar === "\n" || nextChar === "\r") return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos + 1,
						needMore: false
					};
					// relaxQuotes: a stray quote mid-field is kept as literal data.
					if (relaxQuotes) {
						if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
						(segments ??= []).push(quote);
						pos++;
						segmentStart = pos;
						continue;
					}
					return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos + 1,
						needMore: false
					};
				}
			}
			// Closing-quote handling when a distinct escape char is configured.
			if (char === quote && escape !== quote) {
				if (pos + 1 >= len) {
					if (!isEof) return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos,
						needMore: true,
						resumePos: start
					};
					return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos + 1,
						needMore: false
					};
				}
				const nextChar = input[pos + 1];
				if ((delimiter.length === 1 ? nextChar === delimiter : isAtDelimiter(input, pos + 1, delimiter)) || nextChar === "\n" || nextChar === "\r") return {
					value: buildValue(pos),
					quoted: true,
					endPos: pos + 1,
					needMore: false
				};
				if (relaxQuotes) {
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push(quote);
					pos++;
					segmentStart = pos;
					continue;
				}
				return {
					value: buildValue(pos),
					quoted: true,
					endPos: pos + 1,
					needMore: false
				};
			}
			// CRLF / bare CR inside a quoted field is normalized to "\n".
			if (char === "\r") {
				if (pos + 1 < len) {
					if (input[pos + 1] === "\n") {
						if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
						(segments ??= []).push("\n");
						pos += 2;
						segmentStart = pos;
						continue;
					}
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push("\n");
					pos++;
					segmentStart = pos;
					continue;
				}
				// CR at chunk end: can't tell CR vs CRLF until more input arrives.
				if (!isEof) return {
					value: buildValue(pos),
					quoted: true,
					endPos: pos,
					needMore: true,
					resumePos: start
				};
				if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
				(segments ??= []).push("\n");
				pos++;
				segmentStart = pos;
				continue;
			}
			pos++;
		}
		// Ran out of input while inside the quotes: request more, or report an
		// unterminated quote at EOF.
		if (!isEof) return {
			value: buildValue(pos),
			quoted: true,
			endPos: pos,
			needMore: true,
			resumePos: start
		};
		return {
			value: buildValue(pos),
			quoted: true,
			endPos: pos,
			needMore: false,
			unterminated: true
		};
	}
	/**
	* Scan an unquoted field using indexOf for batch searching.
	*
	* This is 
the performance-critical path for most CSV files.
	* Uses indexOf to find the next delimiter or newline in O(n) time
	* with optimized native string search.
	*
	* @param input - Input string
	* @param start - Starting position
	* @param config - Scanner configuration
	* @param isEof - Whether this is the end of input
	* @returns Field scan result
	*/
	function scanUnquotedField(input, start, config, isEof) {
		const { delimiter } = config;
		const len = input.length;
		const delimPos = findDelimiter(input, start, delimiter);
		const [newlinePos, newlineLen] = findNewline(input, start);
		let endPos;
		let atNewline = false;
		// Neither delimiter nor newline ahead: the rest of the input is one field.
		if (delimPos === -1 && newlinePos === -1) {
			if (!isEof) return {
				value: input.slice(start),
				quoted: false,
				endPos: len,
				needMore: true,
				resumePos: start
			};
			return {
				value: input.slice(start),
				quoted: false,
				endPos: len,
				needMore: false
			};
		}
		// The field ends at whichever terminator comes first.
		if (delimPos === -1) {
			endPos = newlinePos;
			atNewline = true;
		} else if (newlinePos === -1) endPos = delimPos;
		else if (delimPos < newlinePos) endPos = delimPos;
		else {
			endPos = newlinePos;
			atNewline = true;
		}
		// Bare CR at chunk end (newlineLen === -1): CR vs CRLF is undecidable
		// until more input arrives, so defer unless at EOF.
		if (atNewline && newlineLen === -1 && !isEof) return {
			value: input.slice(start, endPos),
			quoted: false,
			endPos,
			needMore: true,
			resumePos: start
		};
		return {
			value: input.slice(start, endPos),
			quoted: false,
			endPos,
			needMore: false
		};
	}
	/**
	* Scan a complete row from the input string.
	*
	* @param input - Input string
	* @param start - Starting position
	* @param config - Scanner configuration
	* @param isEof - Whether this is the end of input
	* @param outFields - Optional reusable array for fields (will be cleared)
	* @param outQuoted - Optional reusable array for quoted flags (will be cleared)
	* @returns Row scan result with rawStart/rawEnd for zero-copy raw row extraction
	*/
	function scanRow(input, start, config, isEof, outFields, outQuoted) {
		const { delimiter, quote, quoteEnabled } = config;
		const delimLen = delimiter.length;
		const len = input.length;
		const fields = outFields ?? [];
		const quoted = outQuoted ?? [];
		if (outFields) outFields.length = 0;
		if (outQuoted) outQuoted.length = 0;
		let pos = start;
		let hasUnterminatedQuote = false;
		const rawStart = start;
		while (pos < len) {
			const char = input[pos];
			if (quoteEnabled && char === quote) {
				const result = scanQuotedField(input, pos, config, isEof);
				if (result.needMore) return {
					fields,
					quoted,
					endPos: pos,
					complete: false,
					needMore: true,
					resumePos: result.resumePos ?? start,
					rawStart,
					rawEnd: pos
				};
				if (result.unterminated) hasUnterminatedQuote = true;
				fields.push(result.value);
				quoted.push(true);
				pos = result.endPos;
				if (pos < len) {
					if (isAtDelimiter(input, pos, delimiter)) {
						pos += delimLen;
						// Row ends with a delimiter at EOF: emit the trailing empty field.
						if (pos >= len && isEof) {
							fields.push("");
							quoted.push(false);
						}
						continue;
					}
					const nextChar = input[pos];
					if (nextChar === "\n") return {
						fields,
						quoted,
						endPos: pos + 1,
						complete: true,
						needMore: false,
						newline: "\n",
						rawStart,
						rawEnd: pos
					};
					if (nextChar === "\r") {
						if (pos + 1 < len) {
							if (input[pos + 1] === "\n") return {
								fields,
								quoted,
								endPos: pos + 2,
								complete: true,
								needMore: false,
								newline: "\r\n",
								rawStart,
								rawEnd: pos
							};
							return {
								fields,
								quoted,
								endPos: pos + 1,
								complete: true,
								needMore: false,
								newline: "\r",
								rawStart,
								rawEnd: pos
							};
						}
						// Bare CR at chunk end: CR vs CRLF undecidable until EOF.
						if (!isEof) return {
							fields,
							quoted,
							endPos: pos,
							complete: false,
							needMore: true,
							resumePos: start,
							rawStart,
							rawEnd: pos
						};
						return {
							fields,
							quoted,
							endPos: pos + 1,
							complete: true,
							needMore: false,
							newline: "\r",
							rawStart,
							rawEnd: pos
						};
					}
					// Stray characters after a closing quote: skip forward to the
					// next delimiter or newline.
					pos++;
					while (pos < len) {
						if (isAtDelimiter(input, pos, delimiter)) {
							pos += delimLen;
							break;
						}
						if (input[pos] === "\n" || input[pos] === "\r") break;
						pos++;
					}
					continue;
				}
				continue;
			}
			const result = scanUnquotedField(input, pos, config, isEof);
			if (result.needMore) {
				fields.push(result.value);
				quoted.push(false);
				return {
					fields,
					quoted,
					endPos: result.endPos,
					complete: false,
					needMore: true,
					resumePos: result.resumePos ?? start,
					rawStart,
					rawEnd: result.endPos
				};
			}
			fields.push(result.value);
			quoted.push(false);
			pos = result.endPos;
			if (pos < len) {
				if (isAtDelimiter(input, pos, delimiter)) {
					pos += delimLen;
					// Row ends with a delimiter at EOF: emit the trailing empty field.
					if (pos >= len && isEof) {
						fields.push("");
						quoted.push(false);
					}
					continue;
				}
				const char = input[pos];
				if (char === "\n") return {
					fields,
					quoted,
					endPos: pos + 1,
					complete: true,
					needMore: false,
					newline: "\n",
					rawStart,
					rawEnd: pos
				};
				if (char === "\r") {
					if (pos + 1 < len && input[pos + 1] === "\n") return {
						fields,
						quoted,
						endPos: pos + 2,
						complete: true,
						needMore: false,
						newline: "\r\n",
						rawStart,
						rawEnd: pos
					};
					return {
						fields,
						quoted,
						endPos: pos + 1,
						complete: true,
						needMore: false,
						newline: "\r",
						rawStart,
						rawEnd: pos
					};
				}
			}
		}
		// EOF with pending content: this is the final row (no trailing newline).
		if (isEof) {
			if (fields.length > 0 || pos > start) return {
				fields,
				quoted,
				endPos: pos,
				complete: true,
				needMore: false,
				unterminatedQuote: hasUnterminatedQuote || void 0,
				rawStart,
				rawEnd: pos
			};
		}
		return {
			fields,
			quoted,
			endPos: pos,
			complete: false,
			needMore: !isEof,
			resumePos: start,
			unterminatedQuote: hasUnterminatedQuote || void 0,
			rawStart,
			rawEnd: pos
		};
	}
	//#endregion
	//#region src/modules/csv/parse/lines.ts
	/**
	* Cache for global-flag versions of RegExp objects.
	* Avoids re-creating `new 
RegExp(..., 'g')` on every call to splitLinesWithEndings.\n\t*/\n\tconst globalRegexCache = /* @__PURE__ */ new WeakMap();\n\tfunction getCachedGlobalRegex(re) {\n\t\tlet cached = globalRegexCache.get(re);\n\t\tif (!cached) {\n\t\t\tcached = new RegExp(re.source, `${re.flags.replace(/g/g, \"\")}g`);\n\t\t\tglobalRegexCache.set(re, cached);\n\t\t}\n\t\treturn cached;\n\t}\n\t/**\n\t* Split input into lines using the given linebreak regex and yield per-line\n\t* metadata including the actual line ending length.\n\t*\n\t* Notes:\n\t* - Works with mixed line endings.\n\t* - Skips trailing split artifacts (empty string produced by split when the input ends with a newline).\n\t*/\n\tfunction* splitLinesWithEndings(input, linebreakRegex) {\n\t\tif (input === \"\") return;\n\t\tif (typeof linebreakRegex === \"string\") {\n\t\t\tconst sep = linebreakRegex;\n\t\t\tif (sep === \"\") {\n\t\t\t\tyield {\n\t\t\t\t\tline: input,\n\t\t\t\t\tlineEndingLength: 0,\n\t\t\t\t\tlineLengthWithEnding: input.length\n\t\t\t\t};\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tlet pos = 0;\n\t\t\twhile (true) {\n\t\t\t\tconst idx = input.indexOf(sep, pos);\n\t\t\t\tif (idx === -1) {\n\t\t\t\t\tif (pos === input.length) return;\n\t\t\t\t\tconst line = input.slice(pos);\n\t\t\t\t\tyield {\n\t\t\t\t\t\tline,\n\t\t\t\t\t\tlineEndingLength: 0,\n\t\t\t\t\t\tlineLengthWithEnding: line.length\n\t\t\t\t\t};\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tconst line = input.slice(pos, idx);\n\t\t\t\tconst lineEndingLength = sep.length;\n\t\t\t\tyield {\n\t\t\t\t\tline,\n\t\t\t\t\tlineEndingLength,\n\t\t\t\t\tlineLengthWithEnding: line.length + lineEndingLength\n\t\t\t\t};\n\t\t\t\tpos = idx + sep.length;\n\t\t\t}\n\t\t}\n\t\tconst re = linebreakRegex.global ? 
linebreakRegex : getCachedGlobalRegex(linebreakRegex);\n\t\tlet pos = 0;\n\t\tre.lastIndex = 0;\n\t\twhile (true) {\n\t\t\tconst match = re.exec(input);\n\t\t\tif (!match) break;\n\t\t\tconst start = match.index;\n\t\t\tconst end = start + match[0].length;\n\t\t\tconst line = input.slice(pos, start);\n\t\t\tconst lineEndingLength = match[0].length;\n\t\t\tyield {\n\t\t\t\tline,\n\t\t\t\tlineEndingLength,\n\t\t\t\tlineLengthWithEnding: line.length + lineEndingLength\n\t\t\t};\n\t\t\tpos = end;\n\t\t\tif (match[0].length === 0) re.lastIndex++;\n\t\t}\n\t\tif (pos === input.length) return;\n\t\tconst tail = input.slice(pos);\n\t\tyield {\n\t\t\tline: tail,\n\t\t\tlineEndingLength: 0,\n\t\t\tlineLengthWithEnding: tail.length\n\t\t};\n\t}\n\t//#endregion\n\t//#region src/modules/csv/parse/sync.ts\n\t/**\n\t* Normalize validate result to { isValid, reason } form\n\t*/\n\tfunction normalizeValidateResult(result) {\n\t\tif (typeof result === \"boolean\") return {\n\t\t\tisValid: result,\n\t\t\treason: \"Validation failed\"\n\t\t};\n\t\treturn {\n\t\t\tisValid: result.isValid,\n\t\t\treason: result.reason || \"Validation failed\"\n\t\t};\n\t}\n\t/**\n\t* Apply dynamic typing to an array row (wrapper to reduce code duplication)\n\t*/\n\tfunction applyArrayTyping(row, dynamicTyping, castDate) {\n\t\treturn applyDynamicTypingToArrayRow(row, null, dynamicTyping || false, castDate);\n\t}\n\t/**\n\t* Return array only if non-empty, otherwise undefined\n\t*/\n\tfunction optionalArray(arr) {\n\t\treturn arr.length > 0 ? arr : void 0;\n\t}\n\t/**\n\t* Build CsvParseMeta from config and state (avoids duplication between array and object mode)\n\t*/\n\tfunction buildMeta(config, state) {\n\t\treturn {\n\t\t\tdelimiter: config.delimiter,\n\t\t\tlinebreak: config.linebreak,\n\t\t\taborted: false,\n\t\t\ttruncated: state.truncated,\n\t\t\tcursor: state.dataRowCount,\n\t\t\tfields: state.headerRow ? 
	/**
	* Apply trim function to all fields in a row.
	* Uses cached trimFieldIsIdentity from config to avoid per-row checking.
	*/
	function trimFields(fields, config) {
		// Identity trim: hand back the same array, no copy.
		if (config.trimFieldIsIdentity) return fields;
		return fields.map(config.trimField);
	}
	/**
	* Parse input using fast mode (no quote detection).
	* Each physical line is split directly on the delimiter.
	*/
	function* parseFastMode(input, config, state, errors) {
		if (input === "") return;
		let currentCharOffset = 0;
		for (const { line, lineLengthWithEnding: lineCharLength } of splitLinesWithEndings(input, config.linebreakRegex)) {
			state.lineNumber++;
			// Stop at the configured last line and mark the output truncated.
			if (config.toLine !== void 0 && state.lineNumber > config.toLine) {
				state.truncated = true;
				break;
			}
			if (state.lineNumber <= config.skipLines) {
				currentCharOffset += lineCharLength;
				continue;
			}
			if (line === "" && config.shouldSkipEmpty) {
				currentCharOffset += lineCharLength;
				continue;
			}
			if (config.maxRowBytes !== void 0) {
				if (getUtf8ByteLength(line) > config.maxRowBytes) throw new Error(`Row exceeds the maximum size of ${config.maxRowBytes} bytes`);
			}
			if (config.infoOption) {
				state.currentRowStartLine = state.lineNumber;
				state.currentRowStartOffset = currentCharOffset;
			}
			if (config.rawOption) state.currentRawRow = line;
			const trimmedRow = trimFields(line.split(config.delimiter), config);
			// Fast mode never sees quotes, so every field reports as unquoted.
			if (config.infoOption) state.currentRowQuoted = getUnquotedArray(trimmedRow.length);
			if (config.comment && trimmedRow[0]?.trimStart().startsWith(config.comment)) {
				currentCharOffset += lineCharLength;
				continue;
			}
			if (config.shouldSkipEmpty && isEmptyRow(trimmedRow, config.shouldSkipEmpty)) {
				currentCharOffset += lineCharLength;
				continue;
			}
			const result = processCompletedRow(trimmedRow, state, config, errors, state.lineNumber);
			currentCharOffset += lineCharLength;
			if (result.stop) {
				yield result;
				return;
			}
			if (!result.skipped || result.error) yield result;
			resetInfoState(state, config.infoOption, config.rawOption, state.lineNumber + 1, currentCharOffset);
		}
	}
	/**
	* Parse input using Scanner-based batch scanning.
	* This is a high-performance alternative that uses indexOf-based field scanning
	* instead of character-by-character parsing.
	*
	* Key optimizations:
	* 1. Uses indexOf to find delimiters/quotes/newlines in bulk
	* 2. Uses slice for field extraction (avoids string concatenation)
	* 3. Processes entire rows at once instead of character-by-character
	*/
	function* parseWithScanner(input, config, state, errors) {
		const scannerConfig = toScannerConfig(config);
		const len = input.length;
		let pos = 0;
		if (config.infoOption) state.currentRowStartOffset = 0;
		while (pos < len) {
			const scanResult = scanRow(input, pos, scannerConfig, true);
			// No fields and no progress: nothing left to parse.
			if (scanResult.fields.length === 0 && scanResult.endPos === pos) break;
			const row = trimFields(scanResult.fields, config);
			const rowStartLine = state.lineNumber + 1;
			{
				// Count the physical lines covered by this row (quoted fields may
				// span several); a CRLF pair counts as a single newline.
				const rawStart = scanResult.rawStart;
				const rawEnd = scanResult.rawEnd;
				let newlines = 1;
				for (let i = rawStart; i < rawEnd; i++) {
					const ch = input.charCodeAt(i);
					if (ch === 10) newlines++;
					else if (ch === 13) {
						if (i + 1 < rawEnd && input.charCodeAt(i + 1) === 10) i++;
						newlines++;
					}
				}
				state.lineNumber += newlines;
			}
			if (config.toLine !== void 0 && state.lineNumber > config.toLine) {
				state.truncated = true;
				break;
			}
			const rawEndPos = scanResult.rawEnd;
			if (state.lineNumber <= config.skipLines) {
				pos = scanResult.endPos;
				continue;
			}
			if (config.maxRowBytes !== void 0) {
				if (getUtf8ByteLength(input.slice(scanResult.rawStart, rawEndPos)) > config.maxRowBytes) throw new Error(`Row exceeds the maximum size of ${config.maxRowBytes} bytes`);
			}
			if (config.comment && row[0]?.trimStart().startsWith(config.comment)) {
				pos = scanResult.endPos;
				continue;
			}
			if (config.shouldSkipEmpty && isEmptyRow(row, config.shouldSkipEmpty)) {
				pos = scanResult.endPos;
				continue;
			}
			if (config.infoOption) {
				state.currentRowStartLine = rowStartLine;
				state.currentRowStartOffset = scanResult.rawStart;
				state.currentRowQuoted = scanResult.quoted;
			}
			if (config.rawOption) state.currentRawRow = input.slice(scanResult.rawStart, rawEndPos);
			// Record (but do not abort on) a quote left open at EOF.
			if (scanResult.unterminatedQuote) errors.push({
				code: "MissingQuotes",
				message: "Quoted field unterminated",
				line: state.lineNumber
			});
			const result = processCompletedRow(row, state, config, errors, state.lineNumber);
			if (result.stop) {
				yield result;
				return;
			}
			if (!result.skipped || result.error) yield result;
			pos = scanResult.endPos;
			if (config.infoOption) state.currentRowStartOffset = scanResult.endPos;
		}
	}
	/**
	* Parse CSV string synchronously.
	*
	* @example
	* ```ts
	* // Simple array output (no headers)
	* const rows = parseCsv("a,b,c\n1,2,3");
	* // rows: string[][] = [["a","b","c"], ["1","2","3"]]
	*
	* // Object output with headers
	* const result = parseCsv("name,age\nAlice,30", { headers: true });
	* // result.rows: Record<string, unknown>[] = [{ name: "Alice", age: "30" }]
	*
	* // With info option
	* const result = parseCsv("a,b\n1,2", { info: true });
	* // result.rows: RecordWithInfo<string[]>[] = [{ record: ["a","b"], info: {...} }, ...]
	* ```
	*/
	function parseCsv(input, options = {}) {
		const { config, processedInput } = resolveParseConfig(input, options);
		const state = createParseState(config);
		const errors = [];
		const invalidRows = [];
		const parser = config.fastMode ? parseFastMode(processedInput, config, state, errors) : parseWithScanner(processedInput, config, state, errors);
		if (!state.useHeaders) {
			// Array mode: rows stay positional arrays.
			const processedRows = [];
			for (const result of parser) {
				if (result.row && !result.skipped) {
					let row = result.row;
					if (options.rowTransform) {
						const transformed = options.rowTransform(row);
						// Transform returning null/undefined drops the row.
						if (transformed === null || transformed === void 0) continue;
						row = transformed;
					}
					if (options.validate) {
						const { isValid, reason } = normalizeValidateResult(options.validate(row));
						if (!isValid) {
							invalidRows.push({
								row,
								reason
							});
							continue;
						}
					}
					if (config.dynamicTyping || config.castDate) row = applyArrayTyping(row, config.dynamicTyping, config.castDate);
					if (config.infoOption && result.info) processedRows.push({
						record: row,
						info: result.info
					});
					else processedRows.push(row);
				} else if (result.row && result.skipped && result.error) invalidRows.push({
					row: result.row,
					reason: result.reason || result.error.message
				});
				if (result.stop) break;
			}
			const meta = buildMeta(config, state);
			// info/validate both upgrade the plain-array return to a result object.
			if (config.infoOption) return {
				headers: void 0,
				rows: processedRows,
				invalidRows: optionalArray(invalidRows),
				errors: optionalArray(errors),
				meta
			};
			if (options.validate) return {
				headers: void 0,
				rows: processedRows,
				invalidRows: optionalArray(invalidRows),
				errors: optionalArray(errors),
				meta
			};
			return processedRows;
		}
		// Object mode: rows become keyed records via the header row.
		const objectRows = [];
		for (const result of parser) {
			if (result.row && !result.skipped) {
				let record = rowToRecord(result.row, state, config);
				if (result.extras && result.extras.length > 0) record._extra = result.extras;
				if (options.rowTransform) {
					const transformed = options.rowTransform(record);
					if (transformed === null || transformed === void 0) continue;
					record = transformed;
				}
				if (options.validate) {
					const { isValid, reason } = normalizeValidateResult(options.validate(record));
					if (!isValid) {
						invalidRows.push({
							row: result.row,
							reason
						});
						continue;
					}
				}
				if (config.infoOption && result.info) objectRows.push({
					record,
					info: result.info
				});
				else objectRows.push(record);
			} else if (result.row && result.skipped && result.error) invalidRows.push({
				row: result.row,
				reason: result.reason || result.error.message
			});
			if (result.stop) break;
		}
		const meta = buildMeta(config, state);
		const { objname } = options;
		if (objname && state.headerRow) {
			// objname: key the output map by that column's value per row.
			const objResult = {};
			for (const item of objectRows) {
				const key = (config.infoOption ? item.record : item)[objname];
				const keyStr = key === void 0 || key === null ? "" : String(key);
				objResult[keyStr] = item;
			}
			return {
				headers: meta.fields,
				rows: objResult,
				invalidRows: optionalArray(invalidRows),
				errors: optionalArray(errors),
				meta
			};
		}
		return {
			headers: meta.fields,
			rows: objectRows,
			invalidRows: optionalArray(invalidRows),
			errors: optionalArray(errors),
			meta
		};
	}
	//#endregion
	//#region src/modules/csv/format/formatted-value.ts
	/**
	* Formatted value wrapper for controlling field-level quoting in CSV output.
	*
	* This module provides helper functions to override the global quoting behavior
	* for individual field values during CSV formatting.
	*
	* @module
	*/
	/**
	* Symbol to identify FormattedValue instances.
	* Using Symbol.for ensures reliable detection across module boundaries.
	*/
	const FORMATTED_VALUE_SYMBOL = Symbol.for("csv.FormattedValue");
	/**
	* Check if a value is a FormattedValue instance.
	*/
	function isFormattedValue(value) {
		return value !== null && typeof value === "object" && value[FORMATTED_VALUE_SYMBOL] === true;
	}
	//#endregion
	//#region src/modules/csv/format/config.ts
	/**
	* Escape a string for use inside a regex character class [...].
	* Handles all special characters: \ ] ^ -
	*/
	function escapeForCharClass(str) {
		return str.replace(/[\\\]^-]/g, "\\$&");
	}
\"\",\n\t\t\tdelimiter,\n\t\t\tuseFastCheck: false\n\t\t};\n\t\tconst escape = escapeNormalized.char || quote;\n\t\tconst useFastCheck = delimiter.length === 1 && quote.length === 1 && escape.length === 1;\n\t\treturn {\n\t\t\tneedsQuoteRegex: useFastCheck ? null : (() => {\n\t\t\t\tconst classContent = `${escapeForCharClass(delimiter)}${escapeForCharClass(quote)}${escape !== quote ? escapeForCharClass(escape) : \"\"}\\r\\n`;\n\t\t\t\treturn new RegExp(`[${classContent}]`);\n\t\t\t})(),\n\t\t\tescapeQuoteRegex: escape !== quote ? new RegExp(`${escapeRegex(quote)}|${escapeRegex(escape)}`, \"g\") : new RegExp(escapeRegex(quote), \"g\"),\n\t\t\tescapedQuote: escape + quote,\n\t\t\tquoteEnabled: true,\n\t\t\tquote,\n\t\t\tescape,\n\t\t\tdelimiter,\n\t\t\tuseFastCheck\n\t\t};\n\t}\n\tfunction createQuoteLookup(quoteConfig) {\n\t\tif (quoteConfig === true) return () => true;\n\t\tif (quoteConfig === false || quoteConfig === void 0) return () => false;\n\t\tif (Array.isArray(quoteConfig)) return (index) => quoteConfig[index] === true;\n\t\treturn (_index, header) => header ? quoteConfig[header] === true : false;\n\t}\n\t/**\n\t* Create complete format configuration from options\n\t*/\n\tfunction createFormatConfig(options) {\n\t\tconst { delimiter = \",\", lineEnding = \"\\n\", quote: quoteOption = \"\\\"\", escape: escapeOption, quoteColumns = false, quoteHeaders = false, writeHeaders: writeHeadersOption, bom = false, trailingNewline = false, escapeFormulae = false, decimalSeparator = \".\", typeTransform } = options;\n\t\tif (decimalSeparator !== \".\" && decimalSeparator !== \",\") throw new CsvError(`Invalid decimalSeparator: \"${decimalSeparator}\". 
Must be \".\" or \",\".`);\n\t\tif (decimalSeparator === delimiter) throw new CsvError(\"decimalSeparator cannot be the same as delimiter\");\n\t\tconst regex = createFormatRegex({\n\t\t\tquote: quoteOption,\n\t\t\tdelimiter,\n\t\t\tescape: escapeOption\n\t\t});\n\t\treturn {\n\t\t\tdelimiter,\n\t\t\tlineEnding,\n\t\t\tquoteAll: quoteColumns === true,\n\t\t\tescapeFormulae,\n\t\t\tdecimalSeparator,\n\t\t\twriteHeaders: writeHeadersOption ?? true,\n\t\t\tbom,\n\t\t\ttrailingNewline,\n\t\t\ttypeTransform,\n\t\t\tregex,\n\t\t\tshouldQuoteColumn: createQuoteLookup(quoteColumns),\n\t\t\tshouldQuoteHeader: createQuoteLookup(quoteHeaders)\n\t\t};\n\t}\n\t//#endregion\n\t//#region src/modules/csv/utils/number.ts\n\t/**\n\t* Format a number for CSV output with the specified decimal separator.\n\t*\n\t* @param value - The number to format\n\t* @param decimalSeparator - The decimal separator to use\n\t* @returns Formatted string representation\n\t*\n\t* @example\n\t* formatNumberForCsv(3.14, \".\") // \"3.14\"\n\t* formatNumberForCsv(3.14, \",\") // \"3,14\"\n\t*/\n\tfunction formatNumberForCsv(value, decimalSeparator) {\n\t\tif (decimalSeparator !== \",\") return String(value);\n\t\treturn String(value).replace(\".\", \",\");\n\t}\n\t//#endregion\n\t//#region src/modules/csv/format/formatter.ts\n\t/**\n\t* Apply type-based transform to a single value.\n\t* Returns the transformed result, or undefined if no transform applies.\n\t*/\n\tfunction applyTypeTransform(value, transform, ctx) {\n\t\tif (value === null || value === void 0) return;\n\t\tconst type = typeof value;\n\t\tif (type === \"boolean\" && transform.boolean) return transform.boolean(value, ctx);\n\t\tif (value instanceof Date && transform.date) return transform.date(value, ctx);\n\t\tif (type === \"number\" && transform.number) return transform.number(value, ctx);\n\t\tif (type === \"bigint\" && transform.bigint) return transform.bigint(value, ctx);\n\t\tif (type === \"string\" && transform.string) return 
transform.string(value, ctx);\n\t\tif (type === \"object\" && !Array.isArray(value) && !(value instanceof Date)) {\n\t\t\tif (transform.object) return transform.object(value, ctx);\n\t\t}\n\t}\n\t/**\n\t* Default type conversion to string.\n\t*/\n\tfunction defaultToString(value, decimalSeparator) {\n\t\tif (value === null || value === void 0) return \"\";\n\t\tif (typeof value === \"number\") return formatNumberForCsv(value, decimalSeparator);\n\t\tif (value instanceof Date) return value.toISOString();\n\t\tif (typeof value === \"bigint\") return String(value);\n\t\tif (typeof value === \"boolean\") return value ? \"true\" : \"false\";\n\t\tif (typeof value === \"object\") try {\n\t\t\treturn JSON.stringify(value);\n\t\t} catch {\n\t\t\treturn \"[object Object]\";\n\t\t}\n\t\treturn String(value);\n\t}\n\t/**\n\t* Fast check if a string needs quoting (for single-char delimiter/quote/escape)\n\t* Uses indexOf for slightly better V8 optimization\n\t*/\n\tfunction needsQuoteFast(str, delimiter, quote, escape) {\n\t\treturn str.indexOf(delimiter) !== -1 || str.indexOf(quote) !== -1 || escape !== quote && str.indexOf(escape) !== -1 || str.indexOf(\"\\n\") !== -1 || str.indexOf(\"\\r\") !== -1;\n\t}\n\t/**\n\t* Format a single field value to CSV string\n\t*/\n\tfunction formatField(value, regex, ctx) {\n\t\tconst { index, header, isHeader, outputRowIndex, forceQuote, quoteAll, escapeFormulae, decimalSeparator, transform } = ctx;\n\t\tlet str;\n\t\tlet transformQuoteHint;\n\t\tif (!isHeader && transform) {\n\t\t\tconst transformed = applyTypeTransform(value, transform, {\n\t\t\t\tcolumn: header ?? 
index,\n\t\t\t\tindex: outputRowIndex\n\t\t\t});\n\t\t\tif (transformed === void 0 || transformed === null) str = defaultToString(value, decimalSeparator);\n\t\t\telse if (isFormattedValue(transformed)) {\n\t\t\t\tstr = transformed.value;\n\t\t\t\ttransformQuoteHint = transformed.quote;\n\t\t\t} else str = transformed;\n\t\t} else str = defaultToString(value, decimalSeparator);\n\t\tif (escapeFormulae && transformQuoteHint !== false && startsWithFormulaChar(str)) str = \"'\" + str;\n\t\tif (!regex.quoteEnabled) return str;\n\t\tlet needsQuote;\n\t\tif (transformQuoteHint !== void 0) needsQuote = transformQuoteHint;\n\t\telse needsQuote = quoteAll || forceQuote || (regex.useFastCheck ? needsQuoteFast(str, regex.delimiter, regex.quote, regex.escape) : regex.needsQuoteRegex.test(str));\n\t\tif (needsQuote) {\n\t\t\tlet escaped;\n\t\t\tif (regex.escape !== regex.quote) escaped = str.replace(regex.escapeQuoteRegex, (ch) => ch === regex.quote ? regex.escape + regex.quote : regex.escape + regex.escape);\n\t\t\telse escaped = str.replace(regex.escapeQuoteRegex, regex.escapedQuote);\n\t\t\treturn regex.quote + escaped + regex.quote;\n\t\t}\n\t\treturn str;\n\t}\n\t/**\n\t* Format an entire row to CSV string.\n\t*\n\t* Performance optimizations:\n\t* - Uses for loop with direct string building instead of map().join()\n\t* - Reuses a single mutable context object instead of creating one per field\n\t*/\n\tfunction formatRowWithLookup(row, regex, options) {\n\t\tconst { quoteLookup, delimiter, headers, isHeader, outputRowIndex, quoteAll, escapeFormulae, decimalSeparator, transform } = options;\n\t\tconst len = row.length;\n\t\tif (len === 0) return \"\";\n\t\tconst ctx = {\n\t\t\tindex: 0,\n\t\t\theader: headers?.[0],\n\t\t\tisHeader,\n\t\t\toutputRowIndex,\n\t\t\tforceQuote: quoteLookup(0, headers?.[0]),\n\t\t\tquoteAll,\n\t\t\tescapeFormulae,\n\t\t\tdecimalSeparator,\n\t\t\ttransform\n\t\t};\n\t\tlet result = formatField(row[0], regex, ctx);\n\t\tfor (let i = 1; i < len; 
	/**
	* Format an entire row to CSV string.
	*
	* Performance optimizations:
	* - Uses for loop with direct string building instead of map().join()
	* - Reuses a single mutable context object instead of creating one per field
	*/
	function formatRowWithLookup(row, regex, options) {
		const { quoteLookup, delimiter, headers, isHeader, outputRowIndex, quoteAll, escapeFormulae, decimalSeparator, transform } = options;
		const len = row.length;
		if (len === 0) return "";
		// Mutable context shared across fields; per-field slots are overwritten
		// in the loop below.
		const ctx = {
			index: 0,
			header: headers?.[0],
			isHeader,
			outputRowIndex,
			forceQuote: quoteLookup(0, headers?.[0]),
			quoteAll,
			escapeFormulae,
			decimalSeparator,
			transform
		};
		let result = formatField(row[0], regex, ctx);
		for (let i = 1; i < len; i++) {
			ctx.index = i;
			ctx.header = headers?.[i];
			ctx.forceQuote = quoteLookup(i, ctx.header);
			result += delimiter + formatField(row[i], regex, ctx);
		}
		return result;
	}
	/**
	* Apply row transform if configured. Returns null to skip the row.
	*/
	function applyRowTransform(cfg, row, index) {
		if (!cfg.typeTransform?.row) return row;
		const t = cfg.typeTransform.row(row, index);
		return t === null ? null : t;
	}
	/**
	* Normalize all input types to a unified format.
	* Handles: objects, arrays, RowHashArray, and columns config.
	*/
	function normalizeInput(data, options, cfg) {
		const { headers, columns } = options;
		if (data.length === 0) {
			// Empty data: headers may still come from the columns/headers options.
			if (columns && columns.length > 0) return {
				keys: null,
				displayHeaders: columns.map((c) => typeof c === "string" ? c : c.header ?? c.key),
				rows: []
			};
			if (Array.isArray(headers)) return {
				keys: headers,
				displayHeaders: headers,
				rows: []
			};
			return {
				keys: null,
				displayHeaders: null,
				rows: []
			};
		}
		const firstRow = data[0];
		if (columns && columns.length > 0) {
			// Explicit column config wins over header detection.
			const processed = processColumns(columns);
			const keys = processed.keys;
			const displayHeaders = processed.headers;
			const rows = [];
			for (let i = 0; i < data.length; i++) {
				const row = applyRowTransform(cfg, data[i], i);
				if (row === null) continue;
				let values;
				if (isRowHashArray(row)) values = rowHashArrayMapByHeaders(row, keys);
				else if (Array.isArray(row)) values = row;
				else values = keys.map((k) => row[k]);
				rows.push(values);
			}
			return {
				keys,
				displayHeaders,
				rows
			};
		}
		if (isRowHashArray(firstRow)) {
			// RowHashArray input; keys come from headers option or the first row.
			const hashArrays = data;
			const keys = headers === true ? rowHashArrayToHeaders(hashArrays[0]) : Array.isArray(headers) ? headers : null;
			const rows = [];
			for (let i = 0; i < hashArrays.length; i++) {
				const row = applyRowTransform(cfg, hashArrays[i], i);
				if (row === null) continue;
				let values;
				if (isRowHashArray(row)) values = keys ? rowHashArrayMapByHeaders(row, keys) : rowHashArrayToValues(row);
				else if (Array.isArray(row)) values = row;
				else values = keys ? keys.map((k) => row[k]) : Object.values(row);
				rows.push(values);
			}
			return {
				keys,
				displayHeaders: keys,
				rows
			};
		}
		if (!Array.isArray(firstRow) && typeof firstRow === "object") {
			// Plain object rows.
			const objects = data;
			const keys = headers === true ? Object.keys(objects[0]) : Array.isArray(headers) ? headers : null;
			const rows = [];
			for (let i = 0; i < objects.length; i++) {
				const obj = applyRowTransform(cfg, objects[i], i);
				if (obj === null) continue;
				const values = keys ? keys.map((k) => obj[k]) : Object.values(obj);
				rows.push(values);
			}
			return {
				keys,
				displayHeaders: keys,
				rows
			};
		}
		// Array-of-arrays input.
		const arrays = data;
		const keys = Array.isArray(headers) ? headers : null;
		const rows = [];
		for (let i = 0; i < arrays.length; i++) {
			const row = applyRowTransform(cfg, arrays[i], i);
			if (row === null) continue;
			rows.push(row);
		}
		return {
			keys,
			displayHeaders: keys,
			rows
		};
	}
deduplicateHeaders(displayHeaders) : void 0;\n\t\tif (effectiveHeaders && cfg.writeHeaders) lines.push(formatRowWithLookup(effectiveHeaders, cfg.regex, {\n\t\t\tquoteLookup: cfg.shouldQuoteHeader,\n\t\t\tdelimiter: cfg.delimiter,\n\t\t\theaders: effectiveHeaders,\n\t\t\tisHeader: true,\n\t\t\toutputRowIndex: 0,\n\t\t\tquoteAll: cfg.quoteAll,\n\t\t\tescapeFormulae: cfg.escapeFormulae,\n\t\t\tdecimalSeparator: cfg.decimalSeparator,\n\t\t\ttransform: void 0\n\t\t}));\n\t\tfor (let rowIdx = 0; rowIdx < rows.length; rowIdx++) lines.push(formatRowWithLookup(rows[rowIdx], cfg.regex, {\n\t\t\tquoteLookup: cfg.shouldQuoteColumn,\n\t\t\tdelimiter: cfg.delimiter,\n\t\t\theaders: effectiveHeaders,\n\t\t\tisHeader: false,\n\t\t\toutputRowIndex: rowIdx,\n\t\t\tquoteAll: cfg.quoteAll,\n\t\t\tescapeFormulae: cfg.escapeFormulae,\n\t\t\tdecimalSeparator: cfg.decimalSeparator,\n\t\t\ttransform: cfg.typeTransform\n\t\t}));\n\t\tlet result = cfg.bom ? \"\uFEFF\" : \"\";\n\t\tresult += lines.join(cfg.lineEnding);\n\t\tif (lines.length > 0 && cfg.trailingNewline) result += cfg.lineEnding;\n\t\treturn result;\n\t}\n\t//#endregion\n\t//#region src/modules/csv/worker/worker.entry.ts\n\t/**\n\t* CSV Worker Entry\n\t*\n\t* This file runs inside a Web Worker (classic script after bundling).\n\t* It implements the worker message protocol used by CsvWorkerPool.\n\t*\n\t* IMPORTANT:\n\t* - Keep this file browser/worker-safe (no Node.js APIs)\n\t* - Parsing/formatting are delegated to the main CSV implementations to avoid drift.\n\t*/\n\tconst sessions = /* @__PURE__ */ new Map();\n\tfunction getSession(sessionId) {\n\t\tconst session = sessions.get(sessionId);\n\t\tif (!session) throw new Error(`Session not found: ${sessionId}`);\n\t\treturn session;\n\t}\n\tfunction reply(taskId, start, data) {\n\t\tself.postMessage({\n\t\t\ttype: \"result\",\n\t\t\ttaskId,\n\t\t\tdata,\n\t\t\tduration: performance.now() - start\n\t\t});\n\t}\n\tfunction replyError(taskId, start, error) {\n\t\tconst message = 
error instanceof Error ? error.message : String(error);\n\t\tself.postMessage({\n\t\t\ttype: \"error\",\n\t\t\ttaskId,\n\t\t\terror: message,\n\t\t\tduration: performance.now() - start\n\t\t});\n\t}\n\tfunction toObjectRows(data, headers) {\n\t\tif (!Array.isArray(data)) return {\n\t\t\trows: [],\n\t\t\theaders: headers ?? []\n\t\t};\n\t\tif (data.length === 0) return {\n\t\t\trows: [],\n\t\t\theaders: headers ?? []\n\t\t};\n\t\tconst first = data[0];\n\t\tif (first && typeof first === \"object\" && !Array.isArray(first)) return {\n\t\t\trows: data,\n\t\t\theaders: headers ?? Object.keys(first)\n\t\t};\n\t\tconst arrayRows = data;\n\t\tlet resolvedHeaders;\n\t\tlet rows;\n\t\tif (headers && headers.length > 0) {\n\t\t\tresolvedHeaders = headers;\n\t\t\trows = arrayRows;\n\t\t} else {\n\t\t\tresolvedHeaders = (arrayRows[0] ?? []).map((v) => String(v));\n\t\t\trows = arrayRows.slice(1);\n\t\t}\n\t\treturn {\n\t\t\trows: rows.map((row) => {\n\t\t\t\tconst obj = Object.create(null);\n\t\t\t\tfor (let i = 0; i < resolvedHeaders.length; i++) {\n\t\t\t\t\tconst key = resolvedHeaders[i];\n\t\t\t\t\tif (key !== \"__proto__\") obj[key] = row[i];\n\t\t\t\t}\n\t\t\t\treturn obj;\n\t\t\t}),\n\t\t\theaders: resolvedHeaders\n\t\t};\n\t}\n\tfunction sortData(data, configs) {\n\t\tconst list = Array.isArray(configs) ? configs : [configs];\n\t\tdata.sort((a, b) => {\n\t\t\tfor (const config of list) {\n\t\t\t\tconst { column, order = \"asc\", comparator = \"auto\" } = config;\n\t\t\t\tconst aVal = a[column];\n\t\t\t\tconst bVal = b[column];\n\t\t\t\tlet result;\n\t\t\t\tif (comparator === \"number\" || comparator === \"auto\" && !Number.isNaN(Number(aVal)) && !Number.isNaN(Number(bVal))) result = Number(aVal ?? 0) - Number(bVal ?? 0);\n\t\t\t\telse if (comparator === \"date\") result = new Date(aVal ?? 0).getTime() - new Date(bVal ?? 0).getTime();\n\t\t\t\telse result = String(aVal ?? \"\").localeCompare(String(bVal ?? \"\"));\n\t\t\t\tif (result !== 0) return order === \"desc\" ? 
-result : result;\n\t\t\t}\n\t\t\treturn 0;\n\t\t});\n\t}\n\tfunction evaluateCondition(row, condition, compiledRegex) {\n\t\tconst { column, operator, value, ignoreCase = false } = condition;\n\t\tlet fieldValue = row?.[column];\n\t\tlet compareValue = value;\n\t\tif (ignoreCase && typeof fieldValue === \"string\" && operator !== \"regex\") {\n\t\t\tfieldValue = fieldValue.toLowerCase();\n\t\t\tif (typeof compareValue === \"string\") compareValue = compareValue.toLowerCase();\n\t\t\telse if (Array.isArray(compareValue)) compareValue = compareValue.map((v) => typeof v === \"string\" ? v.toLowerCase() : v);\n\t\t}\n\t\tswitch (operator) {\n\t\t\tcase \"eq\": return fieldValue === compareValue;\n\t\t\tcase \"neq\": return fieldValue !== compareValue;\n\t\t\tcase \"gt\": return Number(fieldValue) > Number(compareValue);\n\t\t\tcase \"gte\": return Number(fieldValue) >= Number(compareValue);\n\t\t\tcase \"lt\": return Number(fieldValue) < Number(compareValue);\n\t\t\tcase \"lte\": return Number(fieldValue) <= Number(compareValue);\n\t\t\tcase \"contains\": {\n\t\t\t\tconst fv = ignoreCase ? String(fieldValue).toLowerCase() : String(fieldValue);\n\t\t\t\tconst cv = ignoreCase ? String(compareValue).toLowerCase() : String(compareValue);\n\t\t\t\treturn fv.includes(cv);\n\t\t\t}\n\t\t\tcase \"startsWith\": {\n\t\t\t\tconst fv = ignoreCase ? String(fieldValue).toLowerCase() : String(fieldValue);\n\t\t\t\tconst cv = ignoreCase ? String(compareValue).toLowerCase() : String(compareValue);\n\t\t\t\treturn fv.startsWith(cv);\n\t\t\t}\n\t\t\tcase \"endsWith\": {\n\t\t\t\tconst fv = ignoreCase ? String(fieldValue).toLowerCase() : String(fieldValue);\n\t\t\t\tconst cv = ignoreCase ? String(compareValue).toLowerCase() : String(compareValue);\n\t\t\t\treturn fv.endsWith(cv);\n\t\t\t}\n\t\t\tcase \"regex\": return (compiledRegex ?? new RegExp(compareValue, ignoreCase ? 
\"i\" : \"\")).test(String(fieldValue));\n\t\t\tcase \"in\": return Array.isArray(compareValue) && compareValue.includes(fieldValue);\n\t\t\tcase \"notIn\": return !Array.isArray(compareValue) || !compareValue.includes(fieldValue);\n\t\t\tcase \"isNull\": return fieldValue === null || fieldValue === void 0 || fieldValue === \"\";\n\t\t\tcase \"notNull\": return fieldValue !== null && fieldValue !== void 0 && fieldValue !== \"\";\n\t\t\tdefault: return true;\n\t\t}\n\t}\n\tfunction filterData(data, config) {\n\t\tconst { conditions, logic = \"and\" } = config;\n\t\tconst compiledRegexMap = /* @__PURE__ */ new Map();\n\t\tfor (const cond of conditions) if (cond.operator === \"regex\") compiledRegexMap.set(cond, new RegExp(cond.value, cond.ignoreCase ? \"i\" : \"\"));\n\t\tconst evaluate = logic === \"and\" ? (row) => conditions.every((cond) => evaluateCondition(row, cond, compiledRegexMap.get(cond))) : (row) => conditions.some((cond) => evaluateCondition(row, cond, compiledRegexMap.get(cond)));\n\t\treturn data.filter(evaluate);\n\t}\n\tfunction searchData(data, config) {\n\t\tconst { query, columns, ignoreCase = true } = config;\n\t\tconst searchQuery = ignoreCase ? query.toLowerCase() : query;\n\t\tconst resolvedColumns = columns ?? Object.keys(data[0] ?? {});\n\t\treturn data.filter((row) => {\n\t\t\treturn resolvedColumns.some((col) => {\n\t\t\t\tlet value = String(row[col] ?? \"\");\n\t\t\t\tif (ignoreCase) value = value.toLowerCase();\n\t\t\t\treturn value.includes(searchQuery);\n\t\t\t});\n\t\t});\n\t}\n\tfunction computeAggregate(rows, column, fn) {\n\t\tif (fn === \"count\") return rows.length;\n\t\tif (fn === \"first\") return rows.length > 0 ? rows[0]?.[column] : null;\n\t\tif (fn === \"last\") return rows.length > 0 ? rows[rows.length - 1]?.[column] : null;\n\t\tconst nums = rows.map((r) => Number(r?.[column])).filter((n) => !Number.isNaN(n));\n\t\tif (nums.length === 0) return fn === \"avg\" ? 
0 : null;\n\t\tif (fn === \"sum\" || fn === \"avg\") {\n\t\t\tconst sum = nums.reduce((a, b) => a + b, 0);\n\t\t\treturn fn === \"avg\" ? sum / nums.length : sum;\n\t\t}\n\t\tif (fn === \"min\") return nums.reduce((a, b) => a < b ? a : b, nums[0]);\n\t\tif (fn === \"max\") return nums.reduce((a, b) => a > b ? a : b, nums[0]);\n\t\treturn null;\n\t}\n\tfunction groupByData(data, config) {\n\t\tconst { columns, aggregates } = config;\n\t\tconst groups = /* @__PURE__ */ new Map();\n\t\tfor (const row of data) {\n\t\t\tconst keyValues = columns.map((col) => row[col]);\n\t\t\tconst key = keyValues.join(\"\\0\");\n\t\t\tconst existing = groups.get(key);\n\t\t\tif (existing) existing.rows.push(row);\n\t\t\telse groups.set(key, {\n\t\t\t\tkeyValues,\n\t\t\t\trows: [row]\n\t\t\t});\n\t\t}\n\t\tconst result = [];\n\t\tfor (const group of groups.values()) {\n\t\t\tconst obj = Object.create(null);\n\t\t\tcolumns.forEach((col, idx) => {\n\t\t\t\tconst k = String(col);\n\t\t\t\tif (k !== \"__proto__\") obj[k] = group.keyValues[idx];\n\t\t\t});\n\t\t\tfor (const { column, fn, alias } of aggregates) {\n\t\t\t\tconst key = alias || `${column}_${fn}`;\n\t\t\t\tif (key !== \"__proto__\") obj[key] = computeAggregate(group.rows, column, fn);\n\t\t\t}\n\t\t\tresult.push(obj);\n\t\t}\n\t\treturn result;\n\t}\n\tfunction aggregateData(data, configs) {\n\t\tconst result = Object.create(null);\n\t\tfor (const config of configs) {\n\t\t\tconst { column, fn, alias } = config;\n\t\t\tconst key = alias || `${column}_${fn}`;\n\t\t\tif (key !== \"__proto__\") result[key] = computeAggregate(data, column, fn);\n\t\t}\n\t\treturn result;\n\t}\n\tfunction getPageData(data, config) {\n\t\tconst page = Math.max(1, config.page);\n\t\tlet { pageSize } = config;\n\t\tif (pageSize <= 0) pageSize = data.length || 1;\n\t\tconst start = (page - 1) * pageSize;\n\t\treturn {\n\t\t\tdata: data.slice(start, start + pageSize),\n\t\t\tpage,\n\t\t\tpageSize,\n\t\t\ttotalRows: data.length,\n\t\t\ttotalPages: 
Math.ceil(data.length / pageSize)\n\t\t};\n\t}\n\tfunction executeQuery(session, config) {\n\t\tlet data = config.sort ? [...session.originalData] : session.originalData;\n\t\tconst result = { data: [] };\n\t\tif (config.sort) sortData(data, config.sort);\n\t\tif (config.filter) {\n\t\t\tdata = filterData(data, config.filter);\n\t\t\tresult.matchCount = data.length;\n\t\t}\n\t\tif (config.search) {\n\t\t\tdata = searchData(data, config.search);\n\t\t\tresult.matchCount = data.length;\n\t\t}\n\t\tif (config.groupBy) {\n\t\t\tdata = groupByData(data, config.groupBy);\n\t\t\tresult.groupCount = data.length;\n\t\t} else if (config.aggregate) result.aggregates = aggregateData(data, config.aggregate);\n\t\tif (config.page) {\n\t\t\tconst pageResult = getPageData(data, config.page);\n\t\t\tresult.data = pageResult.data;\n\t\t\tresult.page = pageResult.page;\n\t\t\tresult.pageSize = pageResult.pageSize;\n\t\t\tresult.totalRows = pageResult.totalRows;\n\t\t\tresult.totalPages = pageResult.totalPages;\n\t\t} else result.data = data;\n\t\treturn result;\n\t}\n\tself.addEventListener(\"message\", (event) => {\n\t\tif (event.origin !== \"\" && event.origin !== self.location?.origin) return;\n\t\tconst msg = event.data;\n\t\tif (!msg || typeof msg.type !== \"string\") return;\n\t\tconst taskId = msg.taskId ?? 0;\n\t\tconst start = performance.now();\n\t\ttry {\n\t\t\tswitch (msg.type) {\n\t\t\t\tcase \"parse\": {\n\t\t\t\t\tconst result = parseCsv(msg.data, msg.options);\n\t\t\t\t\tif (msg.sessionId) {\n\t\t\t\t\t\tconst isObj = result && result.headers;\n\t\t\t\t\t\tsessions.set(msg.sessionId, {\n\t\t\t\t\t\t\tdata: isObj ? result.rows : result,\n\t\t\t\t\t\t\theaders: isObj ? result.headers : null,\n\t\t\t\t\t\t\toriginalData: isObj ? 
[...result.rows] : [...result]\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\treply(taskId, start, result);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"format\":\n\t\t\t\t\treply(taskId, start, formatCsv(msg.data, msg.options));\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"load\": {\n\t\t\t\t\tconst { rows, headers } = toObjectRows(msg.data, msg.headers ?? null);\n\t\t\t\t\tsessions.set(msg.sessionId, {\n\t\t\t\t\t\tdata: rows,\n\t\t\t\t\t\theaders: headers ?? null,\n\t\t\t\t\t\toriginalData: [...rows]\n\t\t\t\t\t});\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\trowCount: rows.length,\n\t\t\t\t\t\theaders\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"getData\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: session.data,\n\t\t\t\t\t\theaders: session.headers || [],\n\t\t\t\t\t\trowCount: session.data.length\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"clear\":\n\t\t\t\t\tif (msg.sessionId) sessions.delete(msg.sessionId);\n\t\t\t\t\telse sessions.clear();\n\t\t\t\t\treply(taskId, start, void 0);\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"sort\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\tsortData(session.data, msg.config);\n\t\t\t\t\tsession.originalData = [...session.data];\n\t\t\t\t\treply(taskId, start, { rowCount: session.data.length });\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"filter\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\tconst totalCount = session.originalData.length;\n\t\t\t\t\tsession.data = filterData(session.originalData, msg.config);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: session.data,\n\t\t\t\t\t\tmatchCount: session.data.length,\n\t\t\t\t\t\ttotalCount\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"search\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\tconst totalCount = session.originalData.length;\n\t\t\t\t\tsession.data = searchData(session.originalData, 
msg.config);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: session.data,\n\t\t\t\t\t\tmatchCount: session.data.length,\n\t\t\t\t\t\ttotalCount\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"groupBy\": {\n\t\t\t\t\tconst groups = groupByData(getSession(msg.sessionId).data, msg.config);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: groups,\n\t\t\t\t\t\tgroupCount: groups.length\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"aggregate\":\n\t\t\t\t\treply(taskId, start, { data: aggregateData(getSession(msg.sessionId).data, msg.config) });\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"getPage\":\n\t\t\t\t\treply(taskId, start, getPageData(getSession(msg.sessionId).data, msg.config));\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"query\":\n\t\t\t\t\treply(taskId, start, executeQuery(getSession(msg.sessionId), msg.config));\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"terminate\":\n\t\t\t\t\tsessions.clear();\n\t\t\t\t\tbreak;\n\t\t\t\tdefault: throw new Error(`Unknown message type: ${msg.type}`);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\treplyError(taskId, start, error);\n\t\t}\n\t});\n\tself.postMessage({ type: \"ready\" });\n\t//#endregion\n})();\n";
|
|
6
|
+
export declare const CSV_WORKER_SCRIPT = "(function() {\n\t//#region src/modules/csv/utils/row.ts\n\t/**\n\t* Check if a row is a RowHashArray (array of [key, value] tuples)\n\t*/\n\tfunction isRowHashArray(row) {\n\t\tif (!Array.isArray(row) || row.length === 0) return false;\n\t\tconst first = row[0];\n\t\treturn Array.isArray(first) && first.length === 2 && typeof first[0] === \"string\";\n\t}\n\t/**\n\t* Convert RowHashArray to RowMap\n\t* Note: Manual loop is ~4x faster than Object.fromEntries\n\t*/\n\tfunction rowHashArrayToMap(row) {\n\t\tconst obj = Object.create(null);\n\t\tfor (const [key, value] of row) if (key !== \"__proto__\") obj[key] = value;\n\t\treturn obj;\n\t}\n\t/**\n\t* Convert RowHashArray to values array (preserving order)\n\t*/\n\tfunction rowHashArrayToValues(row) {\n\t\treturn row.map(([, value]) => value);\n\t}\n\t/**\n\t* Get headers from RowHashArray\n\t*/\n\tfunction rowHashArrayToHeaders(row) {\n\t\treturn row.map(([key]) => key);\n\t}\n\t/**\n\t* Get value by key from RowHashArray (returns undefined if not found)\n\t* More efficient than creating a full map when you need only specific values\n\t*/\n\tfunction rowHashArrayGet(row, key) {\n\t\tfor (const [k, v] of row) if (k === key) return v;\n\t}\n\t/**\n\t* Map RowHashArray values according to header order\n\t* Optimized: builds values array in single pass without intermediate object\n\t*/\n\tfunction rowHashArrayMapByHeaders(row, headers) {\n\t\tif (headers.length <= 10) return headers.map((h) => rowHashArrayGet(row, h));\n\t\tconst map = rowHashArrayToMap(row);\n\t\treturn headers.map((h) => map[h]);\n\t}\n\t/**\n\t* Deduplicate headers by appending suffix to duplicates.\n\t* Example: [\"A\", \"B\", \"A\", \"A\"] \u2192 [\"A\", \"B\", \"A_1\", \"A_2\"]\n\t*\n\t* @param headers - Original header array\n\t* @returns New array with unique header names\n\t*/\n\tfunction deduplicateHeaders(headers) {\n\t\treturn deduplicateHeadersWithRenames(headers).headers;\n\t}\n\tfunction 
deduplicateHeadersWithRenames(headers) {\n\t\tconst headerCount = /* @__PURE__ */ new Map();\n\t\tconst usedHeaders = /* @__PURE__ */ new Set();\n\t\tconst reservedHeaders = /* @__PURE__ */ new Set();\n\t\tconst result = [];\n\t\tconst renamedHeaders = {};\n\t\tlet hasRenames = false;\n\t\tlet emptyHeaderCount = 0;\n\t\tfor (const header of headers) if (header !== null && header !== void 0 && header !== \"\") reservedHeaders.add(header);\n\t\tfor (let i = 0; i < headers.length; i++) {\n\t\t\tconst header = headers[i];\n\t\t\tif (header === null || header === void 0) {\n\t\t\t\tresult.push(header);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (header === \"\") {\n\t\t\t\tlet placeholder = `_column_${i}`;\n\t\t\t\twhile (usedHeaders.has(placeholder) || reservedHeaders.has(placeholder)) placeholder = `_column_${i}_${emptyHeaderCount++}`;\n\t\t\t\tusedHeaders.add(placeholder);\n\t\t\t\tresult.push(placeholder);\n\t\t\t\trenamedHeaders[placeholder] = \"\";\n\t\t\t\thasRenames = true;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (!usedHeaders.has(header)) {\n\t\t\t\tusedHeaders.add(header);\n\t\t\t\theaderCount.set(header, 1);\n\t\t\t\tresult.push(header);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tlet suffix = headerCount.get(header) ?? 1;\n\t\t\tlet candidate = `${header}_${suffix}`;\n\t\t\twhile (usedHeaders.has(candidate) || reservedHeaders.has(candidate)) {\n\t\t\t\tsuffix++;\n\t\t\t\tcandidate = `${header}_${suffix}`;\n\t\t\t}\n\t\t\theaderCount.set(header, suffix + 1);\n\t\t\tusedHeaders.add(candidate);\n\t\t\tresult.push(candidate);\n\t\t\trenamedHeaders[candidate] = header;\n\t\t\thasRenames = true;\n\t\t}\n\t\treturn {\n\t\t\theaders: result,\n\t\t\trenamedHeaders: hasRenames ? 
renamedHeaders : null\n\t\t};\n\t}\n\t/**\n\t* Process columns configuration to extract keys and headers.\n\t* Returns null if columns is empty or undefined.\n\t*\n\t* This function is used by both formatCsv (batch) and CsvFormatterStream (streaming)\n\t* to normalize column configuration into separate key/header arrays.\n\t*\n\t* @param columns - Column configuration array (string names or ColumnConfig objects)\n\t* @returns Object with keys (data access) and headers (output names), or null if empty\n\t*\n\t* @example\n\t* ```ts\n\t* processColumns(['name', { key: 'age', header: 'Age (years)' }])\n\t* // { keys: ['name', 'age'], headers: ['name', 'Age (years)'] }\n\t* ```\n\t*/\n\tfunction processColumns(columns) {\n\t\tif (!columns || columns.length === 0) return null;\n\t\treturn {\n\t\t\tkeys: columns.map((c) => typeof c === \"string\" ? c : c.key),\n\t\t\theaders: columns.map((c) => typeof c === \"string\" ? c : c.header ?? c.key)\n\t\t};\n\t}\n\t/** Pre-compiled regex for non-whitespace detection */\n\tconst NON_WHITESPACE_REGEX = /\\S/;\n\t/**\n\t* Check if a row should be skipped as empty.\n\t* When `shouldSkipEmpty` is \"greedy\", whitespace-only rows also count as empty.\n\t*\n\t* @param row - The row to check\n\t* @param shouldSkipEmpty - true, false, or \"greedy\"\n\t* @returns true if the row should be skipped\n\t*/\n\tfunction isEmptyRow(row, shouldSkipEmpty) {\n\t\tif (!shouldSkipEmpty) return false;\n\t\tif (shouldSkipEmpty === \"greedy\") {\n\t\t\tfor (const field of row) if (NON_WHITESPACE_REGEX.test(field)) return false;\n\t\t} else for (const field of row) if (field !== \"\") return false;\n\t\treturn true;\n\t}\n\t/**\n\t* Check if all values in a row are empty strings.\n\t* Used by skipRecordsWithEmptyValues option.\n\t*\n\t* @param row - The row to check\n\t* @returns true if all fields are empty strings\n\t*/\n\tfunction hasAllEmptyValues(row) {\n\t\treturn isEmptyRow(row, true);\n\t}\n\t//#endregion\n\t//#region 
src/utils/errors.ts\n\t/**\n\t* Base class for all library errors.\n\t* Module-specific errors should extend this class.\n\t*\n\t* Features:\n\t* - Supports ES2022 error cause for error chaining\n\t* - Properly captures stack trace\n\t* - Sets correct prototype for instanceof checks\n\t* - JSON serialization support for logging\n\t*/\n\tvar BaseError = class extends Error {\n\t\tconstructor(message, options) {\n\t\t\tsuper(message, options);\n\t\t\tthis.name = \"BaseError\";\n\t\t\tObject.setPrototypeOf(this, new.target.prototype);\n\t\t\tif (Error.captureStackTrace) Error.captureStackTrace(this, this.constructor);\n\t\t}\n\t\t/**\n\t\t* Serialize error for logging/transmission.\n\t\t* Includes cause chain for debugging.\n\t\t*/\n\t\ttoJSON() {\n\t\t\treturn {\n\t\t\t\tname: this.name,\n\t\t\t\tmessage: this.message,\n\t\t\t\tstack: this.stack,\n\t\t\t\tcause: this.cause instanceof Error ? errorToJSON(this.cause) : this.cause\n\t\t\t};\n\t\t}\n\t};\n\t/**\n\t* Serialize any Error to a plain object for logging/transmission.\n\t* Handles both BaseError and native Error instances.\n\t*/\n\tfunction errorToJSON(err) {\n\t\tif (err instanceof BaseError) return err.toJSON();\n\t\treturn {\n\t\t\tname: err.name,\n\t\t\tmessage: err.message,\n\t\t\tstack: err.stack,\n\t\t\tcause: err.cause instanceof Error ? 
errorToJSON(err.cause) : err.cause\n\t\t};\n\t}\n\t//#endregion\n\t//#region src/modules/csv/errors.ts\n\t/**\n\t* CSV module error types.\n\t*/\n\t/**\n\t* Base class for all CSV-related errors.\n\t*/\n\tvar CsvError = class extends BaseError {\n\t\tconstructor(..._args) {\n\t\t\tsuper(..._args);\n\t\t\tthis.name = \"CsvError\";\n\t\t}\n\t};\n\t//#endregion\n\t//#region src/modules/csv/parse/helpers.ts\n\t/**\n\t* CSV Parse Utilities\n\t*\n\t* Shared parsing helpers used by both sync (parseCsv) and streaming (CsvParserStream)\n\t* parsers to ensure consistent behavior:\n\t*\n\t* - Header processing: Handle headers option (true/array/transform)\n\t* - Column validation: Check row length against expected column count\n\t* - Row-to-object conversion: Transform string[] to Record<string, any>\n\t* - Dynamic typing: Apply type coercion based on configuration\n\t*\n\t* These utilities are extracted to avoid code duplication between\n\t* the batch parser (parse.ts) and the streaming parser (csv-stream.ts).\n\t*/\n\t/**\n\t* Process headers from first row or configuration.\n\t* Shared logic between parseCsv and CsvParserStream.\n\t*\n\t* @param row - The current row being processed\n\t* @param options - Header processing options\n\t* @param existingHeaders - Already configured headers (for array case)\n\t* @returns Processing result or null if headers not applicable\n\t*/\n\tfunction processHeaders(row, options, existingHeaders) {\n\t\tconst { headers, groupColumnsByName = false } = options;\n\t\tif (existingHeaders !== null && Array.isArray(headers)) return null;\n\t\tlet rawHeaders;\n\t\tlet skipCurrentRow;\n\t\tif (typeof headers === \"function\") {\n\t\t\trawHeaders = headers(row);\n\t\t\tif (rawHeaders.length !== row.length) throw new CsvError(`Header function returned ${rawHeaders.length} headers but row has ${row.length} columns. 
The header function must return an array with the same length as the input row.`);\n\t\t\tskipCurrentRow = true;\n\t\t} else if (Array.isArray(headers)) {\n\t\t\trawHeaders = headers;\n\t\t\tskipCurrentRow = false;\n\t\t} else if (headers === true) {\n\t\t\trawHeaders = row;\n\t\t\tskipCurrentRow = true;\n\t\t} else return null;\n\t\tconst { headers: dedupedHeaders, renamedHeaders } = deduplicateHeadersWithRenames(rawHeaders);\n\t\treturn {\n\t\t\theaders: dedupedHeaders,\n\t\t\toriginalHeaders: groupColumnsByName ? rawHeaders.map((h) => h === null || h === void 0 ? null : String(h)) : null,\n\t\t\trenamedHeaders,\n\t\t\tskipCurrentRow\n\t\t};\n\t}\n\t/**\n\t* Validate and adjust row column count against expected headers.\n\t* Shared logic between parseCsv and CsvParserStream.\n\t*\n\t* @param row - The row to validate (will be modified in place if needed)\n\t* @param expectedCols - Expected number of columns (from headers)\n\t* @param options - Validation options\n\t* @returns Validation result\n\t*/\n\tfunction validateAndAdjustColumns(row, expectedCols, options) {\n\t\tconst { columnLess, columnMore } = options;\n\t\tconst actualCols = row.length;\n\t\tif (actualCols === expectedCols) return {\n\t\t\tisValid: true,\n\t\t\tmodified: false\n\t\t};\n\t\tif (actualCols > expectedCols) switch (columnMore) {\n\t\t\tcase \"error\": return {\n\t\t\t\tisValid: false,\n\t\t\t\terrorCode: \"TooManyFields\",\n\t\t\t\treason: `expected ${expectedCols} columns, got ${actualCols}`,\n\t\t\t\tmodified: false\n\t\t\t};\n\t\t\tcase \"truncate\":\n\t\t\t\trow.length = expectedCols;\n\t\t\t\treturn {\n\t\t\t\t\tisValid: true,\n\t\t\t\t\terrorCode: \"TooManyFields\",\n\t\t\t\t\tmodified: true\n\t\t\t\t};\n\t\t\tcase \"keep\": return {\n\t\t\t\tisValid: true,\n\t\t\t\terrorCode: \"TooManyFields\",\n\t\t\t\tmodified: true,\n\t\t\t\textras: row.splice(expectedCols)\n\t\t\t};\n\t\t\tdefault: throw new Error(`Unknown columnMore strategy: ${columnMore}`);\n\t\t}\n\t\tswitch (columnLess) 
{\n\t\t\tcase \"error\": return {\n\t\t\t\tisValid: false,\n\t\t\t\terrorCode: \"TooFewFields\",\n\t\t\t\treason: `expected ${expectedCols} columns, got ${actualCols}`,\n\t\t\t\tmodified: false\n\t\t\t};\n\t\t\tcase \"pad\":\n\t\t\t\twhile (row.length < expectedCols) row.push(\"\");\n\t\t\t\treturn {\n\t\t\t\t\tisValid: true,\n\t\t\t\t\terrorCode: \"TooFewFields\",\n\t\t\t\t\tmodified: true\n\t\t\t\t};\n\t\t\tdefault: throw new Error(`Unknown columnLess strategy: ${columnLess}`);\n\t\t}\n\t}\n\t/**\n\t* Create a safe onSkip handler that catches errors from user callback.\n\t*\n\t* The onSkip callback is user-provided and may throw errors. We wrap it\n\t* to prevent callback errors from interrupting parsing. Errors in the\n\t* callback are silently ignored since there's no good way to surface them\n\t* in the sync parsing context.\n\t*\n\t* For better error visibility in async/streaming contexts, consider\n\t* emitting a warning event on the stream instead.\n\t*/\n\tfunction createOnSkipHandler(onSkip) {\n\t\tif (!onSkip) return null;\n\t\treturn (error, record) => {\n\t\t\ttry {\n\t\t\t\tonSkip(error, record);\n\t\t\t} catch (callbackError) {}\n\t\t};\n\t}\n\t/**\n\t* Convert a row array to an object using headers.\n\t* Internal helper for convertRowToObject.\n\t*/\n\tfunction rowToObject(row, headers) {\n\t\tconst obj = Object.create(null);\n\t\tfor (let i = 0; i < headers.length; i++) {\n\t\t\tconst header = headers[i];\n\t\t\tif (header !== null && header !== void 0 && header !== \"__proto__\") obj[header] = row[i] ?? 
\"\";\n\t\t}\n\t\treturn obj;\n\t}\n\t/**\n\t* Convert a row array to an object, optionally grouping duplicate column names.\n\t* Unified function that handles both normal and grouped modes.\n\t*\n\t* @param row - The row values as an array\n\t* @param headers - The deduplicated header names\n\t* @param originalHeaders - The original (non-deduplicated) headers for grouping\n\t* @param groupColumnsByName - Whether to group duplicate column names\n\t* @returns Object with header keys and row values\n\t*/\n\tfunction convertRowToObject(row, headers, originalHeaders, groupColumnsByName) {\n\t\tif (groupColumnsByName && originalHeaders) return rowToObjectGrouped(row, originalHeaders);\n\t\treturn rowToObject(row, headers);\n\t}\n\t/**\n\t* Convert a row array to an object, grouping duplicate column names.\n\t* Internal helper for convertRowToObject.\n\t*/\n\tfunction rowToObjectGrouped(row, headers) {\n\t\tconst obj = Object.create(null);\n\t\tfor (let i = 0; i < headers.length; i++) {\n\t\t\tconst header = headers[i];\n\t\t\tif (header !== null && header !== void 0 && header !== \"__proto__\") {\n\t\t\t\tconst value = row[i] ?? 
\"\";\n\t\t\t\tif (header in obj) {\n\t\t\t\t\tconst existing = obj[header];\n\t\t\t\t\tif (Array.isArray(existing)) existing.push(value);\n\t\t\t\t\telse obj[header] = [existing, value];\n\t\t\t\t} else obj[header] = value;\n\t\t\t}\n\t\t}\n\t\treturn obj;\n\t}\n\t/**\n\t* Filter out null/undefined values from a header array.\n\t* Returns only the valid string headers.\n\t*\n\t* @param headers - Header array that may contain null/undefined values\n\t* @returns Array of valid string headers (null/undefined removed)\n\t*/\n\tfunction filterValidHeaders(headers) {\n\t\treturn headers.filter((h) => h !== null && h !== void 0);\n\t}\n\t//#endregion\n\t//#region src/modules/csv/utils/detect.ts\n\t/**\n\t* CSV Detection Utilities\n\t*\n\t* Auto-detection of CSV characteristics:\n\t* - Delimiter detection (comma, tab, semicolon, pipe, etc.)\n\t* - Line ending detection (LF, CRLF, CR)\n\t* - Quote character normalization\n\t*\n\t* This module is part of the csv/utils subsystem:\n\t* - detect.ts: Auto-detection of CSV format\n\t* - row.ts: Row format conversions (RowHashArray, headers)\n\t* - dynamic-typing.ts: Type coercion (string -> number/boolean/date)\n\t* - number.ts: Number parsing utilities\n\t* - generate.ts: Test data generation\n\t*/\n\t/**\n\t* Escape special regex characters\n\t*/\n\tfunction escapeRegex(str) {\n\t\treturn str.replace(/[.*+?^${}()|[\\]\\\\]/g, \"\\\\$&\");\n\t}\n\t/**\n\t* Normalize quote option to { enabled, char } form.\n\t* Centralizes the quote/false/null handling logic.\n\t*/\n\tfunction normalizeQuoteOption(option) {\n\t\tif (option === false || option === null) return {\n\t\t\tenabled: false,\n\t\t\tchar: \"\"\n\t\t};\n\t\treturn {\n\t\t\tenabled: true,\n\t\t\tchar: option ?? 
\"\\\"\"\n\t\t};\n\t}\n\t/**\n\t* Normalize escape option to { enabled, char } form.\n\t* Consistent with normalizeQuoteOption API design.\n\t*\n\t* @param escapeOption - User's escape option (string, false, null, or undefined)\n\t* @param quoteChar - The quote character (used as default when escape is undefined)\n\t* @returns { enabled: boolean, char: string }\n\t* - enabled=false, char=\"\" when explicitly disabled (false/null)\n\t* - enabled=true, char=quoteChar when undefined (default behavior)\n\t* - enabled=true, char=escapeOption when string provided\n\t*/\n\tfunction normalizeEscapeOption(escapeOption, quoteChar) {\n\t\tif (escapeOption === false || escapeOption === null) return {\n\t\t\tenabled: false,\n\t\t\tchar: \"\"\n\t\t};\n\t\treturn {\n\t\t\tenabled: true,\n\t\t\tchar: escapeOption ?? quoteChar\n\t\t};\n\t}\n\t/**\n\t* Common CSV delimiters to try during auto-detection\n\t* Order matters - comma is most common, then semicolon (European), tab, pipe\n\t*/\n\tconst AUTO_DETECT_DELIMITERS = [\n\t\t\",\",\n\t\t\";\",\n\t\t\"\t\",\n\t\t\"|\"\n\t];\n\t/**\n\t* Default delimiter when auto-detection fails\n\t*/\n\tconst DEFAULT_DELIMITER = \",\";\n\t/**\n\t* Characters that trigger formula escaping (CSV injection prevention).\n\t* Per OWASP recommendations, these characters at the start of a field\n\t* could be interpreted as formulas by spreadsheet applications.\n\t*\n\t* @see https://owasp.org/www-community/attacks/CSV_Injection\n\t*/\n\tconst FORMULA_ESCAPE_CHARS = new Set([\n\t\t\"=\",\n\t\t\"+\",\n\t\t\"-\",\n\t\t\"@\",\n\t\t\"\t\",\n\t\t\"\\r\",\n\t\t\"\\n\",\n\t\t\"\uFF1D\",\n\t\t\"\uFF0B\",\n\t\t\"\uFF0D\",\n\t\t\"\uFF20\"\n\t]);\n\t/**\n\t* Strip UTF-8 BOM (Byte Order Mark) from start of string if present.\n\t* Excel exports UTF-8 CSV files with BOM (\\ufeff).\n\t*\n\t* @param input - String to process\n\t* @returns String without BOM\n\t*/\n\tfunction stripBom(input) {\n\t\treturn input.charCodeAt(0) === 65279 ? 
input.slice(1) : input;\n\t}\n\t/**\n\t* Check if a string starts with a formula escape character.\n\t* Used for CSV injection prevention.\n\t*/\n\tfunction startsWithFormulaChar(str) {\n\t\treturn str.length > 0 && FORMULA_ESCAPE_CHARS.has(str[0]);\n\t}\n\t/**\n\t* Detect the line terminator used in a string.\n\t* Uses quote-aware detection to avoid detecting newlines inside quoted fields.\n\t*\n\t* @param input - String to analyze\n\t* @param quote - Quote character (default: '\"')\n\t* @returns Detected line terminator or '\\n' as default\n\t*\n\t* @example\n\t* detectLinebreak('a,b\\r\\nc,d') // '\\r\\n'\n\t* detectLinebreak('a,b\\nc,d') // '\\n'\n\t* detectLinebreak('a,b\\rc,d') // '\\r'\n\t* detectLinebreak('a,b,c') // '\\n' (default)\n\t* detectLinebreak('\"a\\nb\",c\\r\\nd') // '\\r\\n' (ignores newline in quotes)\n\t*/\n\tfunction detectLinebreak(input, quote = \"\\\"\") {\n\t\tlet inQuote = false;\n\t\tfor (let i = 0; i < input.length; i++) {\n\t\t\tconst char = input[i];\n\t\t\tif (char === quote) {\n\t\t\t\tif (inQuote && input[i + 1] === quote) {\n\t\t\t\t\ti++;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tinQuote = !inQuote;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tif (inQuote) continue;\n\t\t\tif (char === \"\\r\") return input[i + 1] === \"\\n\" ? \"\\r\\n\" : \"\\r\";\n\t\t\tif (char === \"\\n\") return \"\\n\";\n\t\t}\n\t\treturn \"\\n\";\n\t}\n\t/**\n\t* Auto-detect the delimiter used in a CSV string\n\t*\n\t* Algorithm:\n\t* 1. Sample the first few lines (up to 10) for analysis\n\t* 2. For each candidate delimiter:\n\t* - Count occurrences per line (respecting quotes)\n\t* - Check consistency: all lines should have the same count\n\t* - Higher count = more fields = better delimiter candidate\n\t* 3. Choose the delimiter with highest consistent field count\n\t*\n\t* Tie-breaking rules (in priority order):\n\t* 1. Lowest delta (variance) wins - more consistent field counts across lines\n\t* 2. 
On delta tie, highest avgFieldCount wins - more fields per row
	* 3. On complete tie, array order wins - first delimiter in delimitersToGuess
	* (default order: comma, semicolon, tab, pipe)
	*
	* @param input - CSV string to analyze
	* @param quote - Quote character (default: '"')
	* @param delimitersToGuess - Custom list of delimiters to try (default: [",", ";", "\t", "|"])
	* @param comment - Optional comment prefix; sampled lines starting with it are ignored
	* @param skipEmptyLines - Whether empty lines are skipped while sampling
	* @returns Detected delimiter or first delimiter in list
	*
	* @example
	* detectDelimiter('a,b,c\n1,2,3') // ','
	* detectDelimiter('a;b;c\n1;2;3') // ';'
	* detectDelimiter('a\tb\tc\n1\t2\t3') // '\t'
	* detectDelimiter('a:b:c\n1:2:3', '"', [':']) // ':'
	*/
	function detectDelimiter(input, quote = "\"", delimitersToGuess, comment, skipEmptyLines) {
		const delimiters = delimitersToGuess ?? AUTO_DETECT_DELIMITERS;
		const defaultDelimiter = delimiters[0] ?? DEFAULT_DELIMITER;
		// Only the first 10 usable lines are sampled.
		const lines = getSampleLines(input, 10, quote, comment, skipEmptyLines);
		if (lines.length === 0) return defaultDelimiter;
		let bestDelimiter = defaultDelimiter;
		let bestDelta;
		let bestAvgFieldCount;
		for (const delimiter of delimiters) {
			const { avgFieldCount, delta } = scoreDelimiter(lines, delimiter, quote);
			// Reject candidates that average fewer than ~2 fields per line: a
			// delimiter that rarely appears is almost certainly not the real one.
			if (avgFieldCount <= 1.99) continue;
			// Lower delta (more consistent field counts) wins; ties go to the
			// higher average field count; remaining ties keep the earlier candidate.
			if (bestDelta === void 0 || delta < bestDelta || delta === bestDelta && (bestAvgFieldCount === void 0 || avgFieldCount > bestAvgFieldCount)) {
				bestDelta = delta;
				bestAvgFieldCount = avgFieldCount;
				bestDelimiter = delimiter;
			}
		}
		return bestDelimiter;
	}
	/**
	* Get sample lines from input, skipping empty lines
	*
	* Quote-aware: newlines inside quoted sections do not terminate a sample line.
	* NOTE(review): whitespace-only lines are always dropped here (the trailing
	* `trimmed !== ""` check) regardless of skipEmptyLines — sampling only.
	*/
	function getSampleLines(input, maxLines, quote, comment, skipEmptyLines) {
		const lines = [];
		let start = 0;
		let inQuotes = false;
		const len = input.length;
		for (let i = 0; i < len && lines.length < maxLines; i++) {
			const char = input[i];
			// A doubled quote inside a quoted section is an escaped quote, not a toggle.
			if (quote && char === quote) if (inQuotes && input[i + 1] === quote) i++;
			else inQuotes = !inQuotes;
			else if (!inQuotes && (char === "\n" || char === "\r")) {
				const line = input.slice(start, i);
				// Comment lines contribute nothing to the sample.
				if (comment && line.startsWith(comment)) {} else {
					const trimmed = line.trim();
					if (!(line.length === 0 || skipEmptyLines && trimmed === "") && trimmed !== "") lines.push(line);
				}
				// Treat CRLF as a single line break.
				if (char === "\r" && input[i + 1] === "\n") i++;
				start = i + 1;
			}
		}
		// Flush the final line when the input does not end with a newline.
		if (start < len && lines.length < maxLines) {
			const line = input.slice(start);
			if (!comment || !line.startsWith(comment)) {
				const trimmed = line.trim();
				if (!(line.length === 0 || skipEmptyLines && trimmed === "") && trimmed !== "") lines.push(line);
			}
		}
		return lines;
	}
	/**
	* Score a delimiter candidate based on consistency and field count.
	*
	* @returns object with:
	*   avgFieldCount - mean number of fields per sampled line
	*   delta - sum of absolute field-count differences between consecutive
	*           lines (0 means perfectly consistent; +Infinity for no lines)
	*/
	function scoreDelimiter(lines, delimiter, quote) {
		if (lines.length === 0) return {
			avgFieldCount: 0,
			delta: Number.POSITIVE_INFINITY
		};
		let delta = 0;
		let avgFieldCount = 0;
		let prevFieldCount;
		for (const line of lines) {
			// N delimiters => N + 1 fields.
			const fieldCount = countDelimiters(line, delimiter, quote) + 1;
			avgFieldCount += fieldCount;
			if (prevFieldCount === void 0) {
				prevFieldCount = fieldCount;
				continue;
			}
			delta += Math.abs(fieldCount - prevFieldCount);
			prevFieldCount = fieldCount;
		}
		avgFieldCount /= lines.length;
		return {
			avgFieldCount,
			delta
		};
	}
	/**
	* Count delimiters in a line, respecting quoted fields.
	* Supports multi-character delimiters.
	*/
	function countDelimiters(line, delimiter, quote) {
		let count = 0;
		let inQuotes = false;
		const len = line.length;
		const delimLen = delimiter.length;
		for (let i = 0; i < len; i++) if (quote && line[i] === quote) if (inQuotes && line[i + 1] === quote) i++;
		else inQuotes = !inQuotes;
		else if (!inQuotes) {
			if (delimLen === 1) {
				if (line[i] === delimiter) count++;
			} else if (line.startsWith(delimiter, i)) {
				count++;
				// Skip the remainder of the multi-char delimiter.
				i += delimLen - 1;
			}
		}
		return count;
	}
	//#endregion
	//#region src/modules/csv/constants.ts
	/**
	* CSV Module Constants
	*
	* Shared constants used across the CSV module.
	* Extracted to avoid circular dependencies between parse-core and utils/parse.
	*/
	/**
	* Pre-compiled regex for line splitting (matches CR, LF, or CRLF)
	*/
	const DEFAULT_LINEBREAK_REGEX = /\r\n|\r|\n/;
	/**
	* Lazily initialized TextEncoder + buffers for byte length calculations.
	* Avoids eager allocation at module load time.
	*/
	let sharedTextEncoder = null;
	let singleCharBuffer = null;
	let encodeBuffer = null;
	/** Create (once) and return the shared TextEncoder plus its scratch buffers. */
	function getEncoder() {
		if (!sharedTextEncoder) {
			sharedTextEncoder = new TextEncoder();
			// 4 bytes: maximum UTF-8 length of a single code point.
			singleCharBuffer = new Uint8Array(4);
			encodeBuffer = new Uint8Array(4096);
		}
		return sharedTextEncoder;
	}
	/**
	* Get UTF-8 byte length of a string efficiently.
	* Uses fast path for ASCII-only strings and encodeInto for mixed content.
	*
	* @param text - String to measure
	* @returns UTF-8 byte length
	*/
	function getUtf8ByteLength(text) {
		const len = text.length;
		if (len === 0) return 0;
		if (len === 1) {
			if (text.charCodeAt(0) < 128) return 1;
			return getEncoder().encodeInto(text, singleCharBuffer).written;
		}
		let isAllAscii = true;
		for (let i = 0; i < len; i++) if (text.charCodeAt(i) >= 128) {
			isAllAscii = false;
			break;
		}
		// ASCII: one byte per UTF-16 code unit.
		if (isAllAscii) return len;
		const encoder = getEncoder();
		// Grow the scratch buffer; 3 bytes/code unit is the UTF-8 worst case
		// for strings measured in UTF-16 units (surrogate pairs -> 4 bytes / 2 units).
		if (len * 3 > encodeBuffer.length) encodeBuffer = new Uint8Array(len * 3);
		return encoder.encodeInto(text, encodeBuffer).written;
	}
	//#endregion
	//#region src/modules/csv/parse/config.ts
	/**
	* Create a normalized ParseConfig from options.
	* This is the single source of truth for configuration normalization,
	* used by both sync and streaming parsers.
	*
	* @param opts - { input?, options, detectedDelimiter? }
	* @returns { config, processedInput } - processedInput is only set when
	*          `input` was provided (after beforeFirstChunk and BOM stripping)
	* @throws CsvError when beforeFirstChunk returns a non-string, non-nullish value
	*
	* @example Batch parsing
	* ```ts
	* const { config, processedInput } = createParseConfig({ input: csvString, options });
	* ```
	*
	* @example Streaming parsing
	* ```ts
	* const { config } = createParseConfig({ options });
	* // Later, after delimiter detection:
	* config.delimiter = detectedDelimiter;
	* ```
	*/
	function createParseConfig(opts) {
		const { input, options, detectedDelimiter } = opts;
		const { delimiter: delimiterOption = ",", delimitersToGuess, lineEnding: lineEndingOption = "", quote: quoteOption = "\"", escape: escapeOption, skipEmptyLines = false, trim = false, ltrim = false, rtrim = false, headers = false, comment, maxRows, toLine, skipLines = 0, skipRows = 0, columnMismatch, groupColumnsByName = false, fastMode = false, dynamicTyping, castDate, beforeFirstChunk, info: infoOption = false, raw: rawOption = false, relaxQuotes = false, skipRecordsWithError = false, skipRecordsWithEmptyValues = false, onSkip, maxRowBytes } = options;
		const columnLess = columnMismatch?.less ?? "error";
		const columnMore = columnMismatch?.more ?? "error";
		let processedInput;
		if (input !== void 0) {
			processedInput = input;
			// User hook may rewrite the first chunk before any parsing happens.
			if (beforeFirstChunk) {
				const result = beforeFirstChunk(processedInput);
				if (typeof result === "string") processedInput = result;
				else if (result !== void 0 && result !== null) throw new CsvError(`beforeFirstChunk must return a string or undefined, got ${typeof result}`);
			}
			processedInput = stripBom(processedInput);
		}
		const shouldSkipEmpty = skipEmptyLines;
		const { enabled: quoteEnabled, char: quote } = normalizeQuoteOption(quoteOption);
		const escapeNormalized = normalizeEscapeOption(escapeOption, quote);
		// Escape char falls back to the quote char (RFC 4180 doubled-quote style).
		const escape = escapeNormalized.enabled ? escapeNormalized.char || quote : "";
		// Delimiter resolution order: explicit detection result, then sniffing
		// (only when input is available), then the "," default.
		let delimiter;
		if (detectedDelimiter !== void 0) delimiter = detectedDelimiter;
		else if (delimiterOption === "" && processedInput !== void 0) delimiter = detectDelimiter(processedInput, quote || "\"", delimitersToGuess, comment, shouldSkipEmpty);
		else if (delimiterOption === "") delimiter = ",";
		else delimiter = delimiterOption;
		const linebreak = lineEndingOption || (processedInput !== void 0 ? detectLinebreak(processedInput) : "\n");
		return {
			config: {
				delimiter,
				linebreak,
				// Custom line endings are passed through as a plain string
				// (String.prototype.split accepts both); the standard endings
				// share the precompiled regex.
				linebreakRegex: linebreak && linebreak !== "\n" && linebreak !== "\r\n" && linebreak !== "\r" ? linebreak : DEFAULT_LINEBREAK_REGEX,
				quote,
				escape,
				quoteEnabled,
				trimField: makeTrimField(trim, ltrim, rtrim),
				// Lets hot paths skip the trim call entirely.
				trimFieldIsIdentity: !trim && !ltrim && !rtrim,
				shouldSkipEmpty,
				skipLines,
				skipRows,
				maxRows,
				toLine,
				maxRowBytes,
				comment,
				fastMode,
				relaxQuotes,
				columnLess,
				columnMore,
				groupColumnsByName,
				skipRecordsWithError,
				skipRecordsWithEmptyValues,
				infoOption,
				rawOption,
				dynamicTyping,
				castDate,
				invokeOnSkip: createOnSkipHandler(onSkip),
				headers
			},
			processedInput
		};
	}
	/**
	* Resolve options into a normalized config object.
	* Convenience wrapper around createParseConfig that ensures processedInput is non-null.
	* NOTE(review): non-nullness relies on `input` being a string here — no runtime check.
	*/
	function resolveParseConfig(input, options) {
		const result = createParseConfig({
			input,
			options
		});
		return {
			config: result.config,
			processedInput: result.processedInput
		};
	}
	/**
	* Convert ParseConfig to ScannerConfig (the subset the field scanner needs)
	*/
	function toScannerConfig(config) {
		return {
			delimiter: config.delimiter,
			quote: config.quote,
			escape: config.escape,
			quoteEnabled: config.quoteEnabled,
			relaxQuotes: config.relaxQuotes
		};
	}
	/**
	* Create a trim function based on options.
	* ltrim + rtrim together collapse to a full trim.
	*/
	function makeTrimField(trim, ltrim, rtrim) {
		if (trim || ltrim && rtrim) return (s) => s.trim();
		if (ltrim) return (s) => s.trimStart();
		if (rtrim) return (s) => s.trimEnd();
		return (s) => s;
	}
	//#endregion
	//#region src/modules/csv/parse/state.ts
	/**
	* Create initial parse state with optional header configuration.
	*
	* headers === true: first data row becomes the header row.
	* headers is an array: it is pre-processed here and the first row is kept as data.
	* headers is a function: header handling is deferred to row processing.
	*/
	function createParseState(config) {
		const state = {
			lineNumber: 0,
			dataRowCount: 0,
			skippedDataRows: 0,
			truncated: false,
			headerRow: null,
			originalHeaders: null,
			useHeaders: false,
			headerRowProcessed: false,
			renamedHeadersForMeta: null,
			// Info tracking uses 1-based line numbers.
			currentRowStartLine: config.infoOption ? 1 : 0,
			currentRowStartOffset: 0,
			currentRowQuoted: [],
			currentRawRow: ""
		};
		const { headers, groupColumnsByName } = config;
		if (headers === true) state.useHeaders = true;
		else if (Array.isArray(headers)) {
			const result = processHeaders([], {
				headers,
				groupColumnsByName
			}, null);
			if (result) {
				state.headerRow = result.headers;
				state.originalHeaders = result.originalHeaders;
				state.renamedHeadersForMeta = result.renamedHeaders;
			}
			state.useHeaders = true;
			// Explicit headers: no data row is consumed as a header row.
			state.headerRowProcessed = true;
		} else if (typeof headers === "function") state.useHeaders = true;
		return state;
	}
	/**
	* Reset info state for next row
	*/
	function resetInfoState(state, trackInfo, trackRaw, nextLine, nextOffset) {
		if (trackInfo) {
			state.currentRowQuoted = [];
			state.currentRowStartLine = nextLine;
			state.currentRowStartOffset = nextOffset;
		}
		if (trackRaw) state.currentRawRow = "";
	}
	/**
	* Pre-allocated frozen array of false values for fast mode quoted tracking.
	* In fast mode (no quote detection), all fields are unquoted, so we can
	* return a shared reference instead of allocating per row.
	*
	* IMPORTANT: This array is frozen and must NOT be modified.
	* Callers should copy if they need to store/modify the values.
	*/
	const SHARED_FALSE_ARRAY_SIZE = 256;
	const SHARED_FALSE_ARRAY = Object.freeze(new Array(SHARED_FALSE_ARRAY_SIZE).fill(false));
	/**
	* Get a shared array of false values for unquoted field tracking.
	* Returns a frozen shared reference for common cases to avoid per-row allocation.
	*
	* IMPORTANT: The returned array must NOT be modified. 
/**
	* If you need to store the returned values, make a copy:
	* `[...getUnquotedArray(n)]` or `.slice(0, n)`.
	*
	* @param length - Number of fields in the row
	* @returns Shared frozen array (for length <= 256) or new array (for larger rows)
	*/
	function getUnquotedArray(length) {
		if (length <= SHARED_FALSE_ARRAY_SIZE) return SHARED_FALSE_ARRAY;
		return new Array(length).fill(false);
	}
	// NOTE(review): bundler residue — the zero-padded "00".."59" table is built
	// and immediately discarded; kept to avoid churn in generated output.
	Array.from({ length: 60 }, (_, i) => i < 10 ? `0${i}` : `${i}`);
	// Character codes used by the zero-allocation date parsers below.
	const C_0 = 48;
	const C_DASH = 45;
	const C_SLASH = 47;
	const C_COLON = 58;
	const C_T = 84;
	const C_SPACE = 32;
	const C_Z = 90;
	const C_PLUS = 43;
	const C_DOT = 46;
	// Parse two / four consecutive ASCII digits starting at index i.
	const digit2 = (s, i) => (s.charCodeAt(i) - C_0) * 10 + s.charCodeAt(i + 1) - C_0 | 0;
	const digit4 = (s, i) => (s.charCodeAt(i) - C_0) * 1e3 + (s.charCodeAt(i + 1) - C_0) * 100 + (s.charCodeAt(i + 2) - C_0) * 10 + s.charCodeAt(i + 3) - C_0 | 0;
	// Maximum day per month (1-indexed). February is 29 here; the non-leap-year
	// Feb 29 case is caught by the Date rollover check in the validators below.
	const DAYS_IN_MONTH = [
		0,
		31,
		29,
		31,
		30,
		31,
		30,
		31,
		31,
		30,
		31,
		30,
		31
	];
	/**
	* Validate a calendar date and build a local-time Date.
	* Uses Date rollover (e.g. Feb 29, 2023 becomes Mar 1) to reject dates that
	* pass the static DAYS_IN_MONTH bound but do not exist in that year.
	* @returns the Date, or null when the date is invalid
	*/
	function validateDate(y, m, d) {
		if (m < 1 || m > 12 || d < 1 || d > DAYS_IN_MONTH[m]) return null;
		const date = new Date(y, m - 1, d);
		return date.getMonth() === m - 1 ? date : null;
	}
	/**
	* Validate a calendar date plus time-of-day and build a local-time Date.
	*
	* FIX: previously this function skipped the Date rollover check that
	* validateDate performs, so "2023-02-29 12:00:00" silently parsed as
	* Mar 1 while plain "2023-02-29" was rejected. The check now matches
	* validateDate, keeping date and date-time parsing consistent.
	* @returns the Date, or null when the date or time is invalid
	*/
	function validateDateTime(y, m, d, h, min, s) {
		if (m < 1 || m > 12 || d < 1 || d > DAYS_IN_MONTH[m]) return null;
		if (h > 23 || min > 59 || s > 59) return null;
		const date = new Date(y, m - 1, d, h, min, s);
		return date.getMonth() === m - 1 ? date : null;
	}
	/** YYYY-MM-DD (local time) */
	function parseISO(s) {
		if (s.charCodeAt(4) !== C_DASH || s.charCodeAt(7) !== C_DASH) return null;
		return validateDate(digit4(s, 0), digit2(s, 5), digit2(s, 8));
	}
	/** YYYY-MM-DDTHH:mm:ss (local time) */
	function parseISOT(s) {
		if (s.charCodeAt(4) !== C_DASH || s.charCodeAt(7) !== C_DASH || s.charCodeAt(10) !== C_T || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;
		return validateDateTime(digit4(s, 0), digit2(s, 5), digit2(s, 8), digit2(s, 11), digit2(s, 14), digit2(s, 17));
	}
	/** YYYY-MM-DD HH:mm:ss (local time) */
	function parseISOSpace(s) {
		if (s.charCodeAt(4) !== C_DASH || s.charCodeAt(7) !== C_DASH || s.charCodeAt(10) !== C_SPACE || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;
		return validateDateTime(digit4(s, 0), digit2(s, 5), digit2(s, 8), digit2(s, 11), digit2(s, 14), digit2(s, 17));
	}
	/** YYYY-MM-DDTHH:mm:ssZ — delegates to the native Date parser (UTC) */
	function parseISOZ(s) {
		if (s.charCodeAt(19) !== C_Z) return null;
		const d = new Date(s);
		return isNaN(d.getTime()) ? null : d;
	}
	/** YYYY-MM-DDTHH:mm:ss.SSSZ — delegates to the native Date parser (UTC) */
	function parseISOMsZ(s) {
		if (s.charCodeAt(19) !== C_DOT || s.charCodeAt(23) !== C_Z) return null;
		const d = new Date(s);
		return isNaN(d.getTime()) ? null : d;
	}
	/** YYYY-MM-DDTHH:mm:ss+HH:mm — delegates to the native Date parser */
	function parseISOOffset(s) {
		const c = s.charCodeAt(19);
		if (c !== C_PLUS && c !== C_DASH) return null;
		const d = new Date(s);
		return isNaN(d.getTime()) ? null : d;
	}
	/** YYYY-MM-DDTHH:mm:ss.SSS+HH:mm — delegates to the native Date parser */
	function parseISOMsOffset(s) {
		if (s.charCodeAt(19) !== C_DOT) return null;
		const c = s.charCodeAt(23);
		if (c !== C_PLUS && c !== C_DASH) return null;
		const d = new Date(s);
		return isNaN(d.getTime()) ? null : d;
	}
	/** MM-DD-YYYY or MM/DD/YYYY (separator must be consistent) */
	function parseUS(s) {
		const sep = s.charCodeAt(2);
		if (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;
		return validateDate(digit4(s, 6), digit2(s, 0), digit2(s, 3));
	}
	/** DD-MM-YYYY or DD/MM/YYYY (separator must be consistent) */
	function parseEU(s) {
		const sep = s.charCodeAt(2);
		if (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;
		return validateDate(digit4(s, 6), digit2(s, 3), digit2(s, 0));
	}
	/** MM-DD-YYYY HH:mm:ss / MM/DD/YYYY HH:mm:ss */
	function parseUSTime(s) {
		const sep = s.charCodeAt(2);
		if (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;
		if (s.charCodeAt(10) !== C_SPACE || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;
		return validateDateTime(digit4(s, 6), digit2(s, 0), digit2(s, 3), digit2(s, 11), digit2(s, 14), digit2(s, 17));
	}
	/** DD-MM-YYYY HH:mm:ss / DD/MM/YYYY HH:mm:ss */
	function parseEUTime(s) {
		const sep = s.charCodeAt(2);
		if (sep !== C_DASH && sep !== C_SLASH || s.charCodeAt(5) !== sep) return null;
		if (s.charCodeAt(10) !== C_SPACE || s.charCodeAt(13) !== C_COLON || s.charCodeAt(16) !== C_COLON) return null;
		return validateDateTime(digit4(s, 6), digit2(s, 3), digit2(s, 0), digit2(s, 11), digit2(s, 14), digit2(s, 17));
	}
	/** Named-format lookup table used by DateParser.create */
	const PARSERS = {
		"YYYY-MM-DD": parseISO,
		"YYYY-MM-DD[T]HH:mm:ss": parseISOT,
		"YYYY-MM-DD HH:mm:ss": parseISOSpace,
		"YYYY-MM-DD[T]HH:mm:ssZ": (s) => s.length === 20 ? parseISOZ(s) : s.length === 25 ? parseISOOffset(s) : null,
		"YYYY-MM-DD[T]HH:mm:ss.SSSZ": (s) => s.length === 24 ? parseISOMsZ(s) : s.length === 29 ? parseISOMsOffset(s) : null,
		"MM-DD-YYYY": parseUS,
		"MM-DD-YYYY HH:mm:ss": parseUSTime,
		"MM/DD/YYYY HH:mm:ss": parseUSTime,
		"DD-MM-YYYY": parseEU,
		"DD-MM-YYYY HH:mm:ss": parseEUTime,
		"DD/MM/YYYY HH:mm:ss": parseEUTime
	};
	// [expected string length, candidate parsers] pairs for ISO auto-detection.
	const AUTO_DETECT = [
		[10, [parseISO]],
		[19, [parseISOT, parseISOSpace]],
		[20, [parseISOZ]],
		[24, [parseISOMsZ]],
		[25, [parseISOOffset]],
		[29, [parseISOMsOffset]]
	];
	/**
	* Optimized date parser for batch processing
	*
	* @example
	* const parser = DateParser.create(["YYYY-MM-DD"]);
	* const dates = parser.parseAll(csvStrings);
	*/
	var DateParser = class DateParser {
		constructor(fns) {
			// parse is an instance arrow function so it can be detached
			// (e.g. `const parse = this.parse`) without losing `this`.
			this.parse = (value) => {
				if (!value) return null;
				const s = value.trim();
				if (!s) return null;
				// Single-format fast path avoids the loop.
				if (this.single) return this.fn0(s);
				for (let i = 0, len = this.fns.length; i < len; i++) {
					const r = this.fns[i](s);
					if (r) return r;
				}
				return null;
			};
			this.fns = fns;
			this.single = fns.length === 1;
			this.fn0 = fns[0];
		}
		/** Create parser for specific formats (unknown format names are ignored) */
		static create(formats) {
			return new DateParser(formats.map((f) => PARSERS[f]).filter(Boolean));
		}
		/** Create parser for auto-detecting ISO formats */
		static iso() {
			const fns = [];
			for (const [, parsers] of AUTO_DETECT) fns.push(...parsers);
			return new DateParser(fns);
		}
		/** Parse array of values (null entries for unparseable values) */
		parseAll(values) {
			const len = values.length;
			const out = new Array(len);
			const parse = this.parse;
			for (let i = 0; i < len; i++) out[i] = parse(values[i]);
			return out;
		}
		/** Parse and filter valid dates */
		parseValid(values) {
			const out = [];
			const parse = this.parse;
			for (let i = 0, len = values.length; i < len; i++) {
				const d = parse(values[i]);
				if (d) out.push(d);
			}
			return out;
		}
	};
	//#endregion
	//#region src/modules/csv/utils/dynamic-typing.ts
	/**
	* CSV Dynamic Typing - Automatic Type Conversion
	*
	* Functions for converting CSV string values to appropriate JavaScript types.
	* Supports boolean, number, null detection with customizable per-column config.
	*/
	/**
	* Pre-compiled regex for valid number format detection.
	* Matches integers, decimals, and scientific notation.
	* Pre-compiling avoids regex compilation overhead in the hot path.
	*/
	const NUMERIC_REGEX = /^-?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?$/;
	let isoDateParser = null;
	/**
	* Get or create the ISO date parser singleton
	*/
	function getIsoDateParser() {
		if (!isoDateParser) isoDateParser = DateParser.iso();
		return isoDateParser;
	}
	/**
	* Try to parse a string as an ISO date.
	* Returns the Date if successful, or null if not a valid date.
	*
	* Supported formats:
	* - YYYY-MM-DD
	* - YYYY-MM-DDTHH:mm:ss
	* - YYYY-MM-DD HH:mm:ss
	* - YYYY-MM-DDTHH:mm:ssZ
	* - YYYY-MM-DDTHH:mm:ss.SSSZ
	* - YYYY-MM-DDTHH:mm:ss+HH:mm
	*/
	function tryParseDate(value) {
		// 10 chars is the shortest supported form (YYYY-MM-DD).
		if (!value || value.length < 10) return null;
		return getIsoDateParser().parse(value);
	}
	/**
	* Check if castDate config enables date parsing for a column.
	* true => all columns; array => only listed column names.
	*/
	function shouldCastDate(castDate, columnName) {
		if (!castDate) return false;
		if (castDate === true) return true;
		if (Array.isArray(castDate) && typeof columnName === "string") return castDate.includes(columnName);
		return false;
	}
	/**
	* Check if a charCode matches a lowercase letter (case-insensitive).
	* @param code - The charCode to check
	* @param lowercaseCode - The lowercase letter's charCode to match against
	* @returns true if code matches (case-insensitive)
	*/
	function isCharEqualIgnoreCase(code, lowercaseCode) {
		// Uppercase ASCII is exactly 32 below its lowercase counterpart.
		return code === lowercaseCode || code === lowercaseCode - 32;
	}
	/**
	* Convert a string value to its appropriate JavaScript type.
	* Used internally by dynamicTyping feature.
	*
	* Conversion rules:
	* - Empty string -> "" (unchanged)
	* - "true"/"TRUE"/"True" -> true
	* - "false"/"FALSE"/"False" -> false
	* - "null"/"NULL" -> null
	* - Numeric strings -> number (int or float)
	* - Everything else -> original string
	*
	* Special cases:
	* - Leading zeros (e.g., "007") -> preserved as string (for zip codes, IDs)
	* - "Infinity", "-Infinity", "NaN" -> corresponding number values
	*/
	function convertValue(value) {
		const len = value.length;
		if (len === 0) return "";
		const firstChar = value.charCodeAt(0);
		if (len === 4) {
			// "true" (116 't' / 84 'T', then r-u-e case-insensitive)
			if ((firstChar === 116 || firstChar === 84) && isCharEqualIgnoreCase(value.charCodeAt(1), 114) && isCharEqualIgnoreCase(value.charCodeAt(2), 117) && isCharEqualIgnoreCase(value.charCodeAt(3), 101)) return true;
			// "null" (110 'n' / 78 'N', then u-l-l)
			if ((firstChar === 110 || firstChar === 78) && isCharEqualIgnoreCase(value.charCodeAt(1), 117) && isCharEqualIgnoreCase(value.charCodeAt(2), 108) && isCharEqualIgnoreCase(value.charCodeAt(3), 108)) return null;
		} else if (len === 5 && (firstChar === 102 || firstChar === 70) && isCharEqualIgnoreCase(value.charCodeAt(1), 97) && isCharEqualIgnoreCase(value.charCodeAt(2), 108) && isCharEqualIgnoreCase(value.charCodeAt(3), 115) && isCharEqualIgnoreCase(value.charCodeAt(4), 101)) return false;
		// Numeric candidates start with a digit, '-', '.', 'I'(nfinity) or 'N'(aN).
		if (firstChar >= 48 && firstChar <= 57 || firstChar === 45 || firstChar === 46 || firstChar === 73 || firstChar === 78) {
			// Trailing whitespace/control char: leave as string.
			if (value.charCodeAt(len - 1) <= 32) return value;
			if (value === "Infinity") return Infinity;
			if (value === "-Infinity") return -Infinity;
			if (value === "NaN") return NaN;
			// Preserve leading-zero identifiers like "007" / "-012" as strings.
			if (firstChar === 48 && len > 1) {
				const secondChar = value.charCodeAt(1);
				if (secondChar >= 48 && secondChar <= 57) return value;
			}
			if (firstChar === 45 && len > 2 && value.charCodeAt(1) === 48) {
				const thirdChar = value.charCodeAt(2);
				if (thirdChar >= 48 && thirdChar <= 57) return value;
			}
			if (NUMERIC_REGEX.test(value)) {
				const num = Number(value);
				if (!isNaN(num)) return num;
			}
		}
		return value;
	}
	/**
	* Type guard to check if dynamicTyping config has custom converter function
	*/
	function isCustomConverter(config) {
		return typeof config === "function";
	}
	/**
	* Apply dynamic typing to a single field value
	*
	* @param value - The string value to convert
	* @param columnConfig - Column-specific config (true, false, or custom function)
	* @returns Converted value
	*/
	function applyDynamicTyping(value, columnConfig) {
		if (columnConfig === false) return value;
		if (isCustomConverter(columnConfig)) return columnConfig(value);
		return convertValue(value);
	}
	/**
	* Apply dynamic typing and/or date casting to a single value.
	* Unified helper used by both object and array row processing.
	* Date casting takes precedence over dynamic typing.
	*
	* @param value - The string value to convert
	* @param columnName - Column identifier (string for objects, can be used for per-column config)
	* @param dynamicTyping - DynamicTyping configuration
	* @param castDate - CastDate configuration
	* @returns Converted value
	*/
	function convertSingleValue(value, columnName, dynamicTyping, castDate) {
		if (shouldCastDate(castDate, columnName)) {
			const dateValue = tryParseDate(value);
			if (dateValue !== null) return dateValue;
		}
		if (dynamicTyping === true) return convertValue(value);
		if (dynamicTyping === false) return value;
		// Per-column config requires a column name to look up.
		if (columnName === void 0) return value;
		const config = dynamicTyping[columnName];
		if (config === void 0) return value;
		return applyDynamicTyping(value, config);
	}
/**
	* Apply dynamic typing to an entire row (object form).
	*
	* Performance: Converts values IN PLACE to avoid allocating a new object.
	* The input object is mutated and returned with converted values.
	*
	* @param row - Row object with string values (will be mutated)
	* @param dynamicTyping - DynamicTyping configuration
	* @param castDate - CastDate configuration for date parsing
	* @returns The same row object with converted values
	*/
	function applyDynamicTypingToRow(row, dynamicTyping, castDate) {
		if (dynamicTyping === false && !castDate) return row;
		for (const key in row) if (Object.hasOwn(row, key)) row[key] = convertSingleValue(row[key], key, dynamicTyping, castDate);
		return row;
	}
	/**
	* Apply dynamic typing to an array row
	*
	* @param row - Row array with string values
	* @param headers - Header names (for per-column config lookup)
	* @param dynamicTyping - DynamicTyping configuration
	* @param castDate - CastDate configuration for date parsing
	* @returns New row array with converted values (or the original array when
	*          no conversion applies)
	*/
	function applyDynamicTypingToArrayRow(row, headers, dynamicTyping, castDate) {
		if (dynamicTyping === false && !castDate) return row;
		// Per-column config is addressed by header name, so it needs headers.
		if (dynamicTyping !== true && dynamicTyping !== false && !headers) return row;
		return row.map((value, index) => {
			const columnName = headers?.[index];
			return convertSingleValue(value, columnName, dynamicTyping, castDate);
		});
	}
	//#endregion
	//#region src/modules/csv/parse/row-processor.ts
	/**
	* Process headers from a row (first data row or configured headers)
	* Returns true if the row should be skipped (was used as headers)
	*/
	function processHeaderRow(row, state, config) {
		const result = processHeaders(row, {
			headers: config.headers,
			groupColumnsByName: config.groupColumnsByName
		}, state.headerRow);
		if (result) {
			state.headerRow = result.headers;
			state.originalHeaders = result.originalHeaders;
			state.renamedHeadersForMeta = result.renamedHeaders;
			state.headerRowProcessed = true;
			return result.skipCurrentRow;
		}
		// Header processing declined; mark done so it is not retried per row.
		state.headerRowProcessed = true;
		return false;
	}
	/**
	* Validate row column count against headers
	* Returns error info if validation fails, null otherwise.
	* When counts differ but the configured policy accepts the row,
	* the row may have been adjusted in place by validateAndAdjustColumns.
	*/
	function validateRowColumns(row, state, config) {
		if (!state.headerRow || state.headerRow.length === 0) return null;
		const expectedCols = state.headerRow.length;
		const actualCols = row.length;
		if (actualCols === expectedCols) return null;
		const validation = validateAndAdjustColumns(row, expectedCols, {
			columnLess: config.columnLess,
			columnMore: config.columnMore
		});
		if (validation.errorCode) return {
			errorCode: validation.errorCode,
			message: validation.errorCode === "TooManyFields" ? `Too many fields: expected ${expectedCols}, found ${actualCols}` : `Too few fields: expected ${expectedCols}, found ${actualCols}`,
			isValid: validation.isValid,
			reason: validation.reason,
			extras: validation.extras
		};
		return null;
	}
	/**
	* Build record info for a completed row
	* @param fieldCount - number of fields; quoted flags are truncated to it
	*/
	function buildRecordInfo(state, dataRowIndex, includeRaw, fieldCount) {
		const info = {
			index: dataRowIndex,
			line: state.currentRowStartLine,
			offset: state.currentRowStartOffset,
			quoted: state.currentRowQuoted.slice(0, fieldCount)
		};
		if (includeRaw) info.raw = state.currentRawRow;
		return info;
	}
	/**
	* Convert a raw row to an object record with optional dynamic typing.
	* Without headers the record is keyed by numeric column index.
	*/
	function rowToRecord(row, state, config) {
		if (state.headerRow) {
			let record = convertRowToObject(row, state.headerRow, state.originalHeaders, config.groupColumnsByName);
			if (config.dynamicTyping || config.castDate) record = applyDynamicTypingToRow(record, config.dynamicTyping || false, config.castDate);
			return record;
		}
		const result = {};
		for (let i = 0; i < row.length; i++) result[i] = row[i];
		return result;
	}
	/**
	* Process a completed row through headers, validation, etc.
	* This is the core row processing logic shared between sync and streaming parsers.
	*
	* Pipeline order: header capture -> skipRows -> column-count validation ->
	* empty-row skipping -> maxRows truncation -> info/meta construction.
	* @returns { stop, skipped, row?, info?, extras?, error?, reason? }
	*/
	function processCompletedRow(row, state, config, errors, lineNumber) {
		if (state.useHeaders && !state.headerRowProcessed) {
			if (processHeaderRow(row, state, config)) return {
				stop: false,
				skipped: true
			};
		}
		// skipRows counts data rows (after any header row has been consumed).
		if (state.skippedDataRows < config.skipRows) {
			state.skippedDataRows++;
			return {
				stop: false,
				skipped: true
			};
		}
		const validationError = validateRowColumns(row, state, config);
		let extras;
		if (validationError) {
			const errorObj = {
				code: validationError.errorCode,
				message: validationError.message,
				line: lineNumber
			};
			// The error is recorded even when the row is ultimately kept.
			errors.push(errorObj);
			if (!validationError.isValid) {
				if (config.skipRecordsWithError) {
					config.invokeOnSkip?.({
						code: validationError.errorCode,
						message: validationError.reason || "Column mismatch",
						line: lineNumber
					}, row);
					return {
						stop: false,
						skipped: true,
						row,
						error: {
							code: validationError.errorCode,
							message: validationError.reason || "Column mismatch",
							line: lineNumber
						},
						reason: validationError.reason || "Column mismatch"
					};
				}
				return {
					stop: false,
					skipped: true,
					row,
					error: errorObj,
					reason: validationError.reason || "Column mismatch"
				};
			}
			extras = validationError.extras;
		}
		if (config.skipRecordsWithEmptyValues && hasAllEmptyValues(row)) return {
			stop: false,
			skipped: true
		};
		// maxRows limit: signal the caller to stop feeding rows.
		if (config.maxRows !== void 0 && state.dataRowCount >= config.maxRows) {
			state.truncated = true;
			return {
				stop: true,
				skipped: false
			};
		}
		state.dataRowCount++;
		let info;
		if (config.infoOption) info = buildRecordInfo(state, state.dataRowCount - 1, config.rawOption, row.length);
		return {
			stop: false,
			skipped: false,
			row,
			info,
			extras
		};
	}
	//#endregion
	//#region src/modules/csv/parse/scanner/scanner.ts
	/**
	* Find the next newline position and determine its type.
	*
	* @returns [position, length] where length is 1 for \n or lone \r, 2 for
	* \r\n, or -1 for a \r at the very end of the buffer (it may be the first
	* half of a CRLF split across chunks); [-1, 0] if no newline is found
	*/
	function findNewline(input, start) {
		const len = input.length;
		const lfPos = input.indexOf("\n", start);
		const crPos = input.indexOf("\r", start);
		if (lfPos === -1 && crPos === -1) return [-1, 0];
		if (crPos === -1 || lfPos !== -1 && lfPos < crPos) return [lfPos, 1];
		if (crPos + 1 < len) return input[crPos + 1] === "\n" ? [crPos, 2] : [crPos, 1];
		// CR at buffer edge: cannot tell yet whether it is CRLF.
		return [crPos, -1];
	}
	/**
	* Check if position is at a delimiter (supports multi-character delimiters).
	*/
	function isAtDelimiter(input, pos, delimiter) {
		if (delimiter.length === 1) return input[pos] === delimiter;
		return input.startsWith(delimiter, pos);
	}
	/**
	* Find the next delimiter position (supports multi-character delimiters).
	*/
	function findDelimiter(input, start, delimiter) {
		return input.indexOf(delimiter, start);
	}
	/**
	* Scan a quoted field starting at the opening quote.
	*
	* Handles:
	* - Escaped quotes (RFC 4180: "" -> ")
	* - Backslash escapes when escape !== quote
	* - CRLF normalization inside quoted fields (CRLF -> LF)
	* - relaxQuotes mode (allow unescaped quotes mid-field)
	*
	* Performance optimization: Uses array to collect segments instead of
	* string concatenation to avoid O(n^2) string building in fields with
	* many escaped quotes or embedded newlines.
	*
	* Streaming: when the field may continue past the buffer and isEof is
	* false, the result has needMore=true and resumePos pointing back at the
	* opening quote so the caller can retry with more data.
	*
	* @param input - Input string
	* @param start - Position of opening quote
	* @param config - Scanner configuration
	* @param isEof - Whether this is the end of input
	* @returns Field scan result
	*/
	function scanQuotedField(input, start, config, isEof) {
		const { quote, escape, delimiter, relaxQuotes } = config;
		const len = input.length;
		let pos = start + 1;
		let segments = null;
		let segmentStart = pos;
		// Joins collected segments plus the trailing literal run [segmentStart, endPos).
		const buildValue = (endPos) => {
			const lastSegment = endPos > segmentStart ? input.slice(segmentStart, endPos) : "";
			if (segments === null) return lastSegment;
			if (lastSegment) segments.push(lastSegment);
			return segments.length === 1 ? segments[0] : segments.join("");
		};
		while (pos < len) {
			const char = input[pos];
			if (escape && char === escape) {
				// escape + quote -> literal quote
				if (pos + 1 < len && input[pos + 1] === quote) {
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push(quote);
					pos += 2;
					segmentStart = pos;
					continue;
				}
				// escape + escape -> literal escape (backslash-style escapes only)
				if (escape !== quote && pos + 1 < len && input[pos + 1] === escape) {
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push(escape);
					pos += 2;
					segmentStart = pos;
					continue;
				}
				// RFC 4180 mode: a lone quote here may close the field.
				if (escape === quote) {
					if (pos + 1 >= len) {
						// Quote at buffer edge: could be "" continued in next chunk.
						if (!isEof) return {
							value: buildValue(pos),
							quoted: true,
							endPos: pos,
							needMore: true,
							resumePos: start
						};
						return {
							value: buildValue(pos),
							quoted: true,
							endPos: pos + 1,
							needMore: false
						};
					}
					const nextChar = input[pos + 1];
					// Closing quote followed by delimiter or newline ends the field.
					if ((delimiter.length === 1 ? nextChar === delimiter : isAtDelimiter(input, pos + 1, delimiter)) || nextChar === "\n" || nextChar === "\r") return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos + 1,
						needMore: false
					};
					// relaxQuotes: keep the stray quote as field content.
					if (relaxQuotes) {
						if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
						(segments ??= []).push(quote);
						pos++;
						segmentStart = pos;
						continue;
					}
					// Strict mode: treat it as the closing quote anyway.
					return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos + 1,
						needMore: false
					};
				}
			}
			// Separate closing-quote handling for backslash-escape mode.
			if (char === quote && escape !== quote) {
				if (pos + 1 >= len) {
					if (!isEof) return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos,
						needMore: true,
						resumePos: start
					};
					return {
						value: buildValue(pos),
						quoted: true,
						endPos: pos + 1,
						needMore: false
					};
				}
				const nextChar = input[pos + 1];
				if ((delimiter.length === 1 ? nextChar === delimiter : isAtDelimiter(input, pos + 1, delimiter)) || nextChar === "\n" || nextChar === "\r") return {
					value: buildValue(pos),
					quoted: true,
					endPos: pos + 1,
					needMore: false
				};
				if (relaxQuotes) {
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push(quote);
					pos++;
					segmentStart = pos;
					continue;
				}
				return {
					value: buildValue(pos),
					quoted: true,
					endPos: pos + 1,
					needMore: false
				};
			}
			// Normalize CR and CRLF inside the quoted field to a single LF.
			if (char === "\r") {
				if (pos + 1 < len) {
					if (input[pos + 1] === "\n") {
						if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
						(segments ??= []).push("\n");
						pos += 2;
						segmentStart = pos;
						continue;
					}
					if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
					(segments ??= []).push("\n");
					pos++;
					segmentStart = pos;
					continue;
				}
				// CR at buffer edge: may be half of a CRLF — request more input.
				if (!isEof) return {
					value: buildValue(pos),
					quoted: true,
					endPos: pos,
					needMore: true,
					resumePos: start
				};
				if (pos > segmentStart) (segments ??= []).push(input.slice(segmentStart, pos));
				(segments ??= []).push("\n");
				pos++;
				segmentStart = pos;
				continue;
			}
			pos++;
		}
		// Ran off the end without a closing quote.
		if (!isEof) return {
			value: buildValue(pos),
			quoted: true,
			endPos: pos,
			needMore: true,
			resumePos: start
		};
		return {
			value: buildValue(pos),
			quoted: true,
			endPos: pos,
			needMore: false,
			unterminated: true
		};
	}
	/**
	* Scan an unquoted field using indexOf for batch searching.
	*
	* This is 
/**
* Scan an unquoted field using indexOf for batch searching.
*
* This is the performance-critical path for most CSV files.
* Uses indexOf to find the next delimiter or newline in O(n) time
* with optimized native string search.
*
* @param input - Input string
* @param start - Starting position
* @param config - Scanner configuration
* @param isEof - Whether this is the end of input
* @returns Field scan result
*/
function scanUnquotedField(input, start, config, isEof) {
	const { delimiter } = config;
	const inputLength = input.length;
	const nextDelim = findDelimiter(input, start, delimiter);
	const [nextNewline, newlineLen] = findNewline(input, start);

	// Neither terminator found: the field runs to the end of the buffer.
	if (nextDelim === -1 && nextNewline === -1) {
		const value = input.slice(start);
		return isEof
			? { value, quoted: false, endPos: inputLength, needMore: false }
			: { value, quoted: false, endPos: inputLength, needMore: true, resumePos: start };
	}

	// Terminate at whichever of delimiter / newline comes first.
	let fieldEnd;
	let endedAtNewline;
	if (nextDelim !== -1 && (nextNewline === -1 || nextDelim < nextNewline)) {
		fieldEnd = nextDelim;
		endedAtNewline = false;
	} else {
		fieldEnd = nextNewline;
		endedAtNewline = true;
	}

	const value = input.slice(start, fieldEnd);
	// newlineLen === -1 presumably marks a bare "\r" at the buffer edge that may
	// be half of "\r\n" — without EOF we must wait for more data (TODO confirm
	// against findNewline's contract).
	if (endedAtNewline && newlineLen === -1 && !isEof) {
		return { value, quoted: false, endPos: fieldEnd, needMore: true, resumePos: start };
	}
	return { value, quoted: false, endPos: fieldEnd, needMore: false };
}
/**
* Scan a complete row from the input string.
*
* Alternates between quoted-field scanning (scanQuotedField) and unquoted-field
* scanning (scanUnquotedField) until a newline or end of input terminates the row.
*
* @param input - Input string
* @param start - Starting position
* @param config - Scanner configuration
* @param isEof - Whether this is the end of input
* @param outFields - Optional reusable array for fields (will be cleared)
* @param outQuoted - Optional reusable array for quoted flags (will be cleared)
* @returns Row scan result with rawStart/rawEnd for zero-copy raw row extraction
*/
function scanRow(input, start, config, isEof, outFields, outQuoted) {
	const { delimiter, quote, quoteEnabled } = config;
	const delimLen = delimiter.length;
	const len = input.length;
	// Reuse caller-supplied arrays when provided to avoid per-row allocations.
	const fields = outFields ?? [];
	const quoted = outQuoted ?? [];
	if (outFields) outFields.length = 0;
	if (outQuoted) outQuoted.length = 0;
	let pos = start;
	let hasUnterminatedQuote = false;
	const rawStart = start;
	while (pos < len) {
		const char = input[pos];
		if (quoteEnabled && char === quote) {
			// Quoted field: delegate to the dedicated quoted-field scanner.
			const result = scanQuotedField(input, pos, config, isEof);
			// Incomplete field in a streaming chunk: hand back what we have and
			// tell the caller where to resume once more input arrives.
			if (result.needMore) return {
				fields,
				quoted,
				endPos: pos,
				complete: false,
				needMore: true,
				resumePos: result.resumePos ?? start,
				rawStart,
				rawEnd: pos
			};
			if (result.unterminated) hasUnterminatedQuote = true;
			fields.push(result.value);
			quoted.push(true);
			pos = result.endPos;
			if (pos < len) {
				if (isAtDelimiter(input, pos, delimiter)) {
					pos += delimLen;
					// Input ends right after a delimiter at EOF: the row has a
					// final empty field.
					if (pos >= len && isEof) {
						fields.push("");
						quoted.push(false);
					}
					continue;
				}
				const nextChar = input[pos];
				if (nextChar === "\n") return {
					fields,
					quoted,
					endPos: pos + 1,
					complete: true,
					needMore: false,
					newline: "\n",
					rawStart,
					rawEnd: pos
				};
				if (nextChar === "\r") {
					if (pos + 1 < len) {
						// Distinguish CRLF from a lone CR.
						if (input[pos + 1] === "\n") return {
							fields,
							quoted,
							endPos: pos + 2,
							complete: true,
							needMore: false,
							newline: "\r\n",
							rawStart,
							rawEnd: pos
						};
						return {
							fields,
							quoted,
							endPos: pos + 1,
							complete: true,
							needMore: false,
							newline: "\r",
							rawStart,
							rawEnd: pos
						};
					}
					// Bare "\r" at the buffer edge may be half of "\r\n":
					// without EOF we must request more data before deciding.
					if (!isEof) return {
						fields,
						quoted,
						endPos: pos,
						complete: false,
						needMore: true,
						resumePos: start,
						rawStart,
						rawEnd: pos
					};
					return {
						fields,
						quoted,
						endPos: pos + 1,
						complete: true,
						needMore: false,
						newline: "\r",
						rawStart,
						rawEnd: pos
					};
				}
				// Stray characters after a closing quote: lenient recovery —
				// skip ahead to the next delimiter or newline.
				pos++;
				while (pos < len) {
					if (isAtDelimiter(input, pos, delimiter)) {
						pos += delimLen;
						break;
					}
					if (input[pos] === "\n" || input[pos] === "\r") break;
					pos++;
				}
				continue;
			}
			continue;
		}
		// Unquoted field path.
		const result = scanUnquotedField(input, pos, config, isEof);
		if (result.needMore) {
			fields.push(result.value);
			quoted.push(false);
			return {
				fields,
				quoted,
				endPos: result.endPos,
				complete: false,
				needMore: true,
				resumePos: result.resumePos ?? start,
				rawStart,
				rawEnd: result.endPos
			};
		}
		fields.push(result.value);
		quoted.push(false);
		pos = result.endPos;
		if (pos < len) {
			if (isAtDelimiter(input, pos, delimiter)) {
				pos += delimLen;
				// Trailing delimiter at EOF produces a final empty field.
				if (pos >= len && isEof) {
					fields.push("");
					quoted.push(false);
				}
				continue;
			}
			const char = input[pos];
			if (char === "\n") return {
				fields,
				quoted,
				endPos: pos + 1,
				complete: true,
				needMore: false,
				newline: "\n",
				rawStart,
				rawEnd: pos
			};
			if (char === "\r") {
				if (pos + 1 < len && input[pos + 1] === "\n") return {
					fields,
					quoted,
					endPos: pos + 2,
					complete: true,
					needMore: false,
					newline: "\r\n",
					rawStart,
					rawEnd: pos
				};
				return {
					fields,
					quoted,
					endPos: pos + 1,
					complete: true,
					needMore: false,
					newline: "\r",
					rawStart,
					rawEnd: pos
				};
			}
		}
	}
	// Ran off the end of the input without hitting a newline.
	if (isEof) {
		// At EOF a non-empty partial row still counts as a complete row.
		if (fields.length > 0 || pos > start) return {
			fields,
			quoted,
			endPos: pos,
			complete: true,
			needMore: false,
			unterminatedQuote: hasUnterminatedQuote || void 0,
			rawStart,
			rawEnd: pos
		};
	}
	return {
		fields,
		quoted,
		endPos: pos,
		complete: false,
		needMore: !isEof,
		resumePos: start,
		unterminatedQuote: hasUnterminatedQuote || void 0,
		rawStart,
		rawEnd: pos
	};
}

//#endregion
//#region src/modules/csv/parse/lines.ts
/**
* Cache for global-flag versions of RegExp objects.
* Avoids re-creating `new RegExp(..., 'g')` on every call to splitLinesWithEndings.
*/
const globalRegexCache = /* @__PURE__ */ new WeakMap();
function getCachedGlobalRegex(re) {
	const existing = globalRegexCache.get(re);
	if (existing) return existing;
	// Strip any existing "g" before re-adding it so the flag set stays valid.
	const flagsWithoutG = re.flags.replace(/g/g, "");
	const globalised = new RegExp(re.source, `${flagsWithoutG}g`);
	globalRegexCache.set(re, globalised);
	return globalised;
}
/**
* Split input into lines using the given linebreak regex or separator string and
* yield per-line metadata including the actual line-ending length.
*
* Notes:
* - Works with mixed line endings.
* - Skips the trailing split artifact (the empty string produced when the input
*   ends with a separator).
*/
function* splitLinesWithEndings(input, linebreakRegex) {
	if (input === "") return;
	if (typeof linebreakRegex === "string") {
		const sep = linebreakRegex;
		if (sep === "") {
			// No separator: the entire input is a single line.
			yield {
				line: input,
				lineEndingLength: 0,
				lineLengthWithEnding: input.length
			};
			return;
		}
		const sepLen = sep.length;
		for (let cursor = 0; ; ) {
			const hit = input.indexOf(sep, cursor);
			if (hit === -1) {
				// Trailing content with no final separator; skip if empty.
				if (cursor < input.length) {
					const rest = input.slice(cursor);
					yield {
						line: rest,
						lineEndingLength: 0,
						lineLengthWithEnding: rest.length
					};
				}
				return;
			}
			const line = input.slice(cursor, hit);
			yield {
				line,
				lineEndingLength: sepLen,
				lineLengthWithEnding: line.length + sepLen
			};
			cursor = hit + sepLen;
		}
	}
	// Regex path: ensure a global regex so exec() walks the whole input.
	const re = linebreakRegex.global ? linebreakRegex : getCachedGlobalRegex(linebreakRegex);
	re.lastIndex = 0;
	let cursor = 0;
	for (let match = re.exec(input); match !== null; match = re.exec(input)) {
		const breakLen = match[0].length;
		const line = input.slice(cursor, match.index);
		yield {
			line,
			lineEndingLength: breakLen,
			lineLengthWithEnding: line.length + breakLen
		};
		cursor = match.index + breakLen;
		// Guard against zero-width matches looping forever.
		if (breakLen === 0) re.lastIndex++;
	}
	if (cursor < input.length) {
		const tail = input.slice(cursor);
		yield {
			line: tail,
			lineEndingLength: 0,
			lineLengthWithEnding: tail.length
		};
	}
}
//#endregion
//#region src/modules/csv/parse/sync.ts
/**
* Normalize a validate() result to the { isValid, reason } form.
*/
function normalizeValidateResult(result) {
	if (typeof result === "boolean") {
		return {
			isValid: result,
			reason: "Validation failed"
		};
	}
	const { isValid, reason } = result;
	// An empty reason falls back to the generic message.
	return {
		isValid,
		reason: reason || "Validation failed"
	};
}
/**
* Apply dynamic typing to an array row (wrapper to reduce code duplication).
*/
function applyArrayTyping(row, dynamicTyping, castDate) {
	// Header row is irrelevant for array-mode rows, hence the null argument.
	return applyDynamicTypingToArrayRow(row, null, dynamicTyping || false, castDate);
}
/**
* Return the array only if non-empty, otherwise undefined.
*/
function optionalArray(arr) {
	if (arr.length === 0) return void 0;
	return arr;
}
/**
* Build CsvParseMeta from config and state (shared by array and object mode).
*/
function buildMeta(config, state) {
	const fields = state.headerRow ? filterValidHeaders(state.headerRow) : void 0;
	return {
		delimiter: config.delimiter,
		linebreak: config.linebreak,
		aborted: false,
		truncated: state.truncated,
		cursor: state.dataRowCount,
		fields,
		renamedHeaders: state.renamedHeadersForMeta
	};
}
/**
* Apply the configured trim function to all fields in a row.
* Uses the cached trimFieldIsIdentity flag from config to skip the map()
* entirely when trimming is a no-op.
*/
function trimFields(fields, config) {
	if (config.trimFieldIsIdentity) return fields;
	return fields.map(config.trimField);
}
/**
* Parse input using fast mode (no quote detection).
*
* Splits on the configured linebreak and on the delimiter with plain
* String.prototype.split — only valid when the input contains no quoted fields.
* Mutates `state` (line counters, info/raw bookkeeping) and appends to `errors`
* via processCompletedRow.
*/
function* parseFastMode(input, config, state, errors) {
	if (input === "") return;
	let currentCharOffset = 0;
	for (const { line, lineLengthWithEnding: lineCharLength } of splitLinesWithEndings(input, config.linebreakRegex)) {
		state.lineNumber++;
		// Stop once past the requested last line.
		if (config.toLine !== void 0 && state.lineNumber > config.toLine) {
			state.truncated = true;
			break;
		}
		// Lines before skipLines only advance the character offset.
		if (state.lineNumber <= config.skipLines) {
			currentCharOffset += lineCharLength;
			continue;
		}
		if (line === "" && config.shouldSkipEmpty) {
			currentCharOffset += lineCharLength;
			continue;
		}
		if (config.maxRowBytes !== void 0) {
			if (getUtf8ByteLength(line) > config.maxRowBytes) throw new Error(`Row exceeds the maximum size of ${config.maxRowBytes} bytes`);
		}
		// Record row position metadata only when the caller asked for it.
		if (config.infoOption) {
			state.currentRowStartLine = state.lineNumber;
			state.currentRowStartOffset = currentCharOffset;
		}
		if (config.rawOption) state.currentRawRow = line;
		const trimmedRow = trimFields(line.split(config.delimiter), config);
		// Fast mode never sees quotes, so the quoted-flags array is all false.
		if (config.infoOption) state.currentRowQuoted = getUnquotedArray(trimmedRow.length);
		if (config.comment && trimmedRow[0]?.trimStart().startsWith(config.comment)) {
			currentCharOffset += lineCharLength;
			continue;
		}
		if (config.shouldSkipEmpty && isEmptyRow(trimmedRow, config.shouldSkipEmpty)) {
			currentCharOffset += lineCharLength;
			continue;
		}
		const result = processCompletedRow(trimmedRow, state, config, errors, state.lineNumber);
		currentCharOffset += lineCharLength;
		// stop means maxRows (or similar) was reached — yield the final result
		// and terminate the generator. TODO confirm against processCompletedRow.
		if (result.stop) {
			yield result;
			return;
		}
		// Skipped rows are only surfaced when they carry an error.
		if (!result.skipped || result.error) yield result;
		resetInfoState(state, config.infoOption, config.rawOption, state.lineNumber + 1, currentCharOffset);
	}
}
/**
* Parse input using Scanner-based batch scanning.
* This is a high-performance alternative that uses indexOf-based field scanning
* instead of character-by-character parsing.
*
* Key optimizations:
* 1. Uses indexOf to find delimiters/quotes/newlines in bulk
* 2. Uses slice for field extraction (avoids string concatenation)
* 3. Processes entire rows at once instead of character-by-character
*/
function* parseWithScanner(input, config, state, errors) {
	const scannerConfig = toScannerConfig(config);
	const len = input.length;
	let pos = 0;
	if (config.infoOption) state.currentRowStartOffset = 0;
	while (pos < len) {
		// The whole input is in memory, so scanRow always runs with isEof=true.
		const scanResult = scanRow(input, pos, scannerConfig, true);
		// No fields and no progress: nothing left to parse.
		if (scanResult.fields.length === 0 && scanResult.endPos === pos) break;
		const row = trimFields(scanResult.fields, config);
		const rowStartLine = state.lineNumber + 1;
		{
			// Count physical newlines inside the raw row (quoted fields may
			// contain embedded line breaks) so lineNumber stays accurate.
			const rawStart = scanResult.rawStart;
			const rawEnd = scanResult.rawEnd;
			let newlines = 1;
			for (let i = rawStart; i < rawEnd; i++) {
				const ch = input.charCodeAt(i);
				if (ch === 10) newlines++;
				else if (ch === 13) {
					// Treat CRLF as a single line break.
					if (i + 1 < rawEnd && input.charCodeAt(i + 1) === 10) i++;
					newlines++;
				}
			}
			state.lineNumber += newlines;
		}
		if (config.toLine !== void 0 && state.lineNumber > config.toLine) {
			state.truncated = true;
			break;
		}
		const rawEndPos = scanResult.rawEnd;
		if (state.lineNumber <= config.skipLines) {
			pos = scanResult.endPos;
			continue;
		}
		if (config.maxRowBytes !== void 0) {
			if (getUtf8ByteLength(input.slice(scanResult.rawStart, rawEndPos)) > config.maxRowBytes) throw new Error(`Row exceeds the maximum size of ${config.maxRowBytes} bytes`);
		}
		// Comment rows are dropped before any further processing.
		if (config.comment && row[0]?.trimStart().startsWith(config.comment)) {
			pos = scanResult.endPos;
			continue;
		}
		if (config.shouldSkipEmpty && isEmptyRow(row, config.shouldSkipEmpty)) {
			pos = scanResult.endPos;
			continue;
		}
		if (config.infoOption) {
			state.currentRowStartLine = rowStartLine;
			state.currentRowStartOffset = scanResult.rawStart;
			state.currentRowQuoted = scanResult.quoted;
		}
		if (config.rawOption) state.currentRawRow = input.slice(scanResult.rawStart, rawEndPos);
		// An unterminated quote at EOF is reported but the partial row is kept.
		if (scanResult.unterminatedQuote) errors.push({
			code: "MissingQuotes",
			message: "Quoted field unterminated",
			line: state.lineNumber
		});
		const result = processCompletedRow(row, state, config, errors, state.lineNumber);
		if (result.stop) {
			yield result;
			return;
		}
		if (!result.skipped || result.error) yield result;
		pos = scanResult.endPos;
		if (config.infoOption) state.currentRowStartOffset = scanResult.endPos;
	}
}
/**
* Parse CSV string synchronously.
*
* Chooses parseFastMode (no quotes) or parseWithScanner based on config, then
* post-processes each row: rowTransform, validate, dynamic typing (array mode)
* or record conversion (header mode), and finally assembles the result shape.
*
* @example
* ```ts
* // Simple array output (no headers)
* const rows = parseCsv("a,b,c\n1,2,3");
* // rows: string[][] = [["a","b","c"], ["1","2","3"]]
*
* // Object output with headers
* const result = parseCsv("name,age\nAlice,30", { headers: true });
* // result.rows: Record<string, unknown>[] = [{ name: "Alice", age: "30" }]
*
* // With info option
* const result = parseCsv("a,b\n1,2", { info: true });
* // result.rows: RecordWithInfo<string[]>[] = [{ record: ["a","b"], info: {...} }, ...]
* ```
*/
function parseCsv(input, options = {}) {
	const { config, processedInput } = resolveParseConfig(input, options);
	const state = createParseState(config);
	const errors = [];
	const invalidRows = [];
	const parser = config.fastMode ? parseFastMode(processedInput, config, state, errors) : parseWithScanner(processedInput, config, state, errors);
	// ---- Array mode (no headers) ----
	if (!state.useHeaders) {
		const processedRows = [];
		for (const result of parser) {
			if (result.row && !result.skipped) {
				let row = result.row;
				// rowTransform returning null/undefined drops the row entirely.
				if (options.rowTransform) {
					const transformed = options.rowTransform(row);
					if (transformed === null || transformed === void 0) continue;
					row = transformed;
				}
				// Failed validation moves the row to invalidRows.
				if (options.validate) {
					const { isValid, reason } = normalizeValidateResult(options.validate(row));
					if (!isValid) {
						invalidRows.push({
							row,
							reason
						});
						continue;
					}
				}
				if (config.dynamicTyping || config.castDate) row = applyArrayTyping(row, config.dynamicTyping, config.castDate);
				// info mode wraps each row with its positional metadata.
				if (config.infoOption && result.info) processedRows.push({
					record: row,
					info: result.info
				});
				else processedRows.push(row);
			} else if (result.row && result.skipped && result.error) invalidRows.push({
				row: result.row,
				reason: result.reason || result.error.message
			});
			if (result.stop) break;
		}
		const meta = buildMeta(config, state);
		if (config.infoOption) return {
			headers: void 0,
			rows: processedRows,
			invalidRows: optionalArray(invalidRows),
			errors: optionalArray(errors),
			meta
		};
		// validate implies the caller wants the rich result shape too.
		if (options.validate) return {
			headers: void 0,
			rows: processedRows,
			invalidRows: optionalArray(invalidRows),
			errors: optionalArray(errors),
			meta
		};
		// Plain array mode returns just the rows.
		return processedRows;
	}
	// ---- Object mode (headers) ----
	const objectRows = [];
	for (const result of parser) {
		if (result.row && !result.skipped) {
			let record = rowToRecord(result.row, state, config);
			// Extra fields beyond the header count are collected under _extra.
			if (result.extras && result.extras.length > 0) record._extra = result.extras;
			if (options.rowTransform) {
				const transformed = options.rowTransform(record);
				if (transformed === null || transformed === void 0) continue;
				record = transformed;
			}
			if (options.validate) {
				const { isValid, reason } = normalizeValidateResult(options.validate(record));
				if (!isValid) {
					// Note: the original (un-recordified) row is reported here.
					invalidRows.push({
						row: result.row,
						reason
					});
					continue;
				}
			}
			if (config.infoOption && result.info) objectRows.push({
				record,
				info: result.info
			});
			else objectRows.push(record);
		} else if (result.row && result.skipped && result.error) invalidRows.push({
			row: result.row,
			reason: result.reason || result.error.message
		});
		if (result.stop) break;
	}
	const meta = buildMeta(config, state);
	const { objname } = options;
	// objname re-keys the output object by the given column's value.
	if (objname && state.headerRow) {
		const objResult = {};
		for (const item of objectRows) {
			const key = (config.infoOption ? item.record : item)[objname];
			// null/undefined keys collapse to the empty-string key.
			const keyStr = key === void 0 || key === null ? "" : String(key);
			objResult[keyStr] = item;
		}
		return {
			headers: meta.fields,
			rows: objResult,
			invalidRows: optionalArray(invalidRows),
			errors: optionalArray(errors),
			meta
		};
	}
	return {
		headers: meta.fields,
		rows: objectRows,
		invalidRows: optionalArray(invalidRows),
		errors: optionalArray(errors),
		meta
	};
}
//#endregion
//#region src/modules/csv/format/formatted-value.ts
/**
* Formatted value wrapper for controlling field-level quoting in CSV output.
*
* This module provides helper functions to override the global quoting behavior
* for individual field values during CSV formatting.
*
* @module
*/
/**
* Symbol to identify FormattedValue instances.
* Using Symbol.for ensures reliable detection across module boundaries.
*/
const FORMATTED_VALUE_SYMBOL = Symbol.for("csv.FormattedValue");
/**
* Check if a value is a FormattedValue instance.
*/
function isFormattedValue(value) {
	return value !== null && typeof value === "object" && value[FORMATTED_VALUE_SYMBOL] === true;
}
//#endregion
//#region src/modules/csv/format/config.ts
/**
* Escape a string for use inside a regex character class [...].
* Handles all characters that are special there: \ ] ^ -
*/
function escapeForCharClass(str) {
	return str.replace(/[\\\]^-]/g, "\\$&");
}
/**
* Create pre-compiled regex patterns for CSV formatting.
*/
function createFormatRegex(options) {
	const { quote: quoteOption, delimiter, escape: escapeOption } = options;
	const { enabled: quoteEnabled, char: quote } = normalizeQuoteOption(quoteOption);
	const escapeNormalized = normalizeEscapeOption(escapeOption, quote);
	// Quoting disabled: no regexes are needed at all.
	if (!quoteEnabled) {
		return {
			needsQuoteRegex: null,
			escapeQuoteRegex: null,
			escapedQuote: "",
			quoteEnabled: false,
			quote: "",
			escape: "",
			delimiter,
			useFastCheck: false
		};
	}
	const escape = escapeNormalized.char || quote;
	// Single-char delimiter/quote/escape allows the indexOf fast path.
	const useFastCheck = delimiter.length === 1 && quote.length === 1 && escape.length === 1;
	let needsQuoteRegex = null;
	if (!useFastCheck) {
		const distinctEscape = escape !== quote ? escapeForCharClass(escape) : "";
		const classContent = `${escapeForCharClass(delimiter)}${escapeForCharClass(quote)}${distinctEscape}\r\n`;
		needsQuoteRegex = new RegExp(`[${classContent}]`);
	}
	const escapeQuoteRegex = escape === quote
		? new RegExp(escapeRegex(quote), "g")
		: new RegExp(`${escapeRegex(quote)}|${escapeRegex(escape)}`, "g");
	return {
		needsQuoteRegex,
		escapeQuoteRegex,
		escapedQuote: escape + quote,
		quoteEnabled: true,
		quote,
		escape,
		delimiter,
		useFastCheck
	};
}
/**
* Build a per-field quote predicate from the quoteColumns/quoteHeaders option.
* Accepts: boolean, boolean[], or an object keyed by header name.
*/
function createQuoteLookup(quoteConfig) {
	if (quoteConfig === true) return () => true;
	if (quoteConfig === false || quoteConfig === void 0) return () => false;
	if (Array.isArray(quoteConfig)) {
		return (index) => quoteConfig[index] === true;
	}
	// Object form: only an exact `true` for a known header forces quoting.
	return (_index, header) => (header ? quoteConfig[header] === true : false);
}
/**
* Create complete format configuration from options.
*/
function createFormatConfig(options) {
	const {
		delimiter = ",",
		lineEnding = "\n",
		quote: quoteOption = "\"",
		escape: escapeOption,
		quoteColumns = false,
		quoteHeaders = false,
		writeHeaders: writeHeadersOption,
		bom = false,
		trailingNewline = false,
		escapeFormulae = false,
		decimalSeparator = ".",
		typeTransform
	} = options;
	if (decimalSeparator !== "." && decimalSeparator !== ",") {
		throw new CsvError(`Invalid decimalSeparator: "${decimalSeparator}". Must be "." or ",".`);
	}
	if (decimalSeparator === delimiter) {
		throw new CsvError("decimalSeparator cannot be the same as delimiter");
	}
	return {
		delimiter,
		lineEnding,
		quoteAll: quoteColumns === true,
		escapeFormulae,
		decimalSeparator,
		writeHeaders: writeHeadersOption ?? true,
		bom,
		trailingNewline,
		typeTransform,
		regex: createFormatRegex({
			quote: quoteOption,
			delimiter,
			escape: escapeOption
		}),
		shouldQuoteColumn: createQuoteLookup(quoteColumns),
		shouldQuoteHeader: createQuoteLookup(quoteHeaders)
	};
}
//#endregion
//#region src/modules/csv/utils/number.ts
/**
* Format a number for CSV output with the specified decimal separator.
*
* @param value - The number to format
* @param decimalSeparator - The decimal separator to use
* @returns Formatted string representation
*
* @example
* formatNumberForCsv(3.14, ".") // "3.14"
* formatNumberForCsv(3.14, ",") // "3,14"
*/
function formatNumberForCsv(value, decimalSeparator) {
	const text = String(value);
	return decimalSeparator === "," ? text.replace(".", ",") : text;
}
//#endregion
//#region src/modules/csv/format/formatter.ts
/**
* Apply type-based transform to a single value.
* Returns the transformed result, or undefined if no transform applies.
*/
function applyTypeTransform(value, transform, ctx) {
	if (value === null || value === void 0) return;
	const type = typeof value;
	// Date is checked before number/object so Date values never fall through
	// to the generic object branch.
	if (type === "boolean" && transform.boolean) return transform.boolean(value, ctx);
	if (value instanceof Date && transform.date) return transform.date(value, ctx);
	if (type === "number" && transform.number) return transform.number(value, ctx);
	if (type === "bigint" && transform.bigint) return transform.bigint(value, ctx);
	if (type === "string" && transform.string) return transform.string(value, ctx);
	// Arrays are deliberately excluded from the object transform.
	if (type === "object" && !Array.isArray(value) && !(value instanceof Date)) {
		if (transform.object) return transform.object(value, ctx);
	}
}
/**
* Default type conversion to string.
* null/undefined become "", numbers honor the decimal separator, Dates become
* ISO strings, objects are JSON-stringified (falling back on circular refs).
*/
function defaultToString(value, decimalSeparator) {
	if (value === null || value === void 0) return "";
	if (typeof value === "number") return formatNumberForCsv(value, decimalSeparator);
	if (value instanceof Date) return value.toISOString();
	if (typeof value === "bigint") return String(value);
	if (typeof value === "boolean") return value ? "true" : "false";
	if (typeof value === "object") try {
		return JSON.stringify(value);
	} catch {
		// JSON.stringify throws on circular structures (and BigInt props).
		return "[object Object]";
	}
	return String(value);
}
/**
* Fast check if a string needs quoting (for single-char delimiter/quote/escape).
* Uses indexOf for slightly better V8 optimization.
*/
function needsQuoteFast(str, delimiter, quote, escape) {
	return str.indexOf(delimiter) !== -1 || str.indexOf(quote) !== -1 || escape !== quote && str.indexOf(escape) !== -1 || str.indexOf("\n") !== -1 || str.indexOf("\r") !== -1;
}
/**
* Format a single field value to CSV string.
*
* Order of operations: type transform (with optional FormattedValue quote
* hint) -> formula escaping -> quote decision -> escape-and-wrap.
*/
function formatField(value, regex, ctx) {
	const { index, header, isHeader, outputRowIndex, forceQuote, quoteAll, escapeFormulae, decimalSeparator, transform } = ctx;
	let str;
	let transformQuoteHint;
	if (!isHeader && transform) {
		const transformed = applyTypeTransform(value, transform, {
			column: header ?? index,
			index: outputRowIndex
		});
		// No transform applied: fall back to the default stringification.
		if (transformed === void 0 || transformed === null) str = defaultToString(value, decimalSeparator);
		else if (isFormattedValue(transformed)) {
			// FormattedValue carries both the text and a quote override.
			str = transformed.value;
			transformQuoteHint = transformed.quote;
		} else str = transformed;
	} else str = defaultToString(value, decimalSeparator);
	// A quote hint of `false` also suppresses formula escaping.
	if (escapeFormulae && transformQuoteHint !== false && startsWithFormulaChar(str)) str = "'" + str;
	if (!regex.quoteEnabled) return str;
	let needsQuote;
	// An explicit hint wins over every automatic quoting rule.
	if (transformQuoteHint !== void 0) needsQuote = transformQuoteHint;
	else needsQuote = quoteAll || forceQuote || (regex.useFastCheck ? needsQuoteFast(str, regex.delimiter, regex.quote, regex.escape) : regex.needsQuoteRegex.test(str));
	if (needsQuote) {
		let escaped;
		// Distinct escape char: escape both the quote and the escape char itself.
		if (regex.escape !== regex.quote) escaped = str.replace(regex.escapeQuoteRegex, (ch) => ch === regex.quote ? regex.escape + regex.quote : regex.escape + regex.escape);
		else escaped = str.replace(regex.escapeQuoteRegex, regex.escapedQuote);
		return regex.quote + escaped + regex.quote;
	}
	return str;
}
i++) {\n\t\t\tctx.index = i;\n\t\t\tctx.header = headers?.[i];\n\t\t\tctx.forceQuote = quoteLookup(i, ctx.header);\n\t\t\tresult += delimiter + formatField(row[i], regex, ctx);\n\t\t}\n\t\treturn result;\n\t}\n\t/**\n\t* Apply row transform if configured. Returns null to skip the row.\n\t*/\n\tfunction applyRowTransform(cfg, row, index) {\n\t\tif (!cfg.typeTransform?.row) return row;\n\t\tconst t = cfg.typeTransform.row(row, index);\n\t\treturn t === null ? null : t;\n\t}\n\t/**\n\t* Normalize all input types to a unified format.\n\t* Handles: objects, arrays, RowHashArray, and columns config.\n\t*/\n\tfunction normalizeInput(data, options, cfg) {\n\t\tconst { headers, columns } = options;\n\t\tif (data.length === 0) {\n\t\t\tif (columns && columns.length > 0) return {\n\t\t\t\tkeys: null,\n\t\t\t\tdisplayHeaders: columns.map((c) => typeof c === \"string\" ? c : c.header ?? c.key),\n\t\t\t\trows: []\n\t\t\t};\n\t\t\tif (Array.isArray(headers)) return {\n\t\t\t\tkeys: headers,\n\t\t\t\tdisplayHeaders: headers,\n\t\t\t\trows: []\n\t\t\t};\n\t\t\treturn {\n\t\t\t\tkeys: null,\n\t\t\t\tdisplayHeaders: null,\n\t\t\t\trows: []\n\t\t\t};\n\t\t}\n\t\tconst firstRow = data[0];\n\t\tif (columns && columns.length > 0) {\n\t\t\tconst processed = processColumns(columns);\n\t\t\tconst keys = processed.keys;\n\t\t\tconst displayHeaders = processed.headers;\n\t\t\tconst rows = [];\n\t\t\tfor (let i = 0; i < data.length; i++) {\n\t\t\t\tconst row = applyRowTransform(cfg, data[i], i);\n\t\t\t\tif (row === null) continue;\n\t\t\t\tlet values;\n\t\t\t\tif (isRowHashArray(row)) values = rowHashArrayMapByHeaders(row, keys);\n\t\t\t\telse if (Array.isArray(row)) values = row;\n\t\t\t\telse values = keys.map((k) => row[k]);\n\t\t\t\trows.push(values);\n\t\t\t}\n\t\t\treturn {\n\t\t\t\tkeys,\n\t\t\t\tdisplayHeaders,\n\t\t\t\trows\n\t\t\t};\n\t\t}\n\t\tif (isRowHashArray(firstRow)) {\n\t\t\tconst hashArrays = data;\n\t\t\tconst keys = headers === true ? 
rowHashArrayToHeaders(hashArrays[0]) : Array.isArray(headers) ? headers : null;\n\t\t\tconst rows = [];\n\t\t\tfor (let i = 0; i < hashArrays.length; i++) {\n\t\t\t\tconst row = applyRowTransform(cfg, hashArrays[i], i);\n\t\t\t\tif (row === null) continue;\n\t\t\t\tlet values;\n\t\t\t\tif (isRowHashArray(row)) values = keys ? rowHashArrayMapByHeaders(row, keys) : rowHashArrayToValues(row);\n\t\t\t\telse if (Array.isArray(row)) values = row;\n\t\t\t\telse values = keys ? keys.map((k) => row[k]) : Object.values(row);\n\t\t\t\trows.push(values);\n\t\t\t}\n\t\t\treturn {\n\t\t\t\tkeys,\n\t\t\t\tdisplayHeaders: keys,\n\t\t\t\trows\n\t\t\t};\n\t\t}\n\t\tif (!Array.isArray(firstRow) && typeof firstRow === \"object\") {\n\t\t\tconst objects = data;\n\t\t\tconst keys = headers === true ? Object.keys(objects[0]) : Array.isArray(headers) ? headers : null;\n\t\t\tconst rows = [];\n\t\t\tfor (let i = 0; i < objects.length; i++) {\n\t\t\t\tconst obj = applyRowTransform(cfg, objects[i], i);\n\t\t\t\tif (obj === null) continue;\n\t\t\t\tconst values = keys ? keys.map((k) => obj[k]) : Object.values(obj);\n\t\t\t\trows.push(values);\n\t\t\t}\n\t\t\treturn {\n\t\t\t\tkeys,\n\t\t\t\tdisplayHeaders: keys,\n\t\t\t\trows\n\t\t\t};\n\t\t}\n\t\tconst arrays = data;\n\t\tconst keys = Array.isArray(headers) ? 
headers : null;\n\t\tconst rows = [];\n\t\tfor (let i = 0; i < arrays.length; i++) {\n\t\t\tconst row = applyRowTransform(cfg, arrays[i], i);\n\t\t\tif (row === null) continue;\n\t\t\trows.push(row);\n\t\t}\n\t\treturn {\n\t\t\tkeys,\n\t\t\tdisplayHeaders: keys,\n\t\t\trows\n\t\t};\n\t}\n\t/**\n\t* Format data as CSV string.\n\t*\n\t* Performance optimization: Builds result string directly without\n\t* intermediate arrays from map().join() operations.\n\t*\n\t* @example\n\t* ```ts\n\t* // Array of arrays\n\t* formatCsv([[\"a\", \"b\"], [\"1\", \"2\"]])\n\t* // \"a,b\\n1,2\"\n\t*\n\t* // Array of objects\n\t* formatCsv([{ name: \"Alice\", age: 30 }])\n\t* // \"name,age\\nAlice,30\"\n\t*\n\t* // With options\n\t* formatCsv(data, {\n\t* delimiter: \";\",\n\t* quoteColumns: { name: true },\n\t* escapeFormulae: true,\n\t* bom: true\n\t* })\n\t* ```\n\t*/\n\tfunction formatCsv(data, options = {}) {\n\t\tconst cfg = createFormatConfig(options);\n\t\tconst { displayHeaders, rows } = normalizeInput(data, options, cfg);\n\t\tconst lines = [];\n\t\tconst effectiveHeaders = displayHeaders ? 
deduplicateHeaders(displayHeaders) : void 0;\n\t\tif (effectiveHeaders && cfg.writeHeaders) lines.push(formatRowWithLookup(effectiveHeaders, cfg.regex, {\n\t\t\tquoteLookup: cfg.shouldQuoteHeader,\n\t\t\tdelimiter: cfg.delimiter,\n\t\t\theaders: effectiveHeaders,\n\t\t\tisHeader: true,\n\t\t\toutputRowIndex: 0,\n\t\t\tquoteAll: cfg.quoteAll,\n\t\t\tescapeFormulae: cfg.escapeFormulae,\n\t\t\tdecimalSeparator: cfg.decimalSeparator,\n\t\t\ttransform: void 0\n\t\t}));\n\t\tfor (let rowIdx = 0; rowIdx < rows.length; rowIdx++) lines.push(formatRowWithLookup(rows[rowIdx], cfg.regex, {\n\t\t\tquoteLookup: cfg.shouldQuoteColumn,\n\t\t\tdelimiter: cfg.delimiter,\n\t\t\theaders: effectiveHeaders,\n\t\t\tisHeader: false,\n\t\t\toutputRowIndex: rowIdx,\n\t\t\tquoteAll: cfg.quoteAll,\n\t\t\tescapeFormulae: cfg.escapeFormulae,\n\t\t\tdecimalSeparator: cfg.decimalSeparator,\n\t\t\ttransform: cfg.typeTransform\n\t\t}));\n\t\tlet result = cfg.bom ? \"\uFEFF\" : \"\";\n\t\tresult += lines.join(cfg.lineEnding);\n\t\tif (lines.length > 0 && cfg.trailingNewline) result += cfg.lineEnding;\n\t\treturn result;\n\t}\n\t//#endregion\n\t//#region src/modules/csv/worker/worker.entry.ts\n\t/**\n\t* CSV Worker Entry\n\t*\n\t* This file runs inside a Web Worker (classic script after bundling).\n\t* It implements the worker message protocol used by CsvWorkerPool.\n\t*\n\t* IMPORTANT:\n\t* - Keep this file browser/worker-safe (no Node.js APIs)\n\t* - Parsing/formatting are delegated to the main CSV implementations to avoid drift.\n\t*/\n\tconst sessions = /* @__PURE__ */ new Map();\n\tfunction getSession(sessionId) {\n\t\tconst session = sessions.get(sessionId);\n\t\tif (!session) throw new Error(`Session not found: ${sessionId}`);\n\t\treturn session;\n\t}\n\tfunction reply(taskId, start, data) {\n\t\tself.postMessage({\n\t\t\ttype: \"result\",\n\t\t\ttaskId,\n\t\t\tdata,\n\t\t\tduration: performance.now() - start\n\t\t});\n\t}\n\tfunction replyError(taskId, start, error) {\n\t\tconst message = 
error instanceof Error ? error.message : String(error);\n\t\tself.postMessage({\n\t\t\ttype: \"error\",\n\t\t\ttaskId,\n\t\t\terror: message,\n\t\t\tduration: performance.now() - start\n\t\t});\n\t}\n\tfunction toObjectRows(data, headers) {\n\t\tif (!Array.isArray(data)) return {\n\t\t\trows: [],\n\t\t\theaders: headers ?? []\n\t\t};\n\t\tif (data.length === 0) return {\n\t\t\trows: [],\n\t\t\theaders: headers ?? []\n\t\t};\n\t\tconst first = data[0];\n\t\tif (first && typeof first === \"object\" && !Array.isArray(first)) return {\n\t\t\trows: data,\n\t\t\theaders: headers ?? Object.keys(first)\n\t\t};\n\t\tconst arrayRows = data;\n\t\tlet resolvedHeaders;\n\t\tlet rows;\n\t\tif (headers && headers.length > 0) {\n\t\t\tresolvedHeaders = headers;\n\t\t\trows = arrayRows;\n\t\t} else {\n\t\t\tresolvedHeaders = (arrayRows[0] ?? []).map((v) => String(v));\n\t\t\trows = arrayRows.slice(1);\n\t\t}\n\t\treturn {\n\t\t\trows: rows.map((row) => {\n\t\t\t\tconst obj = Object.create(null);\n\t\t\t\tfor (let i = 0; i < resolvedHeaders.length; i++) {\n\t\t\t\t\tconst key = resolvedHeaders[i];\n\t\t\t\t\tif (key !== \"__proto__\") obj[key] = row[i];\n\t\t\t\t}\n\t\t\t\treturn obj;\n\t\t\t}),\n\t\t\theaders: resolvedHeaders\n\t\t};\n\t}\n\tfunction sortData(data, configs) {\n\t\tconst list = Array.isArray(configs) ? configs : [configs];\n\t\tdata.sort((a, b) => {\n\t\t\tfor (const config of list) {\n\t\t\t\tconst { column, order = \"asc\", comparator = \"auto\" } = config;\n\t\t\t\tconst aVal = a[column];\n\t\t\t\tconst bVal = b[column];\n\t\t\t\tlet result;\n\t\t\t\tif (comparator === \"number\" || comparator === \"auto\" && !Number.isNaN(Number(aVal)) && !Number.isNaN(Number(bVal))) result = Number(aVal ?? 0) - Number(bVal ?? 0);\n\t\t\t\telse if (comparator === \"date\") result = new Date(aVal ?? 0).getTime() - new Date(bVal ?? 0).getTime();\n\t\t\t\telse result = String(aVal ?? \"\").localeCompare(String(bVal ?? \"\"));\n\t\t\t\tif (result !== 0) return order === \"desc\" ? 
-result : result;\n\t\t\t}\n\t\t\treturn 0;\n\t\t});\n\t}\n\tfunction evaluateCondition(row, condition, compiledRegex) {\n\t\tconst { column, operator, value, ignoreCase = false } = condition;\n\t\tlet fieldValue = row?.[column];\n\t\tlet compareValue = value;\n\t\tif (ignoreCase && typeof fieldValue === \"string\" && operator !== \"regex\") {\n\t\t\tfieldValue = fieldValue.toLowerCase();\n\t\t\tif (typeof compareValue === \"string\") compareValue = compareValue.toLowerCase();\n\t\t\telse if (Array.isArray(compareValue)) compareValue = compareValue.map((v) => typeof v === \"string\" ? v.toLowerCase() : v);\n\t\t}\n\t\tswitch (operator) {\n\t\t\tcase \"eq\": return fieldValue === compareValue;\n\t\t\tcase \"neq\": return fieldValue !== compareValue;\n\t\t\tcase \"gt\": return Number(fieldValue) > Number(compareValue);\n\t\t\tcase \"gte\": return Number(fieldValue) >= Number(compareValue);\n\t\t\tcase \"lt\": return Number(fieldValue) < Number(compareValue);\n\t\t\tcase \"lte\": return Number(fieldValue) <= Number(compareValue);\n\t\t\tcase \"contains\": {\n\t\t\t\tconst fv = ignoreCase ? String(fieldValue).toLowerCase() : String(fieldValue);\n\t\t\t\tconst cv = ignoreCase ? String(compareValue).toLowerCase() : String(compareValue);\n\t\t\t\treturn fv.includes(cv);\n\t\t\t}\n\t\t\tcase \"startsWith\": {\n\t\t\t\tconst fv = ignoreCase ? String(fieldValue).toLowerCase() : String(fieldValue);\n\t\t\t\tconst cv = ignoreCase ? String(compareValue).toLowerCase() : String(compareValue);\n\t\t\t\treturn fv.startsWith(cv);\n\t\t\t}\n\t\t\tcase \"endsWith\": {\n\t\t\t\tconst fv = ignoreCase ? String(fieldValue).toLowerCase() : String(fieldValue);\n\t\t\t\tconst cv = ignoreCase ? String(compareValue).toLowerCase() : String(compareValue);\n\t\t\t\treturn fv.endsWith(cv);\n\t\t\t}\n\t\t\tcase \"regex\": return (compiledRegex ?? new RegExp(compareValue, ignoreCase ? 
\"i\" : \"\")).test(String(fieldValue));\n\t\t\tcase \"in\": return Array.isArray(compareValue) && compareValue.includes(fieldValue);\n\t\t\tcase \"notIn\": return !Array.isArray(compareValue) || !compareValue.includes(fieldValue);\n\t\t\tcase \"isNull\": return fieldValue === null || fieldValue === void 0 || fieldValue === \"\";\n\t\t\tcase \"notNull\": return fieldValue !== null && fieldValue !== void 0 && fieldValue !== \"\";\n\t\t\tdefault: return true;\n\t\t}\n\t}\n\tfunction filterData(data, config) {\n\t\tconst { conditions, logic = \"and\" } = config;\n\t\tconst compiledRegexMap = /* @__PURE__ */ new Map();\n\t\tfor (const cond of conditions) if (cond.operator === \"regex\") compiledRegexMap.set(cond, new RegExp(cond.value, cond.ignoreCase ? \"i\" : \"\"));\n\t\tconst evaluate = logic === \"and\" ? (row) => conditions.every((cond) => evaluateCondition(row, cond, compiledRegexMap.get(cond))) : (row) => conditions.some((cond) => evaluateCondition(row, cond, compiledRegexMap.get(cond)));\n\t\treturn data.filter(evaluate);\n\t}\n\tfunction searchData(data, config) {\n\t\tconst { query, columns, ignoreCase = true } = config;\n\t\tconst searchQuery = ignoreCase ? query.toLowerCase() : query;\n\t\tconst resolvedColumns = columns ?? Object.keys(data[0] ?? {});\n\t\treturn data.filter((row) => {\n\t\t\treturn resolvedColumns.some((col) => {\n\t\t\t\tlet value = String(row[col] ?? \"\");\n\t\t\t\tif (ignoreCase) value = value.toLowerCase();\n\t\t\t\treturn value.includes(searchQuery);\n\t\t\t});\n\t\t});\n\t}\n\tfunction computeAggregate(rows, column, fn) {\n\t\tif (fn === \"count\") return rows.length;\n\t\tif (fn === \"first\") return rows.length > 0 ? rows[0]?.[column] : null;\n\t\tif (fn === \"last\") return rows.length > 0 ? rows[rows.length - 1]?.[column] : null;\n\t\tconst nums = rows.map((r) => Number(r?.[column])).filter((n) => !Number.isNaN(n));\n\t\tif (nums.length === 0) return fn === \"avg\" ? 
0 : null;\n\t\tif (fn === \"sum\" || fn === \"avg\") {\n\t\t\tconst sum = nums.reduce((a, b) => a + b, 0);\n\t\t\treturn fn === \"avg\" ? sum / nums.length : sum;\n\t\t}\n\t\tif (fn === \"min\") return nums.reduce((a, b) => a < b ? a : b, nums[0]);\n\t\tif (fn === \"max\") return nums.reduce((a, b) => a > b ? a : b, nums[0]);\n\t\treturn null;\n\t}\n\tfunction groupByData(data, config) {\n\t\tconst { columns, aggregates } = config;\n\t\tconst groups = /* @__PURE__ */ new Map();\n\t\tfor (const row of data) {\n\t\t\tconst keyValues = columns.map((col) => row[col]);\n\t\t\tconst key = keyValues.join(\"\\0\");\n\t\t\tconst existing = groups.get(key);\n\t\t\tif (existing) existing.rows.push(row);\n\t\t\telse groups.set(key, {\n\t\t\t\tkeyValues,\n\t\t\t\trows: [row]\n\t\t\t});\n\t\t}\n\t\tconst result = [];\n\t\tfor (const group of groups.values()) {\n\t\t\tconst obj = Object.create(null);\n\t\t\tcolumns.forEach((col, idx) => {\n\t\t\t\tconst k = String(col);\n\t\t\t\tif (k !== \"__proto__\") obj[k] = group.keyValues[idx];\n\t\t\t});\n\t\t\tfor (const { column, fn, alias } of aggregates) {\n\t\t\t\tconst key = alias || `${column}_${fn}`;\n\t\t\t\tif (key !== \"__proto__\") obj[key] = computeAggregate(group.rows, column, fn);\n\t\t\t}\n\t\t\tresult.push(obj);\n\t\t}\n\t\treturn result;\n\t}\n\tfunction aggregateData(data, configs) {\n\t\tconst result = Object.create(null);\n\t\tfor (const config of configs) {\n\t\t\tconst { column, fn, alias } = config;\n\t\t\tconst key = alias || `${column}_${fn}`;\n\t\t\tif (key !== \"__proto__\") result[key] = computeAggregate(data, column, fn);\n\t\t}\n\t\treturn result;\n\t}\n\tfunction getPageData(data, config) {\n\t\tconst page = Math.max(1, config.page);\n\t\tlet { pageSize } = config;\n\t\tif (pageSize <= 0) pageSize = data.length || 1;\n\t\tconst start = (page - 1) * pageSize;\n\t\treturn {\n\t\t\tdata: data.slice(start, start + pageSize),\n\t\t\tpage,\n\t\t\tpageSize,\n\t\t\ttotalRows: data.length,\n\t\t\ttotalPages: 
Math.ceil(data.length / pageSize)\n\t\t};\n\t}\n\tfunction executeQuery(session, config) {\n\t\tlet data = config.sort ? [...session.originalData] : session.originalData;\n\t\tconst result = { data: [] };\n\t\tif (config.sort) sortData(data, config.sort);\n\t\tif (config.filter) {\n\t\t\tdata = filterData(data, config.filter);\n\t\t\tresult.matchCount = data.length;\n\t\t}\n\t\tif (config.search) {\n\t\t\tdata = searchData(data, config.search);\n\t\t\tresult.matchCount = data.length;\n\t\t}\n\t\tif (config.groupBy) {\n\t\t\tdata = groupByData(data, config.groupBy);\n\t\t\tresult.groupCount = data.length;\n\t\t} else if (config.aggregate) result.aggregates = aggregateData(data, config.aggregate);\n\t\tif (config.page) {\n\t\t\tconst pageResult = getPageData(data, config.page);\n\t\t\tresult.data = pageResult.data;\n\t\t\tresult.page = pageResult.page;\n\t\t\tresult.pageSize = pageResult.pageSize;\n\t\t\tresult.totalRows = pageResult.totalRows;\n\t\t\tresult.totalPages = pageResult.totalPages;\n\t\t} else result.data = data;\n\t\treturn result;\n\t}\n\tself.addEventListener(\"message\", (event) => {\n\t\tif (event.origin !== \"\" && event.origin !== self.location?.origin) return;\n\t\tconst msg = event.data;\n\t\tif (!msg || typeof msg.type !== \"string\") return;\n\t\tconst taskId = msg.taskId ?? 0;\n\t\tconst start = performance.now();\n\t\ttry {\n\t\t\tswitch (msg.type) {\n\t\t\t\tcase \"parse\": {\n\t\t\t\t\tconst result = parseCsv(msg.data, msg.options);\n\t\t\t\t\tif (msg.sessionId) {\n\t\t\t\t\t\tconst isObj = result && result.headers;\n\t\t\t\t\t\tsessions.set(msg.sessionId, {\n\t\t\t\t\t\t\tdata: isObj ? result.rows : result,\n\t\t\t\t\t\t\theaders: isObj ? result.headers : null,\n\t\t\t\t\t\t\toriginalData: isObj ? 
[...result.rows] : [...result]\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\treply(taskId, start, result);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"format\":\n\t\t\t\t\treply(taskId, start, formatCsv(msg.data, msg.options));\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"load\": {\n\t\t\t\t\tconst { rows, headers } = toObjectRows(msg.data, msg.headers ?? null);\n\t\t\t\t\tsessions.set(msg.sessionId, {\n\t\t\t\t\t\tdata: rows,\n\t\t\t\t\t\theaders: headers ?? null,\n\t\t\t\t\t\toriginalData: [...rows]\n\t\t\t\t\t});\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\trowCount: rows.length,\n\t\t\t\t\t\theaders\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"getData\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: session.data,\n\t\t\t\t\t\theaders: session.headers || [],\n\t\t\t\t\t\trowCount: session.data.length\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"clear\":\n\t\t\t\t\tif (msg.sessionId) sessions.delete(msg.sessionId);\n\t\t\t\t\telse sessions.clear();\n\t\t\t\t\treply(taskId, start, void 0);\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"sort\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\tsortData(session.data, msg.config);\n\t\t\t\t\tsession.originalData = [...session.data];\n\t\t\t\t\treply(taskId, start, { rowCount: session.data.length });\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"filter\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\tconst totalCount = session.originalData.length;\n\t\t\t\t\tsession.data = filterData(session.originalData, msg.config);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: session.data,\n\t\t\t\t\t\tmatchCount: session.data.length,\n\t\t\t\t\t\ttotalCount\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"search\": {\n\t\t\t\t\tconst session = getSession(msg.sessionId);\n\t\t\t\t\tconst totalCount = session.originalData.length;\n\t\t\t\t\tsession.data = searchData(session.originalData, 
msg.config);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: session.data,\n\t\t\t\t\t\tmatchCount: session.data.length,\n\t\t\t\t\t\ttotalCount\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"groupBy\": {\n\t\t\t\t\tconst groups = groupByData(getSession(msg.sessionId).data, msg.config);\n\t\t\t\t\treply(taskId, start, {\n\t\t\t\t\t\tdata: groups,\n\t\t\t\t\t\tgroupCount: groups.length\n\t\t\t\t\t});\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase \"aggregate\":\n\t\t\t\t\treply(taskId, start, { data: aggregateData(getSession(msg.sessionId).data, msg.config) });\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"getPage\":\n\t\t\t\t\treply(taskId, start, getPageData(getSession(msg.sessionId).data, msg.config));\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"query\":\n\t\t\t\t\treply(taskId, start, executeQuery(getSession(msg.sessionId), msg.config));\n\t\t\t\t\tbreak;\n\t\t\t\tcase \"terminate\":\n\t\t\t\t\tsessions.clear();\n\t\t\t\t\tbreak;\n\t\t\t\tdefault: throw new Error(`Unknown message type: ${msg.type}`);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\treplyError(taskId, start, error);\n\t\t}\n\t});\n\tself.postMessage({ type: \"ready\" });\n\t//#endregion\n})();\n";
|