@socketsecurity/lib 1.0.4 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/dist/abort.js.map +2 -2
- package/dist/argv/parse.js.map +2 -2
- package/dist/arrays.d.ts +143 -0
- package/dist/arrays.js.map +2 -2
- package/dist/bin.js +1 -4
- package/dist/bin.js.map +2 -2
- package/dist/cacache.d.ts +0 -2
- package/dist/cacache.js +0 -1
- package/dist/cacache.js.map +2 -2
- package/dist/cache-with-ttl.js.map +2 -2
- package/dist/dlx.js.map +2 -2
- package/dist/external/@yarnpkg/extensions.d.ts +0 -1
- package/dist/external/cacache.d.ts +0 -7
- package/dist/external/debug.d.ts +0 -3
- package/dist/external/fast-sort.d.ts +0 -1
- package/dist/external/libnpmpack.d.ts +0 -1
- package/dist/external/make-fetch-happen.d.ts +0 -1
- package/dist/external/pacote.d.ts +0 -5
- package/dist/external/semver.d.ts +0 -1
- package/dist/external/validate-npm-package-name.js +1 -1
- package/dist/external/yargs-parser.d.ts +0 -1
- package/dist/external/yoctocolors-cjs.js +1 -1
- package/dist/external/zod.js +9 -9
- package/dist/fs.d.ts +595 -23
- package/dist/fs.js.map +2 -2
- package/dist/git.d.ts +488 -41
- package/dist/git.js.map +2 -2
- package/dist/github.d.ts +361 -12
- package/dist/github.js.map +2 -2
- package/dist/http-request.d.ts +463 -4
- package/dist/http-request.js.map +2 -2
- package/dist/json.d.ts +177 -4
- package/dist/json.js.map +2 -2
- package/dist/logger.d.ts +823 -70
- package/dist/logger.js +654 -51
- package/dist/logger.js.map +2 -2
- package/dist/objects.d.ts +386 -10
- package/dist/objects.js.map +2 -2
- package/dist/path.d.ts +270 -6
- package/dist/path.js.map +2 -2
- package/dist/promises.d.ts +432 -27
- package/dist/promises.js +3 -0
- package/dist/promises.js.map +2 -2
- package/dist/signal-exit.js.map +2 -2
- package/dist/sorts.js.map +2 -2
- package/dist/spawn.d.ts +242 -33
- package/dist/spawn.js.map +2 -2
- package/dist/spinner.d.ts +260 -20
- package/dist/spinner.js +201 -63
- package/dist/spinner.js.map +2 -2
- package/dist/stdio/clear.d.ts +130 -9
- package/dist/stdio/clear.js.map +2 -2
- package/dist/stdio/divider.d.ts +106 -10
- package/dist/stdio/divider.js +10 -0
- package/dist/stdio/divider.js.map +2 -2
- package/dist/stdio/footer.d.ts +70 -3
- package/dist/stdio/footer.js.map +2 -2
- package/dist/stdio/header.d.ts +93 -12
- package/dist/stdio/header.js.map +2 -2
- package/dist/stdio/mask.d.ts +82 -14
- package/dist/stdio/mask.js +25 -4
- package/dist/stdio/mask.js.map +2 -2
- package/dist/stdio/progress.d.ts +112 -15
- package/dist/stdio/progress.js +43 -3
- package/dist/stdio/progress.js.map +2 -2
- package/dist/stdio/prompts.d.ts +95 -5
- package/dist/stdio/prompts.js.map +2 -2
- package/dist/stdio/stderr.d.ts +114 -11
- package/dist/stdio/stderr.js.map +2 -2
- package/dist/stdio/stdout.d.ts +107 -11
- package/dist/stdio/stdout.js.map +2 -2
- package/dist/strings.d.ts +357 -28
- package/dist/strings.js.map +2 -2
- package/dist/suppress-warnings.js.map +2 -2
- package/dist/validation/json-parser.d.ts +226 -7
- package/dist/validation/json-parser.js.map +2 -2
- package/dist/validation/types.d.ts +114 -12
- package/dist/validation/types.js.map +1 -1
- package/package.json +5 -3
package/dist/strings.js.map
CHANGED
@@ -1,7 +1,7 @@
 1  1 |   {
 2  2 |   "version": 3,
 3  3 |   "sources": ["../src/strings.ts"],
 4    | -
"sourcesContent": ["/**\n * @fileoverview String manipulation utilities including ANSI code handling.\n * Provides string processing, prefix application, and terminal output utilities.\n */\n\nimport { ansiRegex, stripAnsi } from './ansi'\nimport { eastAsianWidth } from './external/get-east-asian-width'\n// Import get-east-asian-width from external wrapper.\n// This library implements Unicode Standard Annex #11 (East Asian Width).\n// https://www.unicode.org/reports/tr11/\n\n// Re-export ANSI utilities for backward compatibility.\nexport { ansiRegex, stripAnsi }\n\n// Type definitions\ndeclare const BlankStringBrand: unique symbol\nexport type BlankString = string & { [BlankStringBrand]: true }\ndeclare const EmptyStringBrand: unique symbol\nexport type EmptyString = string & { [EmptyStringBrand]: true }\n\n// IMPORTANT: Do not use destructuring here - use direct assignment instead.\n// tsgo has a bug that incorrectly transpiles destructured exports, resulting in\n// `exports.SomeName = void 0;` which causes runtime errors.\n// See: https://github.com/SocketDev/socket-packageurl-js/issues/3\nexport const fromCharCode = String.fromCharCode\n\nexport interface ApplyLinePrefixOptions {\n prefix?: string\n}\n\n/**\n * Apply a prefix to each line of a string.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function applyLinePrefix(\n str: string,\n options?: ApplyLinePrefixOptions | undefined,\n): string {\n const { prefix = '' } = {\n __proto__: null,\n ...options,\n } as ApplyLinePrefixOptions\n return prefix.length\n ? `${prefix}${str.includes('\\n') ? str.replace(/\\n/g, `\\n${prefix}`) : str}`\n : str\n}\n\n/**\n * Convert a camelCase string to kebab-case.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function camelToKebab(str: string): string {\n const { length } = str\n if (!length) {\n return ''\n }\n let result = ''\n let i = 0\n while (i < length) {\n const char = str[i]\n if (!char) {\n break\n }\n const charCode = char.charCodeAt(0)\n // Check if current character is uppercase letter.\n // A = 65, Z = 90\n const isUpperCase = charCode >= 65 /*'A'*/ && charCode <= 90 /*'Z'*/\n if (isUpperCase) {\n // Add dash before uppercase sequence (except at start).\n if (result.length > 0) {\n result += '-'\n }\n // Collect all consecutive uppercase letters.\n while (i < length) {\n const currChar = str[i]\n if (!currChar) {\n break\n }\n const currCharCode = currChar.charCodeAt(0)\n const isCurrUpper =\n currCharCode >= 65 /*'A'*/ && currCharCode <= 90 /*'Z'*/\n if (isCurrUpper) {\n // Convert uppercase to lowercase: subtract 32 (A=65 -> a=97, diff=32)\n result += fromCharCode(currCharCode + 32 /*'a'-'A'*/)\n i += 1\n } else {\n // Stop when we hit non-uppercase.\n break\n }\n }\n } else {\n // Handle lowercase letters, digits, and other characters.\n result += char\n i += 1\n }\n }\n return result\n}\n\nexport interface IndentStringOptions {\n count?: number\n}\n\n/**\n * Indent each line of a string with spaces.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function indentString(\n str: string,\n options?: IndentStringOptions | undefined,\n): string {\n const { count = 1 } = { __proto__: null, ...options } as IndentStringOptions\n return str.replace(/^(?!\\s*$)/gm, ' '.repeat(count))\n}\n\n/**\n * Check if a value is a blank string (empty or only whitespace).\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function isBlankString(value: unknown): value is BlankString {\n return typeof value === 'string' && (!value.length || /^\\s+$/.test(value))\n}\n\n/**\n * Check if a value is a non-empty string.\n 
*/\n/*@__NO_SIDE_EFFECTS__*/\nexport function isNonEmptyString(\n value: unknown,\n): value is Exclude<string, EmptyString> {\n return typeof value === 'string' && value.length > 0\n}\n\nexport interface SearchOptions {\n fromIndex?: number\n}\n\n/**\n * Search for a regular expression in a string starting from an index.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function search(\n str: string,\n regexp: RegExp,\n options?: SearchOptions | undefined,\n): number {\n const { fromIndex = 0 } = { __proto__: null, ...options } as SearchOptions\n const { length } = str\n if (fromIndex >= length) {\n return -1\n }\n if (fromIndex === 0) {\n return str.search(regexp)\n }\n const offset = fromIndex < 0 ? Math.max(length + fromIndex, 0) : fromIndex\n const result = str.slice(offset).search(regexp)\n return result === -1 ? -1 : result + offset\n}\n\n/**\n * Strip the Byte Order Mark (BOM) from the beginning of a string.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function stripBom(str: string): string {\n // In JavaScript, string data is stored as UTF-16, so BOM is 0xFEFF.\n // https://tc39.es/ecma262/#sec-unicode-format-control-characters\n return str.length > 0 && str.charCodeAt(0) === 0xfe_ff ? str.slice(1) : str\n}\n\n// Initialize Intl.Segmenter for proper grapheme cluster segmentation.\n// Hoisted outside stringWidth() for reuse across multiple calls.\n//\n// A grapheme cluster is what a user perceives as a single character, but may\n// be composed of multiple Unicode code points.\n//\n// Why this matters:\n// - '\uD83D\uDC4D' (thumbs up) is 1 code point but appears as 1 character \u2192 1 grapheme\n// - '\uD83D\uDC4D\uD83C\uDFFD' (thumbs up + skin tone) is 2 code points but appears as 1 character \u2192 1 grapheme\n// - '\uD83D\uDC68\u200D\uD83D\uDC69\u200D\uD83D\uDC67\u200D\uD83D\uDC66' (family) is 7 code points (4 people + 3 ZWJ) but appears as 1 character \u2192 1 grapheme\n// - '\u00E9' can be 1 code point (U+00E9) OR 2 code points (e + \u0301) but appears as 1 character \u2192 1 grapheme\n//\n// Without Intl.Segmenter, simple iteration treats each code point separately,\n// leading to incorrect width calculations for complex sequences.\n//\n// Intl.Segmenter is available in:\n// - Node.js 16.0.0+ (our minimum is 18.0.0, so always available)\n// - All modern browsers\n//\n// Performance: Creating this once and reusing it is more efficient than\n// creating a new Intl.Segmenter instance on every stringWidth() call.\nconst segmenter = new Intl.Segmenter()\n\n// Feature-detect Unicode property escapes support and create regex patterns.\n// Hoisted outside stringWidth() for reuse across multiple calls.\n//\n// Unicode property escapes in regex allow matching characters by their Unicode properties.\n// The 'v' flag (ES2024, Node 20+) provides the most accurate Unicode support including:\n// - \\p{RGI_Emoji} - Matches only emoji recommended for general interchange\n// - Full support for Unicode sets and properties\n//\n// The 'u' flag (ES2015, Node 18+) provides basic Unicode support but:\n// - No \\p{RGI_Emoji} property (must use broader \\p{Extended_Pictographic})\n// - No \\p{Surrogate} property (must omit from patterns)\n// - Less accurate for complex emoji sequences\n//\n// We feature-detect by attempting to create a regex with 'v' flag.\n// If it throws, we fall back to 'u' flag with adjusted patterns.\n//\n// This ensures:\n// - Best accuracy on Node 20+ (our test matrix: 20, 22, 24)\n// - Backward compatibility with Node 18 (our minimum version)\n// - No runtime errors from unsupported regex 
features\n//\n// Performance: Creating these once and reusing them is more efficient than\n// creating new regex instances on every stringWidth() call.\nlet zeroWidthClusterRegex: RegExp\nlet leadingNonPrintingRegex: RegExp\nlet emojiRegex: RegExp\n\ntry {\n // Try 'v' flag first (Node 20+) for most accurate Unicode property support.\n //\n // ZERO-WIDTH CLUSTER PATTERN:\n // Matches entire clusters that should be invisible (width = 0):\n // - \\p{Default_Ignorable_Code_Point} - Characters like Zero Width Space (U+200B)\n // - \\p{Control} - ASCII control chars (0x00-0x1F, 0x7F-0x9F) like \\t, \\n\n // - \\p{Mark} - Combining marks that modify previous character (accents, diacritics)\n // - \\p{Surrogate} - Lone surrogate halves (invalid UTF-16, should not appear)\n zeroWidthClusterRegex =\n /^(?:\\p{Default_Ignorable_Code_Point}|\\p{Control}|\\p{Mark}|\\p{Surrogate})+$/v\n\n // LEADING NON-PRINTING PATTERN:\n // Matches non-printing characters at the start of a cluster.\n // Used to find the \"base\" visible character in a cluster.\n // - \\p{Format} - Formatting characters like Right-to-Left marks\n // Example: In a cluster starting with format chars, we skip them to find the base character.\n leadingNonPrintingRegex =\n /^[\\p{Default_Ignorable_Code_Point}\\p{Control}\\p{Format}\\p{Mark}\\p{Surrogate}]+/v\n\n // RGI EMOJI PATTERN:\n // \\p{RGI_Emoji} matches emoji in the \"Recommended for General Interchange\" set.\n // This is the most accurate way to detect emoji that should render as double-width.\n //\n // RGI emoji include:\n // - Basic emoji: \uD83D\uDC4D, \uD83D\uDE00, \u26A1\n // - Emoji with modifiers: \uD83D\uDC4D\uD83C\uDFFD (thumbs up + medium skin tone)\n // - ZWJ sequences: \uD83D\uDC68\u200D\uD83D\uDC69\u200D\uD83D\uDC67\u200D\uD83D\uDC66 (family: man, woman, girl, boy)\n // - Keycap sequences: 1\uFE0F\u20E3 (digit + variation selector + combining enclosing keycap)\n //\n // Why RGI? The Unicode Consortium recommends this subset for interchange because:\n // - They have consistent rendering across platforms\n // - They're widely supported\n // - They follow a standardized format\n //\n // Non-RGI emoji might be symbols that look like emoji but render as 1 column.\n emojiRegex = /^\\p{RGI_Emoji}$/v\n} catch {\n // Fall back to 'u' flag (Node 18+) with slightly less accurate patterns.\n //\n // KEY DIFFERENCES from 'v' flag patterns:\n // 1. No \\p{Surrogate} property - omitted from patterns\n // 2. 
No \\p{RGI_Emoji} property - use \\p{Extended_Pictographic} instead\n //\n // \\p{Extended_Pictographic} is broader than \\p{RGI_Emoji}:\n // - Includes emoji-like symbols that might render as 1 column\n // - Less precise but better than nothing\n // - Defined in Unicode Technical Standard #51\n //\n // The patterns are otherwise identical, just with \\p{Surrogate} removed\n // and \\p{RGI_Emoji} replaced with \\p{Extended_Pictographic}.\n zeroWidthClusterRegex =\n /^(?:\\p{Default_Ignorable_Code_Point}|\\p{Control}|\\p{Mark})+$/u\n leadingNonPrintingRegex =\n /^[\\p{Default_Ignorable_Code_Point}\\p{Control}\\p{Format}\\p{Mark}]+/u\n emojiRegex = /^\\p{Extended_Pictographic}$/u\n}\n\n/**\n * Get the visual width of a string in terminal columns.\n * Strips ANSI escape codes and accounts for wide characters.\n *\n * Based on string-width:\n * https://socket.dev/npm/package/string-width/overview/7.2.0\n * MIT License\n * Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)\n *\n * Terminal emulators display characters in a grid of cells (columns).\n * Most ASCII characters take 1 column, but some characters (especially\n * emoji and CJK characters) take 2 columns.\n *\n * This function calculates how many columns a string will occupy when\n * displayed in a terminal, which is crucial for:\n * - Aligning text properly\n * - Preventing text from jumping when characters change\n * - Calculating padding/spacing\n *\n * Logic:\n * - Segment graphemes to match how terminals render clusters.\n * - Width rules:\n * 1. Skip non-printing clusters (Default_Ignorable, Control, pure Mark, lone Surrogates).\n * 2. RGI emoji clusters (\\p{RGI_Emoji}) are double-width.\n * 3. Otherwise use East Asian Width of the cluster's first visible code point.\n * 4. 
Add widths for trailing Halfwidth/Fullwidth Forms within the same cluster.\n *\n * East Asian Width categories (Unicode Standard Annex #11):\n * - F (Fullwidth): 2 columns - e.g., fullwidth Latin letters (\uFF21, \uFF22)\n * - W (Wide): 2 columns - e.g., CJK ideographs (\u6F22\u5B57), emoji (\u26A1, \uD83D\uDE00)\n * - H (Halfwidth): 1 column - e.g., halfwidth Katakana (\uFF71, \uFF72)\n * - Na (Narrow): 1 column - e.g., ASCII (a-z, 0-9)\n * - A (Ambiguous): Context-dependent, we treat as 1 column\n * - N (Neutral): 1 column - e.g., most symbols (\u2726, \u2727, \u22C6)\n *\n * Why this matters for Socket spinners:\n * - Lightning bolt (\u26A1) takes 2 columns\n * - Stars (\u2726, \u2727, \u22C6) take 1 column\n * - Without compensation, text jumps when frames change\n * - We use this to calculate padding for consistent alignment\n *\n * @example\n * stringWidth('hello') // => 5 (5 ASCII chars = 5 columns)\n * stringWidth('\u26A1') // => 2 (lightning bolt is wide)\n * stringWidth('\u2726') // => 1 (star is narrow)\n * stringWidth('\\x1b[31mred\\x1b[0m') // => 3 (ANSI codes stripped, 'red' = 3)\n *\n * @throws {TypeError} When input is not a string.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function stringWidth(text: string): number {\n if (typeof text !== 'string' || !text.length) {\n return 0\n }\n\n // Strip ANSI escape codes first (colors, bold, italic, etc.).\n // These are invisible and don't contribute to visual width.\n // Example: '\\x1b[31mred\\x1b[0m' becomes 'red'.\n const plainText = stripAnsi(text)\n\n if (!plainText.length) {\n return 0\n }\n\n // KEY IMPROVEMENT #1: Proper Grapheme Cluster Segmentation\n //\n // Use the hoisted Intl.Segmenter instance (defined outside this function).\n // See comments above for detailed explanation of grapheme cluster segmentation.\n\n // KEY IMPROVEMENT #2: Feature Detection for Unicode Property Escapes\n //\n // Use the hoisted regex patterns (defined outside this function).\n // See comments above for detailed explanation of feature detection and fallback patterns.\n\n let width = 0\n\n // Configure East Asian Width calculation.\n // ambiguousAsWide: false - treat ambiguous-width characters as narrow (1 column).\n //\n // Ambiguous width characters (category 'A') include:\n // - Greek letters: \u03B1, \u03B2, \u03B3\n // - Cyrillic letters: \u0410, \u0411, \u0412\n // - Box drawing characters: \u2500, \u2502, \u250C\n //\n // In East Asian contexts, these are often rendered as wide (2 columns).\n // In Western contexts, they're typically narrow (1 column).\n //\n // We choose narrow (false) because:\n // - Socket's primary audience is Western developers\n // - Most terminal emulators default to narrow for ambiguous characters\n // - Consistent with string-width's default behavior\n const eastAsianWidthOptions = { ambiguousAsWide: false }\n\n // KEY IMPROVEMENT #3: Comprehensive Width Calculation\n //\n // Segment the string into grapheme clusters and calculate width for each.\n // This is the core algorithm that handles all the complexity of Unicode text rendering.\n for (const { segment } of segmenter.segment(plainText)) {\n // STEP 1: Skip zero-width / non-printing clusters\n //\n // These clusters contain only invisible characters that take no space.\n // Examples:\n // - '\\t' (tab) - Control character\n // - '\\n' (newline) - Control character\n // - '\\u200B' (zero-width space) - Default ignorable\n // - Combining marks without base character\n //\n // Why skip? 
Terminals don't allocate columns for these characters.\n // They're either control codes or modify adjacent characters without adding width.\n if (zeroWidthClusterRegex.test(segment)) {\n continue\n }\n\n // STEP 2: Handle emoji (double-width)\n //\n // RGI emoji are always rendered as double-width (2 columns) in terminals.\n // This is true even for complex sequences:\n // - \uD83D\uDC4D (basic emoji) = 2 columns\n // - \uD83D\uDC4D\uD83C\uDFFD (emoji + skin tone modifier) = 2 columns (not 4!)\n // - \uD83D\uDC68\u200D\uD83D\uDC69\u200D\uD83D\uDC67\u200D\uD83D\uDC66 (family ZWJ sequence) = 2 columns (not 14!)\n //\n // Why double-width? Historical reasons:\n // - Emoji originated in Japanese mobile carriers\n // - They were designed to match CJK character width\n // - Terminal emulators inherited this behavior\n //\n // The key insight: The ENTIRE grapheme cluster is 2 columns, regardless\n // of how many code points it contains. That's why we need Intl.Segmenter!\n if (emojiRegex.test(segment)) {\n width += 2\n continue\n }\n\n // STEP 3: Use East Asian Width for everything else\n //\n // For non-emoji clusters, calculate width based on the first visible character.\n //\n // Why first visible character? In a grapheme cluster like \"\u00E9\" (e + combining acute),\n // the base character 'e' determines the width, and the combining mark modifies it\n // without adding width.\n //\n // Strip leading non-printing characters to find the base character.\n // Example: If a cluster starts with format characters, skip them to find\n // the actual visible character that determines width.\n const baseSegment = segment.replace(leadingNonPrintingRegex, '')\n const codePoint = baseSegment.codePointAt(0)\n\n if (codePoint === undefined) {\n // If no visible character remains after stripping non-printing chars, skip.\n // This shouldn't happen if our zero-width regex is correct, but defensive programming.\n continue\n }\n\n // Calculate width using East Asian Width property.\n // This handles:\n // - Narrow (1 column): ASCII a-z, A-Z, 0-9, most symbols\n // - Wide (2 columns): CJK ideographs (\u6F22, \u5B57), fullwidth forms (\uFF21, \uFF22)\n // - Halfwidth (1 column): Halfwidth Katakana (\uFF71, \uFF72, \uFF73)\n // - Ambiguous (1 column per our config): Greek, Cyrillic, box drawing\n width += eastAsianWidth(codePoint, eastAsianWidthOptions)\n\n // STEP 4: Handle trailing Halfwidth and Fullwidth Forms\n //\n // The Halfwidth and Fullwidth Forms Unicode block (U+FF00-U+FFEF) contains\n // compatibility characters for legacy East Asian encodings.\n //\n // Examples:\n // - \uFF9E (U+FF9E) - Halfwidth Katakana voiced sound mark (dakuten)\n // - \uFF9F (U+FF9F) - Halfwidth Katakana semi-voiced sound mark (handakuten)\n // - \uFF70 (U+FF70) - Halfwidth Katakana-Hiragana prolonged sound mark\n //\n // These can appear as TRAILING characters in a grapheme cluster (not leading).\n // When they do, they add their own width to the cluster.\n //\n // Example: A cluster might be [base character][dakuten]\n // - Base character contributes its width (calculated above)\n // - Dakuten contributes its width (calculated here)\n //\n // Why is this necessary? 
These forms are spacing characters, not combining marks.\n // They occupy their own column(s) even when following another character.\n //\n // Note: We only check trailing characters (segment.slice(1)).\n // The base character width was already calculated above.\n if (segment.length > 1) {\n for (const char of segment.slice(1)) {\n const charCode = char.charCodeAt(0)\n // Check if character is in Halfwidth and Fullwidth Forms range.\n if (charCode >= 0xff_00 && charCode <= 0xff_ef) {\n const trailingCodePoint = char.codePointAt(0)\n if (trailingCodePoint !== undefined) {\n // Add the East Asian Width of this trailing character.\n // Most halfwidth forms contribute 1 column, fullwidth contribute 2.\n width += eastAsianWidth(trailingCodePoint, eastAsianWidthOptions)\n }\n }\n }\n }\n }\n\n return width\n}\n\n/**\n * Convert a string to kebab-case (handles camelCase and snake_case).\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function toKebabCase(str: string): string {\n if (!str.length) {\n return str\n }\n return (\n str\n // Convert camelCase to kebab-case\n .replace(/([a-z]+[0-9]*)([A-Z])/g, '$1-$2')\n // Convert underscores to hyphens\n .replace(/_/g, '-')\n .toLowerCase()\n )\n}\n\n/**\n * Trim newlines from the beginning and end of a string.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function trimNewlines(str: string): string {\n const { length } = str\n if (length === 0) {\n return str\n }\n const first = str.charCodeAt(0)\n const noFirstNewline = first !== 13 /*'\\r'*/ && first !== 10 /*'\\n'*/\n if (length === 1) {\n return noFirstNewline ? str : ''\n }\n const last = str.charCodeAt(length - 1)\n const noLastNewline = last !== 13 /*'\\r'*/ && last !== 10 /*'\\n'*/\n if (noFirstNewline && noLastNewline) {\n return str\n }\n let start = 0\n let end = length\n while (start < end) {\n const code = str.charCodeAt(start)\n if (code !== 13 /*'\\r'*/ && code !== 10 /*'\\n'*/) {\n break\n }\n start += 1\n }\n while (end > start) {\n const code = str.charCodeAt(end - 1)\n if (code !== 13 /*'\\r'*/ && code !== 10 /*'\\n'*/) {\n break\n }\n end -= 1\n }\n return start === 0 && end === length ? str : str.slice(start, end)\n}\n\n/**\n * Repeat a string n times.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function repeatString(str: string, count: number): string {\n if (count <= 0) {\n return ''\n }\n return str.repeat(count)\n}\n\n/**\n * Center text within a given width.\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function centerText(text: string, width: number): string {\n const textLength = stripAnsi(text).length\n if (textLength >= width) {\n return text\n }\n\n const padding = width - textLength\n const leftPad = Math.floor(padding / 2)\n const rightPad = padding - leftPad\n\n return ' '.repeat(leftPad) + text + ' '.repeat(rightPad)\n}\n"],
 5    | -
"mappings": ";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAKA,kBAAqC;AACrC,kCAA+B;AAkBxB,MAAM,eAAe,OAAO;AAAA;
    4 | +
"sourcesContent": ["/**\n * @fileoverview String manipulation utilities including ANSI code handling.\n * Provides string processing, prefix application, and terminal output utilities.\n */\n\nimport { ansiRegex, stripAnsi } from './ansi'\nimport { eastAsianWidth } from './external/get-east-asian-width'\n// Import get-east-asian-width from external wrapper.\n// This library implements Unicode Standard Annex #11 (East Asian Width).\n// https://www.unicode.org/reports/tr11/\n\n// Re-export ANSI utilities for backward compatibility.\nexport { ansiRegex, stripAnsi }\n\n// Type definitions\ndeclare const BlankStringBrand: unique symbol\nexport type BlankString = string & { [BlankStringBrand]: true }\ndeclare const EmptyStringBrand: unique symbol\nexport type EmptyString = string & { [EmptyStringBrand]: true }\n\n// IMPORTANT: Do not use destructuring here - use direct assignment instead.\n// tsgo has a bug that incorrectly transpiles destructured exports, resulting in\n// `exports.SomeName = void 0;` which causes runtime errors.\n// See: https://github.com/SocketDev/socket-packageurl-js/issues/3\nexport const fromCharCode = String.fromCharCode\n\nexport interface ApplyLinePrefixOptions {\n /**\n * The prefix to add to each line.\n * @default ''\n */\n prefix?: string | undefined\n}\n\n/**\n * Apply a prefix to each line of a string.\n *\n * Prepends the specified prefix to the beginning of each line in the input string.\n * If the string contains newlines, the prefix is added after each newline as well.\n * When no prefix is provided or prefix is empty, returns the original string unchanged.\n *\n * @param str - The string to add prefixes to\n * @param options - Configuration options\n * @returns The string with prefix applied to each line\n *\n * @example\n * ```ts\n * applyLinePrefix('hello\\nworld', { prefix: '> ' })\n * // Returns: '> hello\\n> world'\n *\n * applyLinePrefix('single line', { prefix: ' ' })\n * // Returns: ' single line'\n *\n * applyLinePrefix('no prefix')\n * // Returns: 'no prefix'\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function applyLinePrefix(\n str: string,\n options?: ApplyLinePrefixOptions | undefined,\n): string {\n const { prefix = '' } = {\n __proto__: null,\n ...options,\n } as ApplyLinePrefixOptions\n return prefix.length\n ? `${prefix}${str.includes('\\n') ? str.replace(/\\n/g, `\\n${prefix}`) : str}`\n : str\n}\n\n/**\n * Convert a camelCase string to kebab-case.\n *\n * Transforms camelCase strings by converting uppercase letters to lowercase\n * and inserting hyphens before uppercase sequences. Handles consecutive\n * uppercase letters (like \"XMLHttpRequest\") by treating them as a single word.\n * Returns empty string for empty input.\n *\n * Note: This function only handles camelCase. 
For mixed formats including\n * snake_case, use `toKebabCase()` instead.\n *\n * @param str - The camelCase string to convert\n * @returns The kebab-case string\n *\n * @example\n * ```ts\n * camelToKebab('helloWorld')\n * // Returns: 'hello-world'\n *\n * camelToKebab('XMLHttpRequest')\n * // Returns: 'xmlhttprequest'\n *\n * camelToKebab('iOS')\n * // Returns: 'ios'\n *\n * camelToKebab('')\n * // Returns: ''\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function camelToKebab(str: string): string {\n const { length } = str\n if (!length) {\n return ''\n }\n let result = ''\n let i = 0\n while (i < length) {\n const char = str[i]\n if (!char) {\n break\n }\n const charCode = char.charCodeAt(0)\n // Check if current character is uppercase letter.\n // A = 65, Z = 90\n const isUpperCase = charCode >= 65 /*'A'*/ && charCode <= 90 /*'Z'*/\n if (isUpperCase) {\n // Add dash before uppercase sequence (except at start).\n if (result.length > 0) {\n result += '-'\n }\n // Collect all consecutive uppercase letters.\n while (i < length) {\n const currChar = str[i]\n if (!currChar) {\n break\n }\n const currCharCode = currChar.charCodeAt(0)\n const isCurrUpper =\n currCharCode >= 65 /*'A'*/ && currCharCode <= 90 /*'Z'*/\n if (isCurrUpper) {\n // Convert uppercase to lowercase: subtract 32 (A=65 -> a=97, diff=32)\n result += fromCharCode(currCharCode + 32 /*'a'-'A'*/)\n i += 1\n } else {\n // Stop when we hit non-uppercase.\n break\n }\n }\n } else {\n // Handle lowercase letters, digits, and other characters.\n result += char\n i += 1\n }\n }\n return result\n}\n\nexport interface IndentStringOptions {\n /**\n * Number of spaces to indent each line.\n * @default 1\n */\n count?: number | undefined\n}\n\n/**\n * Indent each line of a string with spaces.\n *\n * Adds the specified number of spaces to the beginning of each non-empty line\n * in the input string. 
Empty lines (containing only whitespace) are not indented.\n * Uses a regular expression to efficiently handle multi-line strings.\n *\n * @param str - The string to indent\n * @param options - Configuration options\n * @returns The indented string\n *\n * @example\n * ```ts\n * indentString('hello\\nworld', { count: 2 })\n * // Returns: ' hello\\n world'\n *\n * indentString('line1\\n\\nline3', { count: 4 })\n * // Returns: ' line1\\n\\n line3'\n *\n * indentString('single line')\n * // Returns: ' single line' (default: 1 space)\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function indentString(\n str: string,\n options?: IndentStringOptions | undefined,\n): string {\n const { count = 1 } = { __proto__: null, ...options } as IndentStringOptions\n return str.replace(/^(?!\\s*$)/gm, ' '.repeat(count))\n}\n\n/**\n * Check if a value is a blank string (empty or only whitespace).\n *\n * A blank string is defined as a string that is either:\n * - Completely empty (length 0)\n * - Contains only whitespace characters (spaces, tabs, newlines, etc.)\n *\n * This is useful for validation when you need to ensure user input\n * contains actual content, not just whitespace.\n *\n * @param value - The value to check\n * @returns `true` if the value is a blank string, `false` otherwise\n *\n * @example\n * ```ts\n * isBlankString('')\n * // Returns: true\n *\n * isBlankString(' ')\n * // Returns: true\n *\n * isBlankString('\\n\\t ')\n * // Returns: true\n *\n * isBlankString('hello')\n * // Returns: false\n *\n * isBlankString(null)\n * // Returns: false\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function isBlankString(value: unknown): value is BlankString {\n return typeof value === 'string' && (!value.length || /^\\s+$/.test(value))\n}\n\n/**\n * Check if a value is a non-empty string.\n *\n * Returns `true` only if the value is a string with at least one character.\n * This includes strings containing only whitespace (use `isBlankString()` if\n * you want to exclude those). Type guard ensures TypeScript knows the value\n * is a string after this check.\n *\n * @param value - The value to check\n * @returns `true` if the value is a non-empty string, `false` otherwise\n *\n * @example\n * ```ts\n * isNonEmptyString('hello')\n * // Returns: true\n *\n * isNonEmptyString(' ')\n * // Returns: true (contains whitespace)\n *\n * isNonEmptyString('')\n * // Returns: false\n *\n * isNonEmptyString(null)\n * // Returns: false\n *\n * isNonEmptyString(123)\n * // Returns: false\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function isNonEmptyString(\n value: unknown,\n): value is Exclude<string, EmptyString> {\n return typeof value === 'string' && value.length > 0\n}\n\nexport interface SearchOptions {\n /**\n * The position in the string to begin searching from.\n * Negative values count back from the end of the string.\n * @default 0\n */\n fromIndex?: number | undefined\n}\n\n/**\n * Search for a regular expression in a string starting from an index.\n *\n * Similar to `String.prototype.search()` but allows specifying a starting\n * position. Returns the index of the first match at or after `fromIndex`,\n * or -1 if no match is found. 
Negative `fromIndex` values count back from\n * the end of the string.\n *\n * This is more efficient than using `str.slice(fromIndex).search()` when\n * you need the absolute position in the original string, as it handles\n * the offset calculation for you.\n *\n * @param str - The string to search in\n * @param regexp - The regular expression to search for\n * @param options - Configuration options\n * @returns The index of the first match, or -1 if not found\n *\n * @example\n * ```ts\n * search('hello world hello', /hello/, { fromIndex: 0 })\n * // Returns: 0 (first 'hello')\n *\n * search('hello world hello', /hello/, { fromIndex: 6 })\n * // Returns: 12 (second 'hello')\n *\n * search('hello world', /goodbye/, { fromIndex: 0 })\n * // Returns: -1 (not found)\n *\n * search('hello world', /hello/, { fromIndex: -5 })\n * // Returns: -1 (starts searching from 'world', no match)\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function search(\n str: string,\n regexp: RegExp,\n options?: SearchOptions | undefined,\n): number {\n const { fromIndex = 0 } = { __proto__: null, ...options } as SearchOptions\n const { length } = str\n if (fromIndex >= length) {\n return -1\n }\n if (fromIndex === 0) {\n return str.search(regexp)\n }\n const offset = fromIndex < 0 ? Math.max(length + fromIndex, 0) : fromIndex\n const result = str.slice(offset).search(regexp)\n return result === -1 ? -1 : result + offset\n}\n\n/**\n * Strip the Byte Order Mark (BOM) from the beginning of a string.\n *\n * The BOM (U+FEFF) is a Unicode character that can appear at the start of\n * a text file to indicate byte order and encoding. In UTF-16 (JavaScript's\n * internal string representation), it appears as 0xFEFF. This function\n * removes it if present, leaving the rest of the string unchanged.\n *\n * Most text processing doesn't need to handle the BOM explicitly, but it\n * can cause issues when parsing JSON, CSV, or other structured data formats\n * that don't expect a leading invisible character.\n *\n * @param str - The string to strip BOM from\n * @returns The string without BOM\n *\n * @example\n * ```ts\n * stripBom('\\uFEFFhello world')\n * // Returns: 'hello world'\n *\n * stripBom('hello world')\n * // Returns: 'hello world' (no BOM to strip)\n *\n * stripBom('')\n * // Returns: ''\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function stripBom(str: string): string {\n // In JavaScript, string data is stored as UTF-16, so BOM is 0xFEFF.\n // https://tc39.es/ecma262/#sec-unicode-format-control-characters\n return str.length > 0 && str.charCodeAt(0) === 0xfe_ff ? 
str.slice(1) : str\n}\n\n// Initialize Intl.Segmenter for proper grapheme cluster segmentation.\n// Hoisted outside stringWidth() for reuse across multiple calls.\n//\n// A grapheme cluster is what a user perceives as a single character, but may\n// be composed of multiple Unicode code points.\n//\n// Why this matters:\n// - '\uD83D\uDC4D' (thumbs up) is 1 code point but appears as 1 character \u2192 1 grapheme\n// - '\uD83D\uDC4D\uD83C\uDFFD' (thumbs up + skin tone) is 2 code points but appears as 1 character \u2192 1 grapheme\n// - '\uD83D\uDC68\u200D\uD83D\uDC69\u200D\uD83D\uDC67\u200D\uD83D\uDC66' (family) is 7 code points (4 people + 3 ZWJ) but appears as 1 character \u2192 1 grapheme\n// - '\u00E9' can be 1 code point (U+00E9) OR 2 code points (e + \u0301) but appears as 1 character \u2192 1 grapheme\n//\n// Without Intl.Segmenter, simple iteration treats each code point separately,\n// leading to incorrect width calculations for complex sequences.\n//\n// Intl.Segmenter is available in:\n// - Node.js 16.0.0+ (our minimum is 18.0.0, so always available)\n// - All modern browsers\n//\n// Performance: Creating this once and reusing it is more efficient than\n// creating a new Intl.Segmenter instance on every stringWidth() call.\nconst segmenter = new Intl.Segmenter()\n\n// Feature-detect Unicode property escapes support and create regex patterns.\n// Hoisted outside stringWidth() for reuse across multiple calls.\n//\n// Unicode property escapes in regex allow matching characters by their Unicode properties.\n// The 'v' flag (ES2024, Node 20+) provides the most accurate Unicode support including:\n// - \\p{RGI_Emoji} - Matches only emoji recommended for general interchange\n// - Full support for Unicode sets and properties\n//\n// The 'u' flag (ES2015, Node 18+) provides basic Unicode support but:\n// - No \\p{RGI_Emoji} property (must use broader \\p{Extended_Pictographic})\n// - No \\p{Surrogate} property (must omit from patterns)\n// - Less accurate for complex emoji sequences\n//\n// We feature-detect by attempting to create a regex with 'v' flag.\n// If it throws, we fall back to 'u' flag with adjusted patterns.\n//\n// This ensures:\n// - Best accuracy on Node 20+ (our test matrix: 20, 22, 24)\n// - Backward compatibility with Node 18 (our minimum version)\n// - No runtime errors from unsupported regex features\n//\n// Performance: Creating these once and reusing them is more efficient than\n// creating new regex instances on every stringWidth() call.\nlet zeroWidthClusterRegex: RegExp\nlet leadingNonPrintingRegex: RegExp\nlet emojiRegex: RegExp\n\ntry {\n // Try 'v' flag first (Node 20+) for most accurate Unicode property support.\n //\n // ZERO-WIDTH CLUSTER PATTERN:\n // Matches entire clusters that should be invisible (width = 0):\n // - \\p{Default_Ignorable_Code_Point} - Characters like Zero Width Space (U+200B)\n // - \\p{Control} - ASCII control chars (0x00-0x1F, 0x7F-0x9F) like \\t, \\n\n // - \\p{Mark} - Combining marks that modify previous character (accents, diacritics)\n // - \\p{Surrogate} - Lone surrogate halves (invalid UTF-16, should not appear)\n zeroWidthClusterRegex =\n /^(?:\\p{Default_Ignorable_Code_Point}|\\p{Control}|\\p{Mark}|\\p{Surrogate})+$/v\n\n // LEADING NON-PRINTING PATTERN:\n // Matches non-printing characters at the start of a cluster.\n // Used to find the \"base\" visible character in a cluster.\n // - \\p{Format} - Formatting characters like Right-to-Left marks\n // Example: In a cluster starting with format chars, we skip them to find the base 
character.\n leadingNonPrintingRegex =\n /^[\\p{Default_Ignorable_Code_Point}\\p{Control}\\p{Format}\\p{Mark}\\p{Surrogate}]+/v\n\n // RGI EMOJI PATTERN:\n // \\p{RGI_Emoji} matches emoji in the \"Recommended for General Interchange\" set.\n // This is the most accurate way to detect emoji that should render as double-width.\n //\n // RGI emoji include:\n // - Basic emoji: \uD83D\uDC4D, \uD83D\uDE00, \u26A1\n // - Emoji with modifiers: \uD83D\uDC4D\uD83C\uDFFD (thumbs up + medium skin tone)\n // - ZWJ sequences: \uD83D\uDC68\u200D\uD83D\uDC69\u200D\uD83D\uDC67\u200D\uD83D\uDC66 (family: man, woman, girl, boy)\n // - Keycap sequences: 1\uFE0F\u20E3 (digit + variation selector + combining enclosing keycap)\n //\n // Why RGI? The Unicode Consortium recommends this subset for interchange because:\n // - They have consistent rendering across platforms\n // - They're widely supported\n // - They follow a standardized format\n //\n // Non-RGI emoji might be symbols that look like emoji but render as 1 column.\n emojiRegex = /^\\p{RGI_Emoji}$/v\n} catch {\n // Fall back to 'u' flag (Node 18+) with slightly less accurate patterns.\n //\n // KEY DIFFERENCES from 'v' flag patterns:\n // 1. No \\p{Surrogate} property - omitted from patterns\n // 2. No \\p{RGI_Emoji} property - use \\p{Extended_Pictographic} instead\n //\n // \\p{Extended_Pictographic} is broader than \\p{RGI_Emoji}:\n // - Includes emoji-like symbols that might render as 1 column\n // - Less precise but better than nothing\n // - Defined in Unicode Technical Standard #51\n //\n // The patterns are otherwise identical, just with \\p{Surrogate} removed\n // and \\p{RGI_Emoji} replaced with \\p{Extended_Pictographic}.\n zeroWidthClusterRegex =\n /^(?:\\p{Default_Ignorable_Code_Point}|\\p{Control}|\\p{Mark})+$/u\n leadingNonPrintingRegex =\n /^[\\p{Default_Ignorable_Code_Point}\\p{Control}\\p{Format}\\p{Mark}]+/u\n emojiRegex = /^\\p{Extended_Pictographic}$/u\n}\n\n/**\n * Get the visual width of a string in terminal columns.\n *\n * Calculates how many columns a string will occupy when displayed in a terminal,\n * accounting for:\n * - ANSI escape codes (stripped before calculation)\n * - Wide characters (CJK ideographs, fullwidth forms) that take 2 columns\n * - Emoji (including complex sequences) that take 2 columns\n * - Combining marks and zero-width characters (take 0 columns)\n * - East Asian Width properties (Fullwidth, Wide, Halfwidth, Narrow, etc.)\n *\n * Based on string-width by Sindre Sorhus:\n * https://socket.dev/npm/package/string-width/overview/7.2.0\n * MIT License\n * Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)\n *\n * Terminal emulators display characters in a grid of cells (columns).\n * Most ASCII characters take 1 column, but some characters (especially\n * emoji and CJK characters) take 2 columns. This function calculates\n * the actual visual width, which is crucial for:\n * - Aligning text properly in tables or columns\n * - Preventing text from jumping when characters change\n * - Calculating padding/spacing for spinners and progress bars\n * - Wrapping text at the correct column width\n *\n * Algorithm Overview:\n * 1. Strip ANSI escape codes (invisible in terminal)\n * 2. Segment into grapheme clusters (user-perceived characters)\n * 3. 
For each cluster:\n * - Skip zero-width/non-printing clusters (width = 0)\n * - RGI emoji clusters are double-width (width = 2)\n * - Otherwise use East Asian Width of first visible code point\n * - Add width for trailing Halfwidth/Fullwidth Forms\n *\n * East Asian Width Categories (Unicode Standard Annex #11):\n * - F (Fullwidth): 2 columns - e.g., fullwidth Latin letters (\uFF21, \uFF22)\n * - W (Wide): 2 columns - e.g., CJK ideographs (\u6F22\u5B57), emoji (\u26A1, \uD83D\uDE00)\n * - H (Halfwidth): 1 column - e.g., halfwidth Katakana (\uFF71, \uFF72)\n * - Na (Narrow): 1 column - e.g., ASCII (a-z, 0-9)\n * - A (Ambiguous): Context-dependent, treated as 1 column by default\n * - N (Neutral): 1 column - e.g., most symbols (\u2726, \u2727, \u22C6)\n *\n * Why This Matters for Socket:\n * - Lightning bolt (\u26A1) takes 2 columns\n * - Stars (\u2726, \u2727, \u22C6) take 1 column\n * - Without proper width calculation, spinner text jumps between frames\n * - This function enables consistent alignment by calculating padding\n *\n * @param text - The string to measure\n * @returns The visual width in terminal columns\n *\n * @example\n * ```ts\n * stringWidth('hello')\n * // Returns: 5 (5 ASCII chars = 5 columns)\n *\n * stringWidth('\u26A1')\n * // Returns: 2 (lightning bolt is wide)\n *\n * stringWidth('\u2726')\n * // Returns: 1 (star is narrow)\n *\n * stringWidth('\u6F22\u5B57')\n * // Returns: 4 (2 CJK characters \u00D7 2 columns each)\n *\n * stringWidth('\\x1b[31mred\\x1b[0m')\n * // Returns: 3 (ANSI codes stripped, 'red' = 3)\n *\n * stringWidth('\uD83D\uDC4D\uD83C\uDFFD')\n * // Returns: 2 (emoji with skin tone = 1 grapheme cluster = 2 columns)\n *\n * stringWidth('\u00E9')\n * // Returns: 1 (combining accent doesn't add width)\n *\n * stringWidth('')\n * // Returns: 0\n * ```\n *\n * @throws {TypeError} When input is not a string\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function stringWidth(text: string): number {\n if (typeof text !== 'string' || !text.length) {\n return 0\n }\n\n // Strip ANSI escape codes first (colors, bold, italic, etc.).\n // These are invisible and don't contribute to visual width.\n // Example: '\\x1b[31mred\\x1b[0m' becomes 'red'.\n const plainText = stripAnsi(text)\n\n if (!plainText.length) {\n return 0\n }\n\n // KEY IMPROVEMENT #1: Proper Grapheme Cluster Segmentation\n //\n // Use the hoisted Intl.Segmenter instance (defined outside this function).\n // See comments above for detailed explanation of grapheme cluster segmentation.\n\n // KEY IMPROVEMENT #2: Feature Detection for Unicode Property Escapes\n //\n // Use the hoisted regex patterns (defined outside this function).\n // See comments above for detailed explanation of feature detection and fallback patterns.\n\n let width = 0\n\n // Configure East Asian Width calculation.\n // ambiguousAsWide: false - treat ambiguous-width characters as narrow (1 column).\n //\n // Ambiguous width characters (category 'A') include:\n // - Greek letters: \u03B1, \u03B2, \u03B3\n // - Cyrillic letters: \u0410, \u0411, \u0412\n // - Box drawing characters: \u2500, \u2502, \u250C\n //\n // In East Asian contexts, these are often rendered as wide (2 columns).\n // In Western contexts, they're typically narrow (1 column).\n //\n // We choose narrow (false) because:\n // - Socket's primary audience is Western developers\n // - Most terminal emulators default to narrow for ambiguous characters\n // - Consistent with string-width's default behavior\n const eastAsianWidthOptions = { ambiguousAsWide: false }\n\n // KEY 
IMPROVEMENT #3: Comprehensive Width Calculation\n //\n // Segment the string into grapheme clusters and calculate width for each.\n // This is the core algorithm that handles all the complexity of Unicode text rendering.\n for (const { segment } of segmenter.segment(plainText)) {\n // STEP 1: Skip zero-width / non-printing clusters\n //\n // These clusters contain only invisible characters that take no space.\n // Examples:\n // - '\\t' (tab) - Control character\n // - '\\n' (newline) - Control character\n // - '\\u200B' (zero-width space) - Default ignorable\n // - Combining marks without base character\n //\n // Why skip? Terminals don't allocate columns for these characters.\n // They're either control codes or modify adjacent characters without adding width.\n if (zeroWidthClusterRegex.test(segment)) {\n continue\n }\n\n // STEP 2: Handle emoji (double-width)\n //\n // RGI emoji are always rendered as double-width (2 columns) in terminals.\n // This is true even for complex sequences:\n // - \uD83D\uDC4D (basic emoji) = 2 columns\n // - \uD83D\uDC4D\uD83C\uDFFD (emoji + skin tone modifier) = 2 columns (not 4!)\n // - \uD83D\uDC68\u200D\uD83D\uDC69\u200D\uD83D\uDC67\u200D\uD83D\uDC66 (family ZWJ sequence) = 2 columns (not 14!)\n //\n // Why double-width? Historical reasons:\n // - Emoji originated in Japanese mobile carriers\n // - They were designed to match CJK character width\n // - Terminal emulators inherited this behavior\n //\n // The key insight: The ENTIRE grapheme cluster is 2 columns, regardless\n // of how many code points it contains. That's why we need Intl.Segmenter!\n if (emojiRegex.test(segment)) {\n width += 2\n continue\n }\n\n // STEP 3: Use East Asian Width for everything else\n //\n // For non-emoji clusters, calculate width based on the first visible character.\n //\n // Why first visible character? 
In a grapheme cluster like \"\u00E9\" (e + combining acute),\n // the base character 'e' determines the width, and the combining mark modifies it\n // without adding width.\n //\n // Strip leading non-printing characters to find the base character.\n // Example: If a cluster starts with format characters, skip them to find\n // the actual visible character that determines width.\n const baseSegment = segment.replace(leadingNonPrintingRegex, '')\n const codePoint = baseSegment.codePointAt(0)\n\n if (codePoint === undefined) {\n // If no visible character remains after stripping non-printing chars, skip.\n // This shouldn't happen if our zero-width regex is correct, but defensive programming.\n continue\n }\n\n // Calculate width using East Asian Width property.\n // This handles:\n // - Narrow (1 column): ASCII a-z, A-Z, 0-9, most symbols\n // - Wide (2 columns): CJK ideographs (\u6F22, \u5B57), fullwidth forms (\uFF21, \uFF22)\n // - Halfwidth (1 column): Halfwidth Katakana (\uFF71, \uFF72, \uFF73)\n // - Ambiguous (1 column per our config): Greek, Cyrillic, box drawing\n width += eastAsianWidth(codePoint, eastAsianWidthOptions)\n\n // STEP 4: Handle trailing Halfwidth and Fullwidth Forms\n //\n // The Halfwidth and Fullwidth Forms Unicode block (U+FF00-U+FFEF) contains\n // compatibility characters for legacy East Asian encodings.\n //\n // Examples:\n // - \uFF9E (U+FF9E) - Halfwidth Katakana voiced sound mark (dakuten)\n // - \uFF9F (U+FF9F) - Halfwidth Katakana semi-voiced sound mark (handakuten)\n // - \uFF70 (U+FF70) - Halfwidth Katakana-Hiragana prolonged sound mark\n //\n // These can appear as TRAILING characters in a grapheme cluster (not leading).\n // When they do, they add their own width to the cluster.\n //\n // Example: A cluster might be [base character][dakuten]\n // - Base character contributes its width (calculated above)\n // - Dakuten contributes its width (calculated here)\n //\n // Why is this necessary? These forms are spacing characters, not combining marks.\n // They occupy their own column(s) even when following another character.\n //\n // Note: We only check trailing characters (segment.slice(1)).\n // The base character width was already calculated above.\n if (segment.length > 1) {\n for (const char of segment.slice(1)) {\n const charCode = char.charCodeAt(0)\n // Check if character is in Halfwidth and Fullwidth Forms range.\n if (charCode >= 0xff_00 && charCode <= 0xff_ef) {\n const trailingCodePoint = char.codePointAt(0)\n if (trailingCodePoint !== undefined) {\n // Add the East Asian Width of this trailing character.\n // Most halfwidth forms contribute 1 column, fullwidth contribute 2.\n width += eastAsianWidth(trailingCodePoint, eastAsianWidthOptions)\n }\n }\n }\n }\n }\n\n return width\n}\n\n/**\n * Convert a string to kebab-case (handles camelCase and snake_case).\n *\n * Transforms strings from camelCase or snake_case to kebab-case by:\n * - Converting uppercase letters to lowercase\n * - Inserting hyphens before uppercase letters (for camelCase)\n * - Replacing underscores with hyphens (for snake_case)\n *\n * This is more comprehensive than `camelToKebab()` as it handles mixed\n * formats including snake_case. 
Returns empty string for empty input.\n *\n * @param str - The string to convert\n * @returns The kebab-case string\n *\n * @example\n * ```ts\n * toKebabCase('helloWorld')\n * // Returns: 'hello-world'\n *\n * toKebabCase('hello_world')\n * // Returns: 'hello-world'\n *\n * toKebabCase('XMLHttpRequest')\n * // Returns: 'xmlhttp-request'\n *\n * toKebabCase('iOS_Version')\n * // Returns: 'io-s-version'\n *\n * toKebabCase('')\n * // Returns: ''\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function toKebabCase(str: string): string {\n if (!str.length) {\n return str\n }\n return (\n str\n // Convert camelCase to kebab-case\n .replace(/([a-z]+[0-9]*)([A-Z])/g, '$1-$2')\n // Convert underscores to hyphens\n .replace(/_/g, '-')\n .toLowerCase()\n )\n}\n\n/**\n * Trim newlines from the beginning and end of a string.\n *\n * Removes all leading and trailing newline characters (both `\\n` and `\\r`)\n * from a string, while preserving any newlines in the middle. This is similar\n * to `String.prototype.trim()` but specifically targets newlines instead of\n * all whitespace.\n *\n * Optimized for performance by checking the first and last characters before\n * doing any string manipulation. Returns the original string unchanged if no\n * newlines are found at the edges.\n *\n * @param str - The string to trim\n * @returns The string with leading and trailing newlines removed\n *\n * @example\n * ```ts\n * trimNewlines('\\n\\nhello\\n\\n')\n * // Returns: 'hello'\n *\n * trimNewlines('\\r\\nworld\\r\\n')\n * // Returns: 'world'\n *\n * trimNewlines('hello\\nworld')\n * // Returns: 'hello\\nworld' (middle newline preserved)\n *\n * trimNewlines(' hello ')\n * // Returns: ' hello ' (spaces not trimmed, only newlines)\n *\n * trimNewlines('hello')\n * // Returns: 'hello'\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function trimNewlines(str: string): string {\n const { length } = str\n if (length === 0) {\n return str\n }\n const first = str.charCodeAt(0)\n const noFirstNewline = first !== 13 /*'\\r'*/ && first !== 10 /*'\\n'*/\n if (length === 1) {\n return noFirstNewline ? str : ''\n }\n const last = str.charCodeAt(length - 1)\n const noLastNewline = last !== 13 /*'\\r'*/ && last !== 10 /*'\\n'*/\n if (noFirstNewline && noLastNewline) {\n return str\n }\n let start = 0\n let end = length\n while (start < end) {\n const code = str.charCodeAt(start)\n if (code !== 13 /*'\\r'*/ && code !== 10 /*'\\n'*/) {\n break\n }\n start += 1\n }\n while (end > start) {\n const code = str.charCodeAt(end - 1)\n if (code !== 13 /*'\\r'*/ && code !== 10 /*'\\n'*/) {\n break\n }\n end -= 1\n }\n return start === 0 && end === length ? str : str.slice(start, end)\n}\n\n/**\n * Repeat a string n times.\n *\n * Creates a new string by repeating the input string the specified number of times.\n * Returns an empty string if count is zero or negative. 
This is a simple wrapper\n * around `String.prototype.repeat()` with guard for non-positive counts.\n *\n * @param str - The string to repeat\n * @param count - The number of times to repeat the string\n * @returns The repeated string, or empty string if count <= 0\n *\n * @example\n * ```ts\n * repeatString('hello', 3)\n * // Returns: 'hellohellohello'\n *\n * repeatString('x', 5)\n * // Returns: 'xxxxx'\n *\n * repeatString('hello', 0)\n * // Returns: ''\n *\n * repeatString('hello', -1)\n * // Returns: ''\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function repeatString(str: string, count: number): string {\n if (count <= 0) {\n return ''\n }\n return str.repeat(count)\n}\n\n/**\n * Center text within a given width.\n *\n * Adds spaces before and after the text to center it within the specified width.\n * Distributes padding evenly on both sides. When the padding is odd, the extra\n * space is added to the right side. Strips ANSI codes before calculating text\n * length to ensure accurate centering of colored text.\n *\n * If the text is already wider than or equal to the target width, returns the\n * original text unchanged (no truncation occurs).\n *\n * @param text - The text to center (may include ANSI codes)\n * @param width - The target width in columns\n * @returns The centered text with padding\n *\n * @example\n * ```ts\n * centerText('hello', 11)\n * // Returns: ' hello ' (3 spaces on each side)\n *\n * centerText('hi', 10)\n * // Returns: ' hi ' (4 spaces on each side)\n *\n * centerText('odd', 8)\n * // Returns: ' odd ' (2 left, 3 right)\n *\n * centerText('\\x1b[31mred\\x1b[0m', 7)\n * // Returns: ' \\x1b[31mred\\x1b[0m ' (ANSI codes preserved, 'red' centered)\n *\n * centerText('too long text', 5)\n * // Returns: 'too long text' (no truncation, returned as-is)\n * ```\n */\n/*@__NO_SIDE_EFFECTS__*/\nexport function centerText(text: string, width: number): string {\n const textLength = stripAnsi(text).length\n if (textLength >= width) {\n return text\n }\n\n const padding = width - textLength\n const leftPad = Math.floor(padding / 2)\n const rightPad = padding - leftPad\n\n return ' '.repeat(leftPad) + text + ' '.repeat(rightPad)\n}\n"],
    5 | +
"mappings": ";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAKA,kBAAqC;AACrC,kCAA+B;AAkBxB,MAAM,eAAe,OAAO;AAAA;AAkC5B,SAAS,gBACd,KACA,SACQ;AACR,QAAM,EAAE,SAAS,GAAG,IAAI;AAAA,IACtB,WAAW;AAAA,IACX,GAAG;AAAA,EACL;AACA,SAAO,OAAO,SACV,GAAG,MAAM,GAAG,IAAI,SAAS,IAAI,IAAI,IAAI,QAAQ,OAAO;AAAA,EAAK,MAAM,EAAE,IAAI,GAAG,KACxE;AACN;AAAA;AAgCO,SAAS,aAAa,KAAqB;AAChD,QAAM,EAAE,OAAO,IAAI;AACnB,MAAI,CAAC,QAAQ;AACX,WAAO;AAAA,EACT;AACA,MAAI,SAAS;AACb,MAAI,IAAI;AACR,SAAO,IAAI,QAAQ;AACjB,UAAM,OAAO,IAAI,CAAC;AAClB,QAAI,CAAC,MAAM;AACT;AAAA,IACF;AACA,UAAM,WAAW,KAAK,WAAW,CAAC;AAGlC,UAAM,cAAc,YAAY,MAAc,YAAY;AAC1D,QAAI,aAAa;AAEf,UAAI,OAAO,SAAS,GAAG;AACrB,kBAAU;AAAA,MACZ;AAEA,aAAO,IAAI,QAAQ;AACjB,cAAM,WAAW,IAAI,CAAC;AACtB,YAAI,CAAC,UAAU;AACb;AAAA,QACF;AACA,cAAM,eAAe,SAAS,WAAW,CAAC;AAC1C,cAAM,cACJ,gBAAgB,MAAc,gBAAgB;AAChD,YAAI,aAAa;AAEf,oBAAU;AAAA,YAAa,eAAe;AAAA;AAAA,UAAc;AACpD,eAAK;AAAA,QACP,OAAO;AAEL;AAAA,QACF;AAAA,MACF;AAAA,IACF,OAAO;AAEL,gBAAU;AACV,WAAK;AAAA,IACP;AAAA,EACF;AACA,SAAO;AACT;AAAA;AAkCO,SAAS,aACd,KACA,SACQ;AACR,QAAM,EAAE,QAAQ,EAAE,IAAI,EAAE,WAAW,MAAM,GAAG,QAAQ;AACpD,SAAO,IAAI,QAAQ,eAAe,IAAI,OAAO,KAAK,CAAC;AACrD;AAAA;AAkCO,SAAS,cAAc,OAAsC;AAClE,SAAO,OAAO,UAAU,aAAa,CAAC,MAAM,UAAU,QAAQ,KAAK,KAAK;AAC1E;AAAA;AAgCO,SAAS,iBACd,OACuC;AACvC,SAAO,OAAO,UAAU,YAAY,MAAM,SAAS;AACrD;AAAA;AA4CO,SAAS,OACd,KACA,QACA,SACQ;AACR,QAAM,EAAE,YAAY,EAAE,IAAI,EAAE,WAAW,MAAM,GAAG,QAAQ;AACxD,QAAM,EAAE,OAAO,IAAI;AACnB,MAAI,aAAa,QAAQ;AACvB,WAAO;AAAA,EACT;AACA,MAAI,cAAc,GAAG;AACnB,WAAO,IAAI,OAAO,MAAM;AAAA,EAC1B;AACA,QAAM,SAAS,YAAY,IAAI,KAAK,IAAI,SAAS,WAAW,CAAC,IAAI;AACjE,QAAM,SAAS,IAAI,MAAM,MAAM,EAAE,OAAO,MAAM;AAC9C,SAAO,WAAW,KAAK,KAAK,SAAS;AACvC;AAAA;AA8BO,SAAS,SAAS,KAAqB;AAG5C,SAAO,IAAI,SAAS,KAAK,IAAI,WAAW,CAAC,MAAM,QAAU,IAAI,MAAM,CAAC,IAAI;AAC1E;AAuBA,MAAM,YAAY,IAAI,KAAK,UAAU;AAyBrC,IAAI;AACJ,IAAI;AACJ,IAAI;AAEJ,IAAI;AASF,0BACE,WAAC,kFAA2E,GAAC;AAO/E,4BACE,WAAC,uFAA+E,GAAC;AAkBnF,eAAa,WAAC,oBAAgB,GAAC;AACjC,QAAQ;AAcN,0BACE,WAAC,mEAA6D,GAAC;AACjE,4BACE;AACF,eAAa,WAAC,gCAA4B,GAAC;AAC7C;AAAA;AAmFO,SAAS,YAAY,MAAsB;AAChD,MAAI,OAAO,SAAS,YAAY,CAAC,KAAK,QAAQ;AAC5C,WAAO;AAAA,EACT;AAKA,QAAM,gBAAY,uBAAU,IAAI;AAEhC,MAAI,CAAC,UAAU,QAAQ;AACrB,WAAO;AAAA,EACT;AAYA,MAAI,QAAQ;AAiBZ,QAAM,wBAAwB,EAAE,iBAAiB,MAAM;AAMvD,aAAW,EAAE,QAAQ,KAAK,UAAU,QAAQ,SAAS,GAAG;AAYtD,QAAI,sBAAsB,KAAK,OAAO,GAAG;AACvC;AAAA,IACF;AAiBA,QAAI,WAAW,KAAK,OAAO,GAAG;AAC5B,eAAS;AACT;AAAA,IACF;AAaA,UAAM,cAAc,QAAQ,QAAQ,yBAAyB,EAAE;AAC/D,UAAM,YAAY,YAAY,YAAY,CAAC;AAE3C,QAAI,cAAc,QAAW;AAG3B;AAAA,IACF;AAQA,iBAAS,4CAAe,WAAW,qBAAqB;AAwBxD,QAAI,QAAQ,SAAS,GAAG;AACtB,iBAAW,QAAQ,QAAQ,MAAM,CAAC,GAAG;AACnC,cAAM,WAAW,KAAK,WAAW,CAAC;AAElC,YAAI,YAAY,SAAW,YAAY,OAAS;AAC9C,gBAAM,oBAAoB,KAAK,YAAY,CAAC;AAC5C,cAAI,sBAAsB,QAAW;AAGnC,yBAAS,4CAAe,mBAAmB,qBAAqB;AAAA,UAClE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAAA;AAmCO,SAAS,YAAY,KAAqB;AAC/C,MAAI,CAAC,IAAI,QAAQ;AACf,WAAO;AAAA,EACT;AACA,SACE,IAEG,QAAQ,0BAA0B,OAAO,EAEzC,QAAQ,MAAM,GAAG,EACjB,YAAY;AAEnB;AAAA;AAoCO,SAAS,aAAa,KAAqB;AAChD,QAAM,EAAE,OAAO,IAAI;AACnB,MAAI,WAAW,GAAG;AAChB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,WAAW,CAAC;AAC9B,QAAM,iBAAiB,UAAU,MAAe,UAAU;AAC1D,MAAI,WAAW,GAAG;AAChB,WAAO,iBAAiB,MAAM;AAAA,EAChC;AACA,QAAM,OAAO,IAAI,WAAW,SAAS,CAAC;AACtC,QAAM,gBAAgB,SAAS,MAAe,SAAS;AACvD,MAAI,kBAAkB,eAAe;AACnC,WAAO;AAAA,EACT;AACA,MAAI,QAAQ;AACZ,MAAI,MAAM;AACV,SAAO,QAAQ,KAAK;AAClB,UAAM,OAAO,IAAI,WAAW,KAAK;AACjC,QAAI,SAAS,MAAe,SAAS,IAAa;AAChD;AAAA,IACF;AACA,aAAS;AAAA,EACX;AACA,SAAO,MAAM,OAAO;AAClB,UAAM,OAAO,IAAI,WAAW,MAAM,CAAC;AACnC,QAAI,SAAS,MAAe,SAAS,IAAa;AAChD;AAAA,IACF;AACA,WAAO;AAAA
,EACT;AACA,SAAO,UAAU,KAAK,QAAQ,SAAS,MAAM,IAAI,MAAM,OAAO,GAAG;AACnE;AAAA;AA6BO,SAAS,aAAa,KAAa,OAAuB;AAC/D,MAAI,SAAS,GAAG;AACd,WAAO;AAAA,EACT;AACA,SAAO,IAAI,OAAO,KAAK;AACzB;AAAA;AAoCO,SAAS,WAAW,MAAc,OAAuB;AAC9D,QAAM,iBAAa,uBAAU,IAAI,EAAE;AACnC,MAAI,cAAc,OAAO;AACvB,WAAO;AAAA,EACT;AAEA,QAAM,UAAU,QAAQ;AACxB,QAAM,UAAU,KAAK,MAAM,UAAU,CAAC;AACtC,QAAM,WAAW,UAAU;AAE3B,SAAO,IAAI,OAAO,OAAO,IAAI,OAAO,IAAI,OAAO,QAAQ;AACzD;",
"names": []
}
package/dist/suppress-warnings.js.map
CHANGED
@@ -1,7 +1,7 @@
{
"version": 3,
"sources": ["../src/suppress-warnings.ts"],
-
"sourcesContent": ["/**\n * @fileoverview Utilities to suppress specific process warnings.\n */\n\nconst { apply: ReflectApply } = Reflect\n\n// Store the original emitWarning function to avoid repeat wrapping.\nlet originalEmitWarning: typeof process.emitWarning | undefined\n\n// Track which warning types are currently suppressed.\nconst suppressedWarnings = new Set<string>()\n\n/**\n * Internal function to set up warning suppression.\n * Only wraps process.emitWarning once, regardless of how many times it's called.\n */\nfunction setupSuppression(): void {\n // Only wrap once - store the original on first call.\n if (!originalEmitWarning) {\n originalEmitWarning = process.emitWarning\n
-
"mappings": ";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAIA,MAAM,EAAE,OAAO,aAAa,IAAI;AAGhC,IAAI;AAGJ,MAAM,qBAAqB,oBAAI,IAAY;AAM3C,SAAS,mBAAyB;AAEhC,MAAI,CAAC,qBAAqB;AACxB,0BAAsB,QAAQ;
+
"sourcesContent": ["/**\n * @fileoverview Utilities to suppress specific process warnings.\n */\n\nconst { apply: ReflectApply } = Reflect\n\n// Store the original emitWarning function to avoid repeat wrapping.\nlet originalEmitWarning: typeof process.emitWarning | undefined\n\n// Track which warning types are currently suppressed.\nconst suppressedWarnings = new Set<string>()\n\n/**\n * Internal function to set up warning suppression.\n * Only wraps process.emitWarning once, regardless of how many times it's called.\n */\nfunction setupSuppression(): void {\n // Only wrap once - store the original on first call.\n if (!originalEmitWarning) {\n originalEmitWarning = process.emitWarning\n process.emitWarning = (warning: string | Error, ...args: any[]) => {\n // Check both string warnings and warning objects.\n if (typeof warning === 'string') {\n // Check if any suppressed warning type matches.\n for (const suppressedType of suppressedWarnings) {\n if (warning.includes(suppressedType)) {\n return\n }\n }\n } else if (warning && typeof warning === 'object') {\n const warningObj = warning as { name?: string }\n if (warningObj.name && suppressedWarnings.has(warningObj.name)) {\n return\n }\n }\n // Not suppressed - call the original function.\n return ReflectApply(\n originalEmitWarning as typeof process.emitWarning,\n process,\n [warning, ...args],\n )\n }\n }\n}\n\n/**\n * Suppress MaxListenersExceededWarning messages.\n * This is useful in tests or scripts where multiple listeners are expected.\n *\n * @example\n * import { suppressMaxListenersWarning } from '@socketsecurity/registry/lib/suppress-warnings'\n *\n * suppressMaxListenersWarning()\n */\nexport function suppressMaxListenersWarning(): void {\n suppressedWarnings.add('MaxListenersExceededWarning')\n setupSuppression()\n}\n\n/**\n * Suppress all process warnings of a specific type.\n *\n * @param warningType - The warning type to suppress (e.g., 'DeprecationWarning', 'ExperimentalWarning')\n *\n * @example\n * import { suppressWarningType } from '@socketsecurity/registry/lib/suppress-warnings'\n *\n * suppressWarningType('ExperimentalWarning')\n */\nexport function suppressWarningType(warningType: string): void {\n suppressedWarnings.add(warningType)\n setupSuppression()\n}\n\n/**\n * Set max listeners on an EventTarget (like AbortSignal) to avoid TypeError.\n *\n * By manually setting `kMaxEventTargetListeners` on the target we avoid:\n * TypeError [ERR_INVALID_ARG_TYPE]: The \"emitter\" argument must be an\n * instance of EventEmitter or EventTarget. 
Received an instance of\n * AbortSignal\n *\n * in some patch releases of Node 18-23 when calling events.getMaxListeners().\n * See https://github.com/nodejs/node/pull/56807.\n *\n * Instead of calling events.setMaxListeners(n, target) we set the symbol\n * property directly to avoid depending on 'node:events' module.\n *\n * @param target - The EventTarget or AbortSignal to configure\n * @param maxListeners - Maximum number of listeners (defaults to 10, the Node.js default)\n *\n * @example\n * import { setMaxEventTargetListeners } from '@socketsecurity/registry/lib/suppress-warnings'\n *\n * const controller = new AbortController()\n * setMaxEventTargetListeners(controller.signal)\n */\nexport function setMaxEventTargetListeners(\n target: EventTarget | AbortSignal | undefined,\n maxListeners: number = 10,\n): void {\n if (!target) {\n return\n }\n const symbols = Object.getOwnPropertySymbols(target)\n const kMaxEventTargetListeners = symbols.find(\n s => s.description === 'events.maxEventTargetListeners',\n )\n if (kMaxEventTargetListeners) {\n // The default events.defaultMaxListeners value is 10.\n // https://nodejs.org/api/events.html#eventsdefaultmaxlisteners\n ;(target as any)[kMaxEventTargetListeners] = maxListeners\n }\n}\n\n/**\n * Restore the original process.emitWarning function.\n * Call this to re-enable all warnings after suppressing them.\n */\nexport function restoreWarnings(): void {\n if (originalEmitWarning) {\n process.emitWarning = originalEmitWarning\n originalEmitWarning = undefined\n suppressedWarnings.clear()\n }\n}\n\n/**\n * Suppress warnings temporarily within a callback.\n *\n * @param warningType - The warning type to suppress\n * @param callback - Function to execute with warnings suppressed\n * @returns The result of the callback\n *\n * @example\n * import { withSuppressedWarnings } from '@socketsecurity/registry/lib/suppress-warnings'\n *\n * const result = await withSuppressedWarnings('ExperimentalWarning', async () => {\n * // Code that triggers experimental warnings\n * return someValue\n * })\n */\nexport async function withSuppressedWarnings<T>(\n warningType: string,\n callback: () => T | Promise<T>,\n): Promise<T> {\n const original = process.emitWarning\n suppressWarningType(warningType)\n try {\n return await callback()\n } finally {\n process.emitWarning = original\n }\n}\n"],
+
"mappings": ";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAIA,MAAM,EAAE,OAAO,aAAa,IAAI;AAGhC,IAAI;AAGJ,MAAM,qBAAqB,oBAAI,IAAY;AAM3C,SAAS,mBAAyB;AAEhC,MAAI,CAAC,qBAAqB;AACxB,0BAAsB,QAAQ;AAC9B,YAAQ,cAAc,CAAC,YAA4B,SAAgB;AAEjE,UAAI,OAAO,YAAY,UAAU;AAE/B,mBAAW,kBAAkB,oBAAoB;AAC/C,cAAI,QAAQ,SAAS,cAAc,GAAG;AACpC;AAAA,UACF;AAAA,QACF;AAAA,MACF,WAAW,WAAW,OAAO,YAAY,UAAU;AACjD,cAAM,aAAa;AACnB,YAAI,WAAW,QAAQ,mBAAmB,IAAI,WAAW,IAAI,GAAG;AAC9D;AAAA,QACF;AAAA,MACF;AAEA,aAAO;AAAA,QACL;AAAA,QACA;AAAA,QACA,CAAC,SAAS,GAAG,IAAI;AAAA,MACnB;AAAA,IACF;AAAA,EACF;AACF;AAWO,SAAS,8BAAoC;AAClD,qBAAmB,IAAI,6BAA6B;AACpD,mBAAiB;AACnB;AAYO,SAAS,oBAAoB,aAA2B;AAC7D,qBAAmB,IAAI,WAAW;AAClC,mBAAiB;AACnB;AAyBO,SAAS,2BACd,QACA,eAAuB,IACjB;AACN,MAAI,CAAC,QAAQ;AACX;AAAA,EACF;AACA,QAAM,UAAU,OAAO,sBAAsB,MAAM;AACnD,QAAM,2BAA2B,QAAQ;AAAA,IACvC,OAAK,EAAE,gBAAgB;AAAA,EACzB;AACA,MAAI,0BAA0B;AAG5B;AAAC,IAAC,OAAe,wBAAwB,IAAI;AAAA,EAC/C;AACF;AAMO,SAAS,kBAAwB;AACtC,MAAI,qBAAqB;AACvB,YAAQ,cAAc;AACtB,0BAAsB;AACtB,uBAAmB,MAAM;AAAA,EAC3B;AACF;AAiBA,eAAsB,uBACpB,aACA,UACY;AACZ,QAAM,WAAW,QAAQ;AACzB,sBAAoB,WAAW;AAC/B,MAAI;AACF,WAAO,MAAM,SAAS;AAAA,EACxB,UAAE;AACA,YAAQ,cAAc;AAAA,EACxB;AACF;",
"names": []
}
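The source map above embeds the full TypeScript source of the suppress-warnings module, including its exported helpers. The sketch below shows how those exports might be consumed; the deep-import specifier is an assumption inferred from the dist layout and should be verified against the package's exports map.

```ts
// Sketch only: import specifier assumed from dist/suppress-warnings.js; verify against the package exports map.
import {
  restoreWarnings,
  setMaxEventTargetListeners,
  suppressWarningType,
  withSuppressedWarnings,
} from '@socketsecurity/lib/suppress-warnings'

async function demo(): Promise<number> {
  // Raise the listener cap on an AbortSignal without importing 'node:events'.
  const controller = new AbortController()
  setMaxEventTargetListeners(controller.signal, 20)

  // Suppress a warning type process-wide, then restore the original emitWarning.
  suppressWarningType('ExperimentalWarning')
  restoreWarnings()

  // Or suppress only while a callback runs; emitWarning is restored afterwards.
  return await withSuppressedWarnings('DeprecationWarning', async () => 42)
}

demo().then(n => console.log('done', n))
```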
package/dist/validation/json-parser.d.ts
CHANGED
@@ -1,10 +1,229 @@
/**
- * @fileoverview Safe JSON parsing with validation.
+ * @fileoverview Safe JSON parsing with validation and security controls.
+ * Provides protection against prototype pollution, size limits, and schema validation.
+ *
+ * Key Features:
+ * - Prototype pollution protection: Blocks `__proto__`, `constructor`, and `prototype` keys
+ * - Size limits: Configurable maximum JSON string size (default 10MB)
+ * - Schema validation: Optional Zod-compatible schema validation
+ * - NDJSON support: Parse newline-delimited JSON streams
+ * - Memory safety: Prevents memory exhaustion attacks
*/
import type { JsonParseOptions, JsonParseResult, Schema } from './types';
-
-
-
-
-
-
+ /**
+ * Safely parse JSON with optional schema validation and security controls.
+ * Throws errors on parse failures, validation failures, or security violations.
+ *
+ * This is the recommended method for parsing untrusted JSON input as it provides
+ * multiple layers of security including prototype pollution protection and size limits.
+ *
+ * @template T - The expected type of the parsed data
+ * @param jsonString - The JSON string to parse
+ * @param schema - Optional Zod-compatible schema for validation
+ * @param options - Parsing options for security and behavior control
+ * @returns The parsed and validated data
+ *
+ * @throws {Error} When JSON string exceeds `maxSize`
+ * @throws {Error} When JSON parsing fails
+ * @throws {Error} When prototype pollution keys are detected (unless `allowPrototype` is `true`)
+ * @throws {Error} When schema validation fails
+ *
+ * @example
+ * ```ts
+ * // Basic parsing with type inference
+ * const data = safeJsonParse<User>('{"name":"Alice","age":30}')
+ *
+ * // With schema validation
+ * import { z } from 'zod'
+ * const userSchema = z.object({
+ * name: z.string(),
+ * age: z.number()
+ * })
+ * const user = safeJsonParse('{"name":"Alice","age":30}', userSchema)
+ *
+ * // With size limit
+ * const data = safeJsonParse(jsonString, undefined, {
+ * maxSize: 1024 * 1024 // 1MB
+ * })
+ *
+ * // Allow prototype keys (dangerous - only for trusted sources)
+ * const data = safeJsonParse(jsonString, undefined, {
+ * allowPrototype: true
+ * })
+ * ```
+ */
+ export declare function safeJsonParse<T = unknown>(jsonString: string, schema?: Schema<T> | undefined, options?: JsonParseOptions): T;
+ /**
+ * Attempt to parse JSON, returning `undefined` on any error.
+ * This is a non-throwing wrapper around `safeJsonParse` for cases where
+ * you want to gracefully handle parse failures without try-catch blocks.
+ *
+ * Use this when parsing is optional or you have a fallback strategy.
+ * For critical parsing where you need error details, use `safeJsonParse` or `parseJsonWithResult`.
+ *
+ * @template T - The expected type of the parsed data
+ * @param jsonString - The JSON string to parse
+ * @param schema - Optional Zod-compatible schema for validation
+ * @param options - Parsing options for security and behavior control
+ * @returns The parsed data on success, or `undefined` on any error
+ *
+ * @example
+ * ```ts
+ * // Graceful fallback to default
+ * const config = tryJsonParse<Config>(jsonString) ?? defaultConfig
+ *
+ * // Optional parsing
+ * const data = tryJsonParse(possiblyInvalidJson)
+ * if (data) {
+ * console.log('Parsed successfully:', data)
+ * }
+ *
+ * // With schema validation
+ * const user = tryJsonParse(jsonString, userSchema)
+ * ```
+ */
+ export declare function tryJsonParse<T = unknown>(jsonString: string, schema?: Schema<T> | undefined, options?: JsonParseOptions | undefined): T | undefined;
+ /**
+ * Parse JSON and return a discriminated union result.
+ * Never throws - always returns a result object with success/failure information.
+ *
+ * This is ideal when you need detailed error messages and type-safe result handling.
+ * The discriminated union allows TypeScript to narrow types based on the `success` flag.
+ *
+ * @template T - The expected type of the parsed data
+ * @param jsonString - The JSON string to parse
+ * @param schema - Optional Zod-compatible schema for validation
+ * @param options - Parsing options for security and behavior control
+ * @returns Result object with either `{success: true, data}` or `{success: false, error}`
+ *
+ * @example
+ * ```ts
+ * // Type-safe error handling
+ * const result = parseJsonWithResult<User>(jsonString, userSchema)
+ *
+ * if (result.success) {
+ * // TypeScript knows result.data is available
+ * console.log(`User: ${result.data.name}`)
+ * } else {
+ * // TypeScript knows result.error is available
+ * console.error(`Parse failed: ${result.error}`)
+ * }
+ *
+ * // Early return pattern
+ * const result = parseJsonWithResult(jsonString)
+ * if (!result.success) {
+ * logger.error(result.error)
+ * return
+ * }
+ * processData(result.data)
+ * ```
+ */
+ export declare function parseJsonWithResult<T = unknown>(jsonString: string, schema?: Schema<T> | undefined, options?: JsonParseOptions | undefined): JsonParseResult<T>;
+ /**
+ * Create a reusable JSON parser with pre-configured schema and options.
+ * Useful for parsing multiple JSON strings with the same validation rules.
+ *
+ * The returned parser function can accept per-call options that override the defaults.
+ * This factory pattern reduces repetition when parsing many similar JSON payloads.
+ *
+ * @template T - The expected type of the parsed data
+ * @param schema - Optional Zod-compatible schema for validation
+ * @param defaultOptions - Default parsing options applied to all parse calls
+ * @returns A parser function that accepts a JSON string and optional per-call options
+ *
+ * @example
+ * ```ts
+ * // Create a parser for API responses
+ * import { z } from 'zod'
+ * const apiResponseSchema = z.object({
+ * status: z.string(),
+ * data: z.unknown()
+ * })
+ *
+ * const parseApiResponse = createJsonParser(apiResponseSchema, {
+ * maxSize: 5 * 1024 * 1024 // 5MB limit for API responses
+ * })
+ *
+ * // Use the parser multiple times
+ * const response1 = parseApiResponse(json1)
+ * const response2 = parseApiResponse(json2)
+ *
+ * // Override options for specific calls
+ * const response3 = parseApiResponse(json3, { maxSize: 10 * 1024 * 1024 })
+ * ```
+ */
+ export declare function createJsonParser<T = unknown>(schema?: Schema<T> | undefined, defaultOptions?: JsonParseOptions | undefined): (jsonString: string, options?: JsonParseOptions) => T;
+ /**
+ * Parse newline-delimited JSON (NDJSON) into an array.
+ * Each line is treated as a separate JSON object. Empty lines are skipped.
+ *
+ * NDJSON format is commonly used for streaming logs, bulk data transfers,
+ * and event streams where each line represents a complete JSON document.
+ *
+ * @template T - The expected type of each parsed JSON object
+ * @param ndjson - Newline-delimited JSON string (supports both `\n` and `\r\n`)
+ * @param schema - Optional Zod-compatible schema for validation of each line
+ * @param options - Parsing options applied to each line
+ * @returns Array of parsed objects, one per non-empty line
+ *
+ * @throws {Error} When any line fails to parse (includes line number in error message)
+ *
+ * @example
+ * ```ts
+ * // Parse NDJSON logs
+ * const ndjsonString = `
+ * {"level":"info","message":"Server started"}
+ * {"level":"error","message":"Connection failed"}
+ * {"level":"info","message":"Retrying..."}
+ * `
+ * const logs = parseNdjson<LogEntry>(ndjsonString, logSchema)
+ * console.log(logs.length) // 3
+ *
+ * // Parse with size limits per line
+ * const entries = parseNdjson(ndjson, undefined, { maxSize: 1024 })
+ *
+ * // Empty lines are automatically skipped
+ * const data = parseNdjson('{"a":1}\n\n{"b":2}\n') // 2 objects
+ * ```
+ */
+ export declare function parseNdjson<T = unknown>(ndjson: string, schema?: Schema<T> | undefined, options?: JsonParseOptions | undefined): T[];
+ /**
+ * Stream-parse newline-delimited JSON (NDJSON) using a generator.
+ * Yields one parsed object at a time, enabling memory-efficient processing of large NDJSON files.
+ *
+ * Unlike `parseNdjson` which loads all results into memory, this generator allows
+ * processing each line individually, making it ideal for large datasets or streaming scenarios.
+ *
+ * @template T - The expected type of each parsed JSON object
+ * @param ndjson - Newline-delimited JSON string (supports both `\n` and `\r\n`)
+ * @param schema - Optional Zod-compatible schema for validation of each line
+ * @param options - Parsing options applied to each line
+ * @yields Parsed objects one at a time as the generator iterates
+ *
+ * @throws {Error} When any line fails to parse (includes line number in error message)
+ *
+ * @example
+ * ```ts
+ * // Memory-efficient processing of large NDJSON files
+ * const ndjsonString = readLargeFile('logs.ndjson')
+ *
+ * for (const log of streamNdjson<LogEntry>(ndjsonString, logSchema)) {
+ * if (log.level === 'error') {
+ * console.error('Error found:', log.message)
+ * }
+ * }
+ *
+ * // Collect filtered results without loading everything
+ * const errors = [...streamNdjson(ndjson)]
+ * .filter(log => log.level === 'error')
+ *
+ * // Early termination when condition is met
+ * for (const entry of streamNdjson(ndjson)) {
+ * if (entry.id === targetId) {
+ * processEntry(entry)
+ * break // Stop processing remaining lines
+ * }
+ * }
+ * ```
+ */
+ export declare function streamNdjson<T = unknown>(ndjson: string, schema?: Schema<T> | undefined, options?: JsonParseOptions | undefined): Generator<T, void, unknown>;
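The declarations above make up the expanded validation/json-parser API. The sketch below exercises the documented signatures; the deep-import specifier is an assumption inferred from the dist layout and should be verified against the package's exports map.

```ts
// Sketch only: import specifier assumed from dist/validation/json-parser.js; verify against the package exports map.
import {
  parseJsonWithResult,
  safeJsonParse,
  tryJsonParse,
} from '@socketsecurity/lib/validation/json-parser'

interface Config {
  name: string
}

// Throwing variant with a per-call size cap (the documented default cap is 10MB).
const config = safeJsonParse<Config>('{"name":"socket"}', undefined, {
  maxSize: 64 * 1024,
})

// Non-throwing variant: undefined on any failure, so a fallback is easy.
const fallback: Config = { name: 'default' }
const loose = tryJsonParse<Config>('{not json') ?? fallback

// Result variant: discriminated union for type-safe error handling.
const result = parseJsonWithResult<Config>('{"name":')
if (result.success) {
  console.log(result.data.name)
} else {
  console.error(result.error)
}

console.log(config.name, loose.name)
```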
package/dist/validation/json-parser.js.map
CHANGED
@@ -1,7 +1,7 @@
{
"version": 3,
"sources": ["../../src/validation/json-parser.ts"],
-
"sourcesContent": ["/**\n * @fileoverview Safe JSON parsing with validation.\n */\n\nimport type { JsonParseOptions, JsonParseResult, Schema } from './types'\n\nconst { hasOwn: ObjectHasOwn } = Object\n\nexport function safeJsonParse<T = unknown>(\n jsonString: string,\n schema?: Schema<T>,\n options: JsonParseOptions = {},\n): T {\n const { allowPrototype = false, maxSize = 10 * 1024 * 1024 } = options\n\n // Check size limit\n const byteLength = Buffer.byteLength(jsonString, 'utf8')\n if (byteLength > maxSize) {\n throw new Error(\n `JSON string exceeds maximum size limit${maxSize !== 10 * 1024 * 1024 ? ` of ${maxSize} bytes` : ''}`,\n )\n }\n\n // Parse JSON\n let parsed: unknown\n try {\n parsed = JSON.parse(jsonString)\n } catch (error) {\n throw new Error(`Failed to parse JSON: ${error}`)\n }\n\n // Check for prototype pollution\n if (\n !allowPrototype &&\n typeof parsed === 'object' &&\n parsed !== null &&\n !Array.isArray(parsed)\n ) {\n const dangerous = ['__proto__', 'constructor', 'prototype']\n for (const key of dangerous) {\n if (ObjectHasOwn(parsed, key)) {\n throw new Error(\n 'JSON contains potentially malicious prototype pollution keys',\n )\n }\n }\n }\n\n // Validate against schema if provided\n if (schema) {\n const result = schema.safeParse(parsed)\n if (!result.success) {\n const errors = result.error.issues\n .map(\n (issue: { path: Array<string | number>; message: string }) =>\n `${issue.path.join('.')}: ${issue.message}`,\n )\n .join(', ')\n throw new Error(`Validation failed: ${errors}`)\n }\n return result.data as T\n }\n\n return parsed as T\n}\n\nexport function tryJsonParse<T = unknown>(\n jsonString: string,\n schema?: Schema<T>,\n options?: JsonParseOptions,\n): T | undefined {\n try {\n return safeJsonParse(jsonString, schema, options)\n } catch {\n return undefined\n }\n}\n\nexport function parseJsonWithResult<T = unknown>(\n jsonString: string,\n schema?: Schema<T>,\n options?: JsonParseOptions,\n): JsonParseResult<T> {\n try {\n const data = safeJsonParse(jsonString, schema, options)\n return { success: true, data }\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : 'Unknown error'\n return { success: false, error: message }\n }\n}\n\nexport function createJsonParser<T = unknown>(\n schema?: Schema<T>,\n defaultOptions?: JsonParseOptions,\n) {\n return (jsonString: string, options?: JsonParseOptions): T => {\n return safeJsonParse(jsonString, schema, { ...defaultOptions, ...options })\n }\n}\n\nexport function parseNdjson<T = unknown>(\n ndjson: string,\n schema?: Schema<T>,\n options?: JsonParseOptions,\n): T[] {\n const results: T[] = []\n const lines = ndjson.split(/\\r?\\n/)\n\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i]?.trim()\n if (!line || line === '') {\n continue\n }\n\n try {\n const parsed = safeJsonParse<T>(line, schema, options)\n results.push(parsed)\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error)\n throw new Error(`Failed to parse NDJSON at line ${i + 1}: ${message}`)\n }\n }\n\n return results\n}\n\nexport function* streamNdjson<T = unknown>(\n ndjson: string,\n schema?: Schema<T>,\n options?: JsonParseOptions,\n): Generator<T, void, unknown> {\n const lines = ndjson.split(/\\r?\\n/)\n\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i]?.trim()\n if (!line || line === '') {\n continue\n }\n\n try {\n yield safeJsonParse<T>(line, schema, options)\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : String(error)\n throw new Error(`Failed to parse NDJSON at line ${i + 1}: ${message}`)\n }\n }\n}\n"],
-
"mappings": ";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;
+
"sourcesContent": ["/**\n * @fileoverview Safe JSON parsing with validation and security controls.\n * Provides protection against prototype pollution, size limits, and schema validation.\n *\n * Key Features:\n * - Prototype pollution protection: Blocks `__proto__`, `constructor`, and `prototype` keys\n * - Size limits: Configurable maximum JSON string size (default 10MB)\n * - Schema validation: Optional Zod-compatible schema validation\n * - NDJSON support: Parse newline-delimited JSON streams\n * - Memory safety: Prevents memory exhaustion attacks\n */\n\nimport type { JsonParseOptions, JsonParseResult, Schema } from './types'\n\nconst { hasOwn: ObjectHasOwn } = Object\n\n/**\n * Safely parse JSON with optional schema validation and security controls.\n * Throws errors on parse failures, validation failures, or security violations.\n *\n * This is the recommended method for parsing untrusted JSON input as it provides\n * multiple layers of security including prototype pollution protection and size limits.\n *\n * @template T - The expected type of the parsed data\n * @param jsonString - The JSON string to parse\n * @param schema - Optional Zod-compatible schema for validation\n * @param options - Parsing options for security and behavior control\n * @returns The parsed and validated data\n *\n * @throws {Error} When JSON string exceeds `maxSize`\n * @throws {Error} When JSON parsing fails\n * @throws {Error} When prototype pollution keys are detected (unless `allowPrototype` is `true`)\n * @throws {Error} When schema validation fails\n *\n * @example\n * ```ts\n * // Basic parsing with type inference\n * const data = safeJsonParse<User>('{\"name\":\"Alice\",\"age\":30}')\n *\n * // With schema validation\n * import { z } from 'zod'\n * const userSchema = z.object({\n * name: z.string(),\n * age: z.number()\n * })\n * const user = safeJsonParse('{\"name\":\"Alice\",\"age\":30}', userSchema)\n *\n * // With size limit\n * const data = safeJsonParse(jsonString, undefined, {\n * maxSize: 1024 * 1024 // 1MB\n * })\n *\n * // Allow prototype keys (dangerous - only for trusted sources)\n * const data = safeJsonParse(jsonString, undefined, {\n * allowPrototype: true\n * })\n * ```\n */\nexport function safeJsonParse<T = unknown>(\n jsonString: string,\n schema?: Schema<T> | undefined,\n options: JsonParseOptions = {},\n): T {\n const { allowPrototype = false, maxSize = 10 * 1024 * 1024 } = options\n\n // Check size limit\n const byteLength = Buffer.byteLength(jsonString, 'utf8')\n if (byteLength > maxSize) {\n throw new Error(\n `JSON string exceeds maximum size limit${maxSize !== 10 * 1024 * 1024 ? 
` of ${maxSize} bytes` : ''}`,\n )\n }\n\n // Parse JSON\n let parsed: unknown\n try {\n parsed = JSON.parse(jsonString)\n } catch (error) {\n throw new Error(`Failed to parse JSON: ${error}`)\n }\n\n // Check for prototype pollution\n if (\n !allowPrototype &&\n typeof parsed === 'object' &&\n parsed !== null &&\n !Array.isArray(parsed)\n ) {\n const dangerous = ['__proto__', 'constructor', 'prototype']\n for (const key of dangerous) {\n if (ObjectHasOwn(parsed, key)) {\n throw new Error(\n 'JSON contains potentially malicious prototype pollution keys',\n )\n }\n }\n }\n\n // Validate against schema if provided\n if (schema) {\n const result = schema.safeParse(parsed)\n if (!result.success) {\n const errors = result.error.issues\n .map(\n (issue: { path: Array<string | number>; message: string }) =>\n `${issue.path.join('.')}: ${issue.message}`,\n )\n .join(', ')\n throw new Error(`Validation failed: ${errors}`)\n }\n return result.data as T\n }\n\n return parsed as T\n}\n\n/**\n * Attempt to parse JSON, returning `undefined` on any error.\n * This is a non-throwing wrapper around `safeJsonParse` for cases where\n * you want to gracefully handle parse failures without try-catch blocks.\n *\n * Use this when parsing is optional or you have a fallback strategy.\n * For critical parsing where you need error details, use `safeJsonParse` or `parseJsonWithResult`.\n *\n * @template T - The expected type of the parsed data\n * @param jsonString - The JSON string to parse\n * @param schema - Optional Zod-compatible schema for validation\n * @param options - Parsing options for security and behavior control\n * @returns The parsed data on success, or `undefined` on any error\n *\n * @example\n * ```ts\n * // Graceful fallback to default\n * const config = tryJsonParse<Config>(jsonString) ?? 
defaultConfig\n *\n * // Optional parsing\n * const data = tryJsonParse(possiblyInvalidJson)\n * if (data) {\n * console.log('Parsed successfully:', data)\n * }\n *\n * // With schema validation\n * const user = tryJsonParse(jsonString, userSchema)\n * ```\n */\nexport function tryJsonParse<T = unknown>(\n jsonString: string,\n schema?: Schema<T> | undefined,\n options?: JsonParseOptions | undefined,\n): T | undefined {\n try {\n return safeJsonParse(jsonString, schema, options)\n } catch {\n return undefined\n }\n}\n\n/**\n * Parse JSON and return a discriminated union result.\n * Never throws - always returns a result object with success/failure information.\n *\n * This is ideal when you need detailed error messages and type-safe result handling.\n * The discriminated union allows TypeScript to narrow types based on the `success` flag.\n *\n * @template T - The expected type of the parsed data\n * @param jsonString - The JSON string to parse\n * @param schema - Optional Zod-compatible schema for validation\n * @param options - Parsing options for security and behavior control\n * @returns Result object with either `{success: true, data}` or `{success: false, error}`\n *\n * @example\n * ```ts\n * // Type-safe error handling\n * const result = parseJsonWithResult<User>(jsonString, userSchema)\n *\n * if (result.success) {\n * // TypeScript knows result.data is available\n * console.log(`User: ${result.data.name}`)\n * } else {\n * // TypeScript knows result.error is available\n * console.error(`Parse failed: ${result.error}`)\n * }\n *\n * // Early return pattern\n * const result = parseJsonWithResult(jsonString)\n * if (!result.success) {\n * logger.error(result.error)\n * return\n * }\n * processData(result.data)\n * ```\n */\nexport function parseJsonWithResult<T = unknown>(\n jsonString: string,\n schema?: Schema<T> | undefined,\n options?: JsonParseOptions | undefined,\n): JsonParseResult<T> {\n try {\n const data = safeJsonParse(jsonString, schema, options)\n return { success: true, data }\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : 'Unknown error'\n return { success: false, error: message }\n }\n}\n\n/**\n * Create a reusable JSON parser with pre-configured schema and options.\n * Useful for parsing multiple JSON strings with the same validation rules.\n *\n * The returned parser function can accept per-call options that override the defaults.\n * This factory pattern reduces repetition when parsing many similar JSON payloads.\n *\n * @template T - The expected type of the parsed data\n * @param schema - Optional Zod-compatible schema for validation\n * @param defaultOptions - Default parsing options applied to all parse calls\n * @returns A parser function that accepts a JSON string and optional per-call options\n *\n * @example\n * ```ts\n * // Create a parser for API responses\n * import { z } from 'zod'\n * const apiResponseSchema = z.object({\n * status: z.string(),\n * data: z.unknown()\n * })\n *\n * const parseApiResponse = createJsonParser(apiResponseSchema, {\n * maxSize: 5 * 1024 * 1024 // 5MB limit for API responses\n * })\n *\n * // Use the parser multiple times\n * const response1 = parseApiResponse(json1)\n * const response2 = parseApiResponse(json2)\n *\n * // Override options for specific calls\n * const response3 = parseApiResponse(json3, { maxSize: 10 * 1024 * 1024 })\n * ```\n */\nexport function createJsonParser<T = unknown>(\n schema?: Schema<T> | undefined,\n defaultOptions?: JsonParseOptions | undefined,\n) {\n return (jsonString: string, options?: JsonParseOptions | undefined): T => {\n return safeJsonParse(jsonString, schema, { ...defaultOptions, ...options })\n }\n}\n\n/**\n * Parse newline-delimited JSON (NDJSON) into an array.\n * Each line is treated as a separate JSON object. Empty lines are skipped.\n *\n * NDJSON format is commonly used for streaming logs, bulk data transfers,\n * and event streams where each line represents a complete JSON document.\n *\n * @template T - The expected type of each parsed JSON object\n * @param ndjson - Newline-delimited JSON string (supports both `\\n` and `\\r\\n`)\n * @param schema - Optional Zod-compatible schema for validation of each line\n * @param options - Parsing options applied to each line\n * @returns Array of parsed objects, one per non-empty line\n *\n * @throws {Error} When any line fails to parse (includes line number in error message)\n *\n * @example\n * ```ts\n * // Parse NDJSON logs\n * const ndjsonString = `\n * {\"level\":\"info\",\"message\":\"Server started\"}\n * {\"level\":\"error\",\"message\":\"Connection failed\"}\n * {\"level\":\"info\",\"message\":\"Retrying...\"}\n * `\n * const logs = parseNdjson<LogEntry>(ndjsonString, logSchema)\n * console.log(logs.length) // 3\n *\n * // Parse with size limits per line\n * const entries = parseNdjson(ndjson, undefined, { maxSize: 1024 })\n *\n * // Empty lines are automatically skipped\n * const data = parseNdjson('{\"a\":1}\\n\\n{\"b\":2}\\n') // 2 objects\n * ```\n */\nexport function parseNdjson<T = unknown>(\n ndjson: string,\n schema?: Schema<T> | undefined,\n options?: JsonParseOptions | undefined,\n): T[] {\n const results: T[] = []\n const lines = ndjson.split(/\\r?\\n/)\n\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i]?.trim()\n if (!line || line === '') {\n continue\n }\n\n try {\n const parsed = safeJsonParse<T>(line, schema, options)\n results.push(parsed)\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error)\n throw new Error(`Failed to parse NDJSON at line ${i + 1}: ${message}`)\n }\n }\n\n return results\n}\n\n/**\n * Stream-parse newline-delimited JSON (NDJSON) using a generator.\n * Yields one parsed object at a time, enabling memory-efficient processing of large NDJSON files.\n *\n * Unlike `parseNdjson` which loads all results into memory, this generator allows\n * processing each line individually, making it ideal for large datasets or streaming scenarios.\n *\n * @template T - The expected type of each parsed JSON object\n * @param ndjson - Newline-delimited JSON string (supports both `\\n` and `\\r\\n`)\n * @param schema - Optional Zod-compatible schema for validation of each line\n * @param options - Parsing options applied to each line\n * @yields Parsed objects one at a time as the generator iterates\n *\n * @throws {Error} When any line fails to parse (includes line number in error message)\n *\n * @example\n * ```ts\n * // Memory-efficient processing of large NDJSON files\n * const ndjsonString = readLargeFile('logs.ndjson')\n *\n * for (const log of streamNdjson<LogEntry>(ndjsonString, logSchema)) {\n * if (log.level === 'error') {\n * console.error('Error found:', log.message)\n * }\n * }\n *\n * // Collect filtered results without loading everything\n * const errors = [...streamNdjson(ndjson)]\n * .filter(log => log.level === 'error')\n *\n * // Early termination when condition is met\n * for (const entry of streamNdjson(ndjson)) {\n * if (entry.id === targetId) {\n * processEntry(entry)\n * break // Stop processing remaining lines\n * }\n * }\n * ```\n */\nexport function* streamNdjson<T = unknown>(\n ndjson: string,\n schema?: Schema<T> | undefined,\n options?: JsonParseOptions | undefined,\n): Generator<T, void, unknown> {\n const lines = ndjson.split(/\\r?\\n/)\n\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i]?.trim()\n if (!line || line === '') {\n continue\n }\n\n try {\n yield safeJsonParse<T>(line, schema, options)\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : String(error)\n throw new Error(`Failed to parse NDJSON at line ${i + 1}: ${message}`)\n }\n }\n}\n"],
+
"mappings": ";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAcA,MAAM,EAAE,QAAQ,aAAa,IAAI;AA4C1B,SAAS,cACd,YACA,QACA,UAA4B,CAAC,GAC1B;AACH,QAAM,EAAE,iBAAiB,OAAO,UAAU,KAAK,OAAO,KAAK,IAAI;AAG/D,QAAM,aAAa,OAAO,WAAW,YAAY,MAAM;AACvD,MAAI,aAAa,SAAS;AACxB,UAAM,IAAI;AAAA,MACR,yCAAyC,YAAY,KAAK,OAAO,OAAO,OAAO,OAAO,WAAW,EAAE;AAAA,IACrG;AAAA,EACF;AAGA,MAAI;AACJ,MAAI;AACF,aAAS,KAAK,MAAM,UAAU;AAAA,EAChC,SAAS,OAAO;AACd,UAAM,IAAI,MAAM,yBAAyB,KAAK,EAAE;AAAA,EAClD;AAGA,MACE,CAAC,kBACD,OAAO,WAAW,YAClB,WAAW,QACX,CAAC,MAAM,QAAQ,MAAM,GACrB;AACA,UAAM,YAAY,CAAC,aAAa,eAAe,WAAW;AAC1D,eAAW,OAAO,WAAW;AAC3B,UAAI,aAAa,QAAQ,GAAG,GAAG;AAC7B,cAAM,IAAI;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,MAAI,QAAQ;AACV,UAAM,SAAS,OAAO,UAAU,MAAM;AACtC,QAAI,CAAC,OAAO,SAAS;AACnB,YAAM,SAAS,OAAO,MAAM,OACzB;AAAA,QACC,CAAC,UACC,GAAG,MAAM,KAAK,KAAK,GAAG,CAAC,KAAK,MAAM,OAAO;AAAA,MAC7C,EACC,KAAK,IAAI;AACZ,YAAM,IAAI,MAAM,sBAAsB,MAAM,EAAE;AAAA,IAChD;AACA,WAAO,OAAO;AAAA,EAChB;AAEA,SAAO;AACT;AA+BO,SAAS,aACd,YACA,QACA,SACe;AACf,MAAI;AACF,WAAO,cAAc,YAAY,QAAQ,OAAO;AAAA,EAClD,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAqCO,SAAS,oBACd,YACA,QACA,SACoB;AACpB,MAAI;AACF,UAAM,OAAO,cAAc,YAAY,QAAQ,OAAO;AACtD,WAAO,EAAE,SAAS,MAAM,KAAK;AAAA,EAC/B,SAAS,OAAgB;AACvB,UAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU;AACzD,WAAO,EAAE,SAAS,OAAO,OAAO,QAAQ;AAAA,EAC1C;AACF;AAmCO,SAAS,iBACd,QACA,gBACA;AACA,SAAO,CAAC,YAAoB,YAA8C;AACxE,WAAO,cAAc,YAAY,QAAQ,EAAE,GAAG,gBAAgB,GAAG,QAAQ,CAAC;AAAA,EAC5E;AACF;AAmCO,SAAS,YACd,QACA,QACA,SACK;AACL,QAAM,UAAe,CAAC;AACtB,QAAM,QAAQ,OAAO,MAAM,OAAO;AAElC,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,UAAM,OAAO,MAAM,CAAC,GAAG,KAAK;AAC5B,QAAI,CAAC,QAAQ,SAAS,IAAI;AACxB;AAAA,IACF;AAEA,QAAI;AACF,YAAM,SAAS,cAAiB,MAAM,QAAQ,OAAO;AACrD,cAAQ,KAAK,MAAM;AAAA,IACrB,SAAS,OAAgB;AACvB,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,YAAM,IAAI,MAAM,kCAAkC,IAAI,CAAC,KAAK,OAAO,EAAE;AAAA,IACvE;AAAA,EACF;AAEA,SAAO;AACT;AAyCO,UAAU,aACf,QACA,QACA,SAC6B;AAC7B,QAAM,QAAQ,OAAO,MAAM,OAAO;AAElC,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,UAAM,OAAO,MAAM,CAAC,GAAG,KAAK;AAC5B,QAAI,CAAC,QAAQ,SAAS,IAAI;AACxB;AAAA,IACF;AAEA,QAAI;AACF,YAAM,cAAiB,MAAM,QAAQ,OAAO;AAAA,IAC9C,SAAS,OAAgB;AACvB,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,YAAM,IAAI,MAAM,kCAAkC,IAAI,CAAC,KAAK,OAAO,EAAE;AAAA,IACvE;AAAA,EACF;AACF;",
"names": []
}
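The json-parser source map above also carries the NDJSON helpers. A short sketch of the eager and streaming variants follows, under the same assumption about the deep-import specifier.

```ts
// Sketch only: import specifier assumed from dist/validation/json-parser.js; verify against the package exports map.
import { parseNdjson, streamNdjson } from '@socketsecurity/lib/validation/json-parser'

interface LogLine {
  level: string
  message: string
}

const ndjson = '{"level":"info","message":"start"}\n\n{"level":"error","message":"boom"}\n'

// Eager: one array entry per non-empty line (blank lines are skipped).
const all = parseNdjson<LogLine>(ndjson)
console.log(all.length) // 2

// Lazy: the generator yields line by line and supports early termination.
for (const entry of streamNdjson<LogLine>(ndjson)) {
  if (entry.level === 'error') {
    console.error(entry.message)
    break
  }
}
```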