@chaisser/regex-humanizer 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +191 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +20 -0
- package/dist/index.d.ts +20 -0
- package/dist/index.js +165 -0
- package/dist/index.js.map +1 -0
- package/package.json +48 -0
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
"use strict";
// --- esbuild-generated CommonJS interop helpers ---
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define lazy, enumerable getters on `target` for every entry in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties from `from` onto `to` as getters, skipping `except`
// and anything already present, preserving each property's enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wrap the export object so ESM consumers see `__esModule: true`.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var index_exports = {};
__export(index_exports, {
  explain: () => explain,
  humanize: () => humanize
});
module.exports = __toCommonJS(index_exports);
|
|
27
|
+
// Locale-keyed descriptions for single-character and escape-sequence tokens.
var TOKENS = {
  "^": { en: "start of string", tr: "sat\u0131r ba\u015F\u0131" },
  "$": { en: "end of string", tr: "sat\u0131r sonu" },
  ".": { en: "any character", tr: "herhangi bir karakter" },
  "*": { en: "zero or more times", tr: "s\u0131f\u0131r veya daha fazla kez" },
  "+": { en: "one or more times", tr: "bir veya daha fazla kez" },
  "?": { en: "zero or one time (optional)", tr: "s\u0131f\u0131r veya bir kez (opsiyonel)" },
  "|": { en: "or", tr: "veya" },
  "\\d": { en: "any digit", tr: "herhangi bir rakam" },
  "\\D": { en: "any non-digit", tr: "herhangi bir rakam olmayan" },
  "\\w": { en: "any word character", tr: "herhangi bir kelime karakteri" },
  "\\W": { en: "any non-word character", tr: "herhangi bir kelime karakteri olmayan" },
  "\\s": { en: "any whitespace", tr: "herhangi bir bo\u015Fluk karakteri" },
  "\\S": { en: "any non-whitespace", tr: "herhangi bir bo\u015Fluk olmayan" },
  "\\b": { en: "word boundary", tr: "kelime s\u0131n\u0131r\u0131" },
  "\\B": { en: "non-word boundary", tr: "kelime s\u0131n\u0131r\u0131 de\u011Fil" },
  "\\n": { en: "newline", tr: "yeni sat\u0131r" },
  "\\r": { en: "carriage return", tr: "sat\u0131r ba\u015F\u0131" },
  "\\t": { en: "tab", tr: "sekme" }
};
// Descriptions for a few common, exact character-class spellings.
var CHARACTER_CLASSES = {
  "[a-z]": { en: "any lowercase letter", tr: "herhangi bir k\xFC\xE7\xFCk harf" },
  "[A-Z]": { en: "any uppercase letter", tr: "herhangi bir b\xFCy\xFCk harf" },
  "[0-9]": { en: "any digit", tr: "herhangi bir rakam" },
  "[a-zA-Z]": { en: "any letter", tr: "herhangi bir harf" },
  "[0-9a-zA-Z]": { en: "any letter or digit", tr: "herhangi bir harf veya rakam" }
};
// Templates for counted quantifiers; the standalone letters n/m are
// placeholders substituted in humanizeToken.
var QUANTIFIERS = {
  "{n}": { en: "exactly n times", tr: "tam n kez" },
  "{n,}": { en: "n or more times", tr: "n veya daha fazla kez" },
  "{n,m}": { en: "between n and m times", tr: "n ile m aras\u0131 kez" }
};
/**
 * Translate a single regex token (escape, known character class, or counted
 * quantifier) into a human-readable phrase for the given locale.
 * Returns the token unchanged when it is not recognized.
 * @param {string} token - One lexed token, e.g. "\\d", "[a-z]", "{2,5}".
 * @param {string} locale - "en" or "tr"; unknown locales fall back to "en".
 * @returns {string} Human-readable phrase, or the original token.
 */
function humanizeToken(token, locale) {
  if (TOKENS[token]) {
    return TOKENS[token][locale] || TOKENS[token].en;
  }
  if (CHARACTER_CLASSES[token]) {
    return CHARACTER_CLASSES[token][locale] || CHARACTER_CLASSES[token].en;
  }
  // Counted quantifiers. Group 2 records whether a comma was present so we
  // can distinguish {n} (exactly) from {n,} (at least); group 3 is the
  // optional upper bound. The previous regex could never match "{n,}" and
  // mapped "{n}" to the "n or more times" template.
  const quantifierMatch = token.match(/^\{(\d+)(,(\d+)?)?\}$/);
  if (quantifierMatch) {
    const n = quantifierMatch[1];
    const hasComma = quantifierMatch[2] !== undefined;
    const m = quantifierMatch[3];
    const key = m !== undefined ? "{n,m}" : hasComma ? "{n,}" : "{n}";
    const template = QUANTIFIERS[key][locale] || QUANTIFIERS[key].en;
    // Substitute only the standalone placeholder letters. A word-boundary
    // regex is required: a plain .replace("n", ...) would hit the "n"
    // inside words such as "between".
    let text = template.replace(/\bn\b/, n);
    if (m !== undefined) {
      text = text.replace(/\bm\b/, m);
    }
    return text;
  }
  return token;
}
|
|
77
|
+
/**
 * Convert a regex pattern string into a human-readable description.
 * @param {string} pattern - Regex pattern string (without delimiters/flags).
 * @param {{locale?: 'en'|'tr'}} [options] - Humanization options; locale defaults to "en".
 * @returns {string} Human-readable description.
 */
function humanize(pattern, options = {}) {
  const locale = options.locale || "en";
  let result = "";
  let i = 0;
  while (i < pattern.length) {
    const char = pattern[i];
    // Escape sequence (\d, \w, ...): consume two characters.
    if (char === "\\") {
      if (i + 1 < pattern.length) {
        const nextChar = pattern[i + 1];
        const token2 = "\\" + nextChar;
        const human = humanizeToken(token2, locale);
        result += human !== token2 ? ` ${human} ` : token2;
        i += 2;
        continue;
      }
    }
    // Character class: consume through the closing bracket.
    if (char === "[") {
      const closingIndex = pattern.indexOf("]", i);
      if (closingIndex !== -1) {
        const charClass = pattern.slice(i, closingIndex + 1);
        const human = humanizeToken(charClass, locale);
        result += human !== charClass ? ` ${human} ` : charClass;
        i = closingIndex + 1;
        continue;
      }
    }
    // Counted quantifier like {2,5}: consume through the closing brace.
    if (char === "{") {
      const closingIndex = pattern.indexOf("}", i);
      if (closingIndex !== -1) {
        const quantifier = pattern.slice(i, closingIndex + 1);
        const human = humanizeToken(quantifier, locale);
        result += human !== quantifier ? ` ${human} ` : quantifier;
        i = closingIndex + 1;
        continue;
      }
    }
    // Groups. NOTE: indexOf(")") finds the FIRST closing paren, so nested
    // groups are truncated; preserved as-is from the original implementation.
    if (char === "(") {
      const closingIndex = pattern.indexOf(")", i);
      if (closingIndex !== -1) {
        const group = pattern.slice(i + 1, closingIndex);
        if (group.startsWith("?")) {
          if (group.startsWith("?:")) {
            result += " (non-capturing group: ";
            result += humanize(group.slice(2), options);
            result += ")";
          } else if (group.startsWith("?=")) {
            result += " (positive lookahead: ";
            result += humanize(group.slice(2), options);
            result += ")";
          } else if (group.startsWith("?!")) {
            result += " (negative lookahead: ";
            result += humanize(group.slice(2), options);
            result += ")";
          } else if (group.startsWith("?<=")) {
            // FIX: lookbehind groups were previously dropped silently.
            result += " (positive lookbehind: ";
            result += humanize(group.slice(3), options);
            result += ")";
          } else if (group.startsWith("?<!")) {
            result += " (negative lookbehind: ";
            result += humanize(group.slice(3), options);
            result += ")";
          } else {
            // FIX: named capturing groups ((?<name>...)) and any other "?"
            // form were previously dropped from the output entirely.
            const named = group.match(/^\?<([^>]+)>/);
            if (named) {
              result += ` (capturing group "${named[1]}": `;
              result += humanize(group.slice(named[0].length), options);
              result += ")";
            } else {
              result += " (group: ";
              result += humanize(group.slice(1), options);
              result += ")";
            }
          }
        } else {
          result += " (capturing group: ";
          result += humanize(group, options);
          result += ")";
        }
        i = closingIndex + 1;
        continue;
      }
    }
    // Single-character tokens; whitespace in the pattern is skipped,
    // anything else passes through literally.
    const token = TOKENS[char];
    if (token) {
      result += ` ${token[locale] || token.en} `;
    } else if (char !== " " && char !== "\t" && char !== "\n") {
      result += char;
    }
    i++;
  }
  return result.trim();
}
|
|
150
|
+
/**
 * Build a structured explanation for a regex pattern: the pattern itself,
 * its humanized description, and a token-by-token breakdown list.
 * @param {string} pattern - Regex pattern string.
 * @param {{locale?: 'en'|'tr'}} [options] - Humanization options; locale defaults to "en".
 * @returns {{pattern: string, description: string, breakdown: string[]}}
 */
function explain(pattern, options = {}) {
  const locale = options.locale || "en";
  const description = humanize(pattern, options);
  const breakdown = [];
  let pos = 0;
  while (pos < pattern.length) {
    const current = pattern[pos];
    // Escape sequence: consume two characters; only record it when a
    // translation exists.
    if (current === "\\" && pos + 1 < pattern.length) {
      const escaped = "\\" + pattern[pos + 1];
      const translated = humanizeToken(escaped, locale);
      if (translated !== escaped) {
        breakdown.push(`${escaped} \u2192 ${translated}`);
      }
      pos += 2;
      continue;
    }
    // Character class: consume through the closing bracket and record it.
    if (current === "[") {
      const end = pattern.indexOf("]", pos);
      if (end !== -1) {
        const cls = pattern.slice(pos, end + 1);
        breakdown.push(`${cls} \u2192 ${humanizeToken(cls, locale)}`);
        pos = end + 1;
        continue;
      }
    }
    // Single-character tokens with a known translation.
    const entry = TOKENS[current];
    if (entry) {
      breakdown.push(`${current} \u2192 ${entry[locale] || entry.en}`);
    }
    pos++;
  }
  return { pattern, description, breakdown };
}
|
|
186
|
+
// Annotate the CommonJS export names for ESM import in node:
|
|
187
|
+
0 && (module.exports = {
|
|
188
|
+
explain,
|
|
189
|
+
humanize
|
|
190
|
+
});
|
|
191
|
+
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["interface HumanizeOptions {\n locale?: 'en' | 'tr';\n}\n\nconst TOKENS: Record<string, { en: string; tr: string }> = {\n '^': { en: 'start of string', tr: 'satır başı' },\n '$': { en: 'end of string', tr: 'satır sonu' },\n '.': { en: 'any character', tr: 'herhangi bir karakter' },\n '*': { en: 'zero or more times', tr: 'sıfır veya daha fazla kez' },\n '+': { en: 'one or more times', tr: 'bir veya daha fazla kez' },\n '?': { en: 'zero or one time (optional)', tr: 'sıfır veya bir kez (opsiyonel)' },\n '|': { en: 'or', tr: 'veya' },\n '\\\\d': { en: 'any digit', tr: 'herhangi bir rakam' },\n '\\\\D': { en: 'any non-digit', tr: 'herhangi bir rakam olmayan' },\n '\\\\w': { en: 'any word character', tr: 'herhangi bir kelime karakteri' },\n '\\\\W': { en: 'any non-word character', tr: 'herhangi bir kelime karakteri olmayan' },\n '\\\\s': { en: 'any whitespace', tr: 'herhangi bir boşluk karakteri' },\n '\\\\S': { en: 'any non-whitespace', tr: 'herhangi bir boşluk olmayan' },\n '\\\\b': { en: 'word boundary', tr: 'kelime sınırı' },\n '\\\\B': { en: 'non-word boundary', tr: 'kelime sınırı değil' },\n '\\\\n': { en: 'newline', tr: 'yeni satır' },\n '\\\\r': { en: 'carriage return', tr: 'satır başı' },\n '\\\\t': { en: 'tab', tr: 'sekme' },\n};\n\nconst CHARACTER_CLASSES: Record<string, { en: string; tr: string }> = {\n '[a-z]': { en: 'any lowercase letter', tr: 'herhangi bir küçük harf' },\n '[A-Z]': { en: 'any uppercase letter', tr: 'herhangi bir büyük harf' },\n '[0-9]': { en: 'any digit', tr: 'herhangi bir rakam' },\n '[a-zA-Z]': { en: 'any letter', tr: 'herhangi bir harf' },\n '[0-9a-zA-Z]': { en: 'any letter or digit', tr: 'herhangi bir harf veya rakam' },\n};\n\nconst QUANTIFIERS: Record<string, { en: string; tr: string }> = {\n '{n}': { en: 'exactly n times', tr: 'tam n kez' },\n '{n,}': { en: 'n or more times', tr: 'n veya daha fazla kez' },\n '{n,m}': { en: 'between n and m times', tr: 'n ile m arası kez' 
},\n};\n\nfunction humanizeToken(token: string, locale: string): string {\n if (TOKENS[token]) {\n return TOKENS[token][locale as 'en' | 'tr'] || TOKENS[token].en;\n }\n\n if (CHARACTER_CLASSES[token]) {\n return CHARACTER_CLASSES[token][locale as 'en' | 'tr'] || CHARACTER_CLASSES[token].en;\n }\n\n const quantifierMatch = token.match(/^\\{(\\d+)(?:,(\\d+))?\\}$/);\n if (quantifierMatch) {\n const n = quantifierMatch[1];\n const m = quantifierMatch[2];\n if (m) {\n return `${QUANTIFIERS['{n,m}'][locale as 'en' | 'tr'].replace('n', n).replace('m', m)}`;\n }\n return `${QUANTIFIERS['{n,}'][locale as 'en' | 'tr'].replace('n', n)}`;\n }\n\n return token;\n}\n\n/**\n * Convert a regex pattern to a human readable description\n * @param pattern - Regex pattern string\n * @param options - Options for humanization\n * @returns Human readable description\n */\nexport function humanize(pattern: string, options: HumanizeOptions = {}): string {\n const locale = options.locale || 'en';\n let result = '';\n\n let i = 0;\n while (i < pattern.length) {\n const char = pattern[i];\n\n if (char === '\\\\') {\n if (i + 1 < pattern.length) {\n const nextChar = pattern[i + 1];\n const token = '\\\\' + nextChar;\n const human = humanizeToken(token, locale);\n result += human !== token ? ` ${human} ` : token;\n i += 2;\n continue;\n }\n }\n\n if (char === '[') {\n const closingIndex = pattern.indexOf(']', i);\n if (closingIndex !== -1) {\n const charClass = pattern.slice(i, closingIndex + 1);\n const human = humanizeToken(charClass, locale);\n result += human !== charClass ? ` ${human} ` : charClass;\n i = closingIndex + 1;\n continue;\n }\n }\n\n if (char === '{') {\n const closingIndex = pattern.indexOf('}', i);\n if (closingIndex !== -1) {\n const quantifier = pattern.slice(i, closingIndex + 1);\n const human = humanizeToken(quantifier, locale);\n result += human !== quantifier ? 
` ${human} ` : quantifier;\n i = closingIndex + 1;\n continue;\n }\n }\n\n if (char === '(') {\n const closingIndex = pattern.indexOf(')', i);\n if (closingIndex !== -1) {\n const group = pattern.slice(i + 1, closingIndex);\n if (group.startsWith('?')) {\n if (group.startsWith('?:')) {\n result += ' (non-capturing group: ';\n result += humanize(group.slice(2), options);\n result += ')';\n } else if (group.startsWith('?=')) {\n result += ' (positive lookahead: ';\n result += humanize(group.slice(2), options);\n result += ')';\n } else if (group.startsWith('?!')) {\n result += ' (negative lookahead: ';\n result += humanize(group.slice(2), options);\n result += ')';\n }\n } else {\n result += ' (capturing group: ';\n result += humanize(group, options);\n result += ')';\n }\n i = closingIndex + 1;\n continue;\n }\n }\n\n const token = TOKENS[char];\n if (token) {\n result += ` ${token[locale as 'en' | 'tr'] || token.en} `;\n } else if (char !== ' ' && char !== '\\t' && char !== '\\n') {\n result += char;\n }\n\n i++;\n }\n\n return result.trim();\n}\n\n/**\n * Create a human readable regex explanation with examples\n */\nexport function explain(pattern: string, options: HumanizeOptions = {}): {\n pattern: string;\n description: string;\n breakdown: string[];\n} {\n const locale = options.locale || 'en';\n const description = humanize(pattern, options);\n\n const breakdown: string[] = [];\n\n let i = 0;\n while (i < pattern.length) {\n const char = pattern[i];\n\n if (char === '\\\\') {\n if (i + 1 < pattern.length) {\n const token = '\\\\' + pattern[i + 1];\n const human = humanizeToken(token, locale);\n if (human !== token) {\n breakdown.push(`${token} → ${human}`);\n }\n i += 2;\n continue;\n }\n }\n\n if (char === '[') {\n const closingIndex = pattern.indexOf(']', i);\n if (closingIndex !== -1) {\n const charClass = pattern.slice(i, closingIndex + 1);\n const human = humanizeToken(charClass, locale);\n breakdown.push(`${charClass} → ${human}`);\n i = closingIndex + 
1;\n continue;\n }\n }\n\n const token = TOKENS[char];\n if (token) {\n breakdown.push(`${char} → ${token[locale as 'en' | 'tr'] || token.en}`);\n }\n\n i++;\n }\n\n return { pattern, description, breakdown };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAIA,IAAM,SAAqD;AAAA,EACzD,KAAK,EAAE,IAAI,mBAAmB,IAAI,4BAAa;AAAA,EAC/C,KAAK,EAAE,IAAI,iBAAiB,IAAI,kBAAa;AAAA,EAC7C,KAAK,EAAE,IAAI,iBAAiB,IAAI,wBAAwB;AAAA,EACxD,KAAK,EAAE,IAAI,sBAAsB,IAAI,sCAA4B;AAAA,EACjE,KAAK,EAAE,IAAI,qBAAqB,IAAI,0BAA0B;AAAA,EAC9D,KAAK,EAAE,IAAI,+BAA+B,IAAI,2CAAiC;AAAA,EAC/E,KAAK,EAAE,IAAI,MAAM,IAAI,OAAO;AAAA,EAC5B,OAAO,EAAE,IAAI,aAAa,IAAI,qBAAqB;AAAA,EACnD,OAAO,EAAE,IAAI,iBAAiB,IAAI,6BAA6B;AAAA,EAC/D,OAAO,EAAE,IAAI,sBAAsB,IAAI,gCAAgC;AAAA,EACvE,OAAO,EAAE,IAAI,0BAA0B,IAAI,wCAAwC;AAAA,EACnF,OAAO,EAAE,IAAI,kBAAkB,IAAI,qCAAgC;AAAA,EACnE,OAAO,EAAE,IAAI,sBAAsB,IAAI,mCAA8B;AAAA,EACrE,OAAO,EAAE,IAAI,iBAAiB,IAAI,+BAAgB;AAAA,EAClD,OAAO,EAAE,IAAI,qBAAqB,IAAI,0CAAsB;AAAA,EAC5D,OAAO,EAAE,IAAI,WAAW,IAAI,kBAAa;AAAA,EACzC,OAAO,EAAE,IAAI,mBAAmB,IAAI,4BAAa;AAAA,EACjD,OAAO,EAAE,IAAI,OAAO,IAAI,QAAQ;AAClC;AAEA,IAAM,oBAAgE;AAAA,EACpE,SAAS,EAAE,IAAI,wBAAwB,IAAI,mCAA0B;AAAA,EACrE,SAAS,EAAE,IAAI,wBAAwB,IAAI,gCAA0B;AAAA,EACrE,SAAS,EAAE,IAAI,aAAa,IAAI,qBAAqB;AAAA,EACrD,YAAY,EAAE,IAAI,cAAc,IAAI,oBAAoB;AAAA,EACxD,eAAe,EAAE,IAAI,uBAAuB,IAAI,+BAA+B;AACjF;AAEA,IAAM,cAA0D;AAAA,EAC9D,OAAO,EAAE,IAAI,mBAAmB,IAAI,YAAY;AAAA,EAChD,QAAQ,EAAE,IAAI,mBAAmB,IAAI,wBAAwB;AAAA,EAC7D,SAAS,EAAE,IAAI,yBAAyB,IAAI,yBAAoB;AAClE;AAEA,SAAS,cAAc,OAAe,QAAwB;AAC5D,MAAI,OAAO,KAAK,GAAG;AACjB,WAAO,OAAO,KAAK,EAAE,MAAqB,KAAK,OAAO,KAAK,EAAE;AAAA,EAC/D;AAEA,MAAI,kBAAkB,KAAK,GAAG;AAC5B,WAAO,kBAAkB,KAAK,EAAE,MAAqB,KAAK,kBAAkB,KAAK,EAAE;AAAA,EACrF;AAEA,QAAM,kBAAkB,MAAM,MAAM,wBAAwB;AAC5D,MAAI,iBAAiB;AACnB,UAAM,IAAI,gBAAgB,CAAC;AAC3B,UAAM,IAAI,gBAAgB,CAAC;AAC3B,QAAI,GAAG;AACL,aAAO,GAAG,YAAY,OAAO,EAAE,MAAqB,EAAE,QAAQ,KAAK,CAAC,EAAE,QAAQ,KAAK,CAAC,CAAC;AAAA,IACvF;AACA,WAAO,GAAG,YAAY,MAAM,EAAE,MAAqB,EAAE,QAAQ,KAAK,CAAC,CAAC;AAAA,EACtE;AAEA,SAAO
;AACT;AAQO,SAAS,SAAS,SAAiB,UAA2B,CAAC,GAAW;AAC/E,QAAM,SAAS,QAAQ,UAAU;AACjC,MAAI,SAAS;AAEb,MAAI,IAAI;AACR,SAAO,IAAI,QAAQ,QAAQ;AACzB,UAAM,OAAO,QAAQ,CAAC;AAEtB,QAAI,SAAS,MAAM;AACjB,UAAI,IAAI,IAAI,QAAQ,QAAQ;AAC1B,cAAM,WAAW,QAAQ,IAAI,CAAC;AAC9B,cAAMA,SAAQ,OAAO;AACrB,cAAM,QAAQ,cAAcA,QAAO,MAAM;AACzC,kBAAU,UAAUA,SAAQ,IAAI,KAAK,MAAMA;AAC3C,aAAK;AACL;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,YAAY,QAAQ,MAAM,GAAG,eAAe,CAAC;AACnD,cAAM,QAAQ,cAAc,WAAW,MAAM;AAC7C,kBAAU,UAAU,YAAY,IAAI,KAAK,MAAM;AAC/C,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,aAAa,QAAQ,MAAM,GAAG,eAAe,CAAC;AACpD,cAAM,QAAQ,cAAc,YAAY,MAAM;AAC9C,kBAAU,UAAU,aAAa,IAAI,KAAK,MAAM;AAChD,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,QAAQ,QAAQ,MAAM,IAAI,GAAG,YAAY;AAC/C,YAAI,MAAM,WAAW,GAAG,GAAG;AACzB,cAAI,MAAM,WAAW,IAAI,GAAG;AAC1B,sBAAU;AACV,sBAAU,SAAS,MAAM,MAAM,CAAC,GAAG,OAAO;AAC1C,sBAAU;AAAA,UACZ,WAAW,MAAM,WAAW,IAAI,GAAG;AACjC,sBAAU;AACV,sBAAU,SAAS,MAAM,MAAM,CAAC,GAAG,OAAO;AAC1C,sBAAU;AAAA,UACZ,WAAW,MAAM,WAAW,IAAI,GAAG;AACjC,sBAAU;AACV,sBAAU,SAAS,MAAM,MAAM,CAAC,GAAG,OAAO;AAC1C,sBAAU;AAAA,UACZ;AAAA,QACF,OAAO;AACL,oBAAU;AACV,oBAAU,SAAS,OAAO,OAAO;AACjC,oBAAU;AAAA,QACZ;AACA,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,UAAM,QAAQ,OAAO,IAAI;AACzB,QAAI,OAAO;AACT,gBAAU,IAAI,MAAM,MAAqB,KAAK,MAAM,EAAE;AAAA,IACxD,WAAW,SAAS,OAAO,SAAS,OAAQ,SAAS,MAAM;AACzD,gBAAU;AAAA,IACZ;AAEA;AAAA,EACF;AAEA,SAAO,OAAO,KAAK;AACrB;AAKO,SAAS,QAAQ,SAAiB,UAA2B,CAAC,GAInE;AACA,QAAM,SAAS,QAAQ,UAAU;AACjC,QAAM,cAAc,SAAS,SAAS,OAAO;AAE7C,QAAM,YAAsB,CAAC;AAE7B,MAAI,IAAI;AACR,SAAO,IAAI,QAAQ,QAAQ;AACzB,UAAM,OAAO,QAAQ,CAAC;AAEtB,QAAI,SAAS,MAAM;AACjB,UAAI,IAAI,IAAI,QAAQ,QAAQ;AAC1B,cAAMA,SAAQ,OAAO,QAAQ,IAAI,CAAC;AAClC,cAAM,QAAQ,cAAcA,QAAO,MAAM;AACzC,YAAI,UAAUA,QAAO;AACnB,oBAAU,KAAK,GAAGA,MAAK,WAAM,KAAK,EAAE;AAAA,QACtC;AACA,aAAK;AACL;AAAA,MACF;AAAA,IAC
F;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,YAAY,QAAQ,MAAM,GAAG,eAAe,CAAC;AACnD,cAAM,QAAQ,cAAc,WAAW,MAAM;AAC7C,kBAAU,KAAK,GAAG,SAAS,WAAM,KAAK,EAAE;AACxC,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,UAAM,QAAQ,OAAO,IAAI;AACzB,QAAI,OAAO;AACT,gBAAU,KAAK,GAAG,IAAI,WAAM,MAAM,MAAqB,KAAK,MAAM,EAAE,EAAE;AAAA,IACxE;AAEA;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,aAAa,UAAU;AAC3C;","names":["token"]}
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
interface HumanizeOptions {
    /** Output language for generated descriptions; defaults to 'en'. */
    locale?: 'en' | 'tr';
}
/**
 * Convert a regex pattern to a human readable description
 * @param pattern - Regex pattern string
 * @param options - Options for humanization
 * @returns Human readable description
 */
declare function humanize(pattern: string, options?: HumanizeOptions): string;
/**
 * Create a human readable regex explanation with examples
 */
declare function explain(pattern: string, options?: HumanizeOptions): {
    pattern: string;
    description: string;
    breakdown: string[];
};

export { explain, humanize };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
interface HumanizeOptions {
    /** Output language for generated descriptions; defaults to 'en'. */
    locale?: 'en' | 'tr';
}
/**
 * Convert a regex pattern to a human readable description
 * @param pattern - Regex pattern string
 * @param options - Options for humanization
 * @returns Human readable description
 */
declare function humanize(pattern: string, options?: HumanizeOptions): string;
/**
 * Create a human readable regex explanation with examples
 */
declare function explain(pattern: string, options?: HumanizeOptions): {
    pattern: string;
    description: string;
    breakdown: string[];
};

export { explain, humanize };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
// src/index.ts
|
|
2
|
+
// Locale-keyed descriptions for single-character and escape-sequence tokens.
var TOKENS = {
  "^": { en: "start of string", tr: "sat\u0131r ba\u015F\u0131" },
  "$": { en: "end of string", tr: "sat\u0131r sonu" },
  ".": { en: "any character", tr: "herhangi bir karakter" },
  "*": { en: "zero or more times", tr: "s\u0131f\u0131r veya daha fazla kez" },
  "+": { en: "one or more times", tr: "bir veya daha fazla kez" },
  "?": { en: "zero or one time (optional)", tr: "s\u0131f\u0131r veya bir kez (opsiyonel)" },
  "|": { en: "or", tr: "veya" },
  "\\d": { en: "any digit", tr: "herhangi bir rakam" },
  "\\D": { en: "any non-digit", tr: "herhangi bir rakam olmayan" },
  "\\w": { en: "any word character", tr: "herhangi bir kelime karakteri" },
  "\\W": { en: "any non-word character", tr: "herhangi bir kelime karakteri olmayan" },
  "\\s": { en: "any whitespace", tr: "herhangi bir bo\u015Fluk karakteri" },
  "\\S": { en: "any non-whitespace", tr: "herhangi bir bo\u015Fluk olmayan" },
  "\\b": { en: "word boundary", tr: "kelime s\u0131n\u0131r\u0131" },
  "\\B": { en: "non-word boundary", tr: "kelime s\u0131n\u0131r\u0131 de\u011Fil" },
  "\\n": { en: "newline", tr: "yeni sat\u0131r" },
  "\\r": { en: "carriage return", tr: "sat\u0131r ba\u015F\u0131" },
  "\\t": { en: "tab", tr: "sekme" }
};
// Descriptions for a few common, exact character-class spellings.
var CHARACTER_CLASSES = {
  "[a-z]": { en: "any lowercase letter", tr: "herhangi bir k\xFC\xE7\xFCk harf" },
  "[A-Z]": { en: "any uppercase letter", tr: "herhangi bir b\xFCy\xFCk harf" },
  "[0-9]": { en: "any digit", tr: "herhangi bir rakam" },
  "[a-zA-Z]": { en: "any letter", tr: "herhangi bir harf" },
  "[0-9a-zA-Z]": { en: "any letter or digit", tr: "herhangi bir harf veya rakam" }
};
// Templates for counted quantifiers; the standalone letters n/m are
// placeholders substituted in humanizeToken.
var QUANTIFIERS = {
  "{n}": { en: "exactly n times", tr: "tam n kez" },
  "{n,}": { en: "n or more times", tr: "n veya daha fazla kez" },
  "{n,m}": { en: "between n and m times", tr: "n ile m aras\u0131 kez" }
};
/**
 * Translate a single regex token (escape, known character class, or counted
 * quantifier) into a human-readable phrase for the given locale.
 * Returns the token unchanged when it is not recognized.
 * @param {string} token - One lexed token, e.g. "\\d", "[a-z]", "{2,5}".
 * @param {string} locale - "en" or "tr"; unknown locales fall back to "en".
 * @returns {string} Human-readable phrase, or the original token.
 */
function humanizeToken(token, locale) {
  if (TOKENS[token]) {
    return TOKENS[token][locale] || TOKENS[token].en;
  }
  if (CHARACTER_CLASSES[token]) {
    return CHARACTER_CLASSES[token][locale] || CHARACTER_CLASSES[token].en;
  }
  // Counted quantifiers. Group 2 records whether a comma was present so we
  // can distinguish {n} (exactly) from {n,} (at least); group 3 is the
  // optional upper bound. The previous regex could never match "{n,}" and
  // mapped "{n}" to the "n or more times" template.
  const quantifierMatch = token.match(/^\{(\d+)(,(\d+)?)?\}$/);
  if (quantifierMatch) {
    const n = quantifierMatch[1];
    const hasComma = quantifierMatch[2] !== undefined;
    const m = quantifierMatch[3];
    const key = m !== undefined ? "{n,m}" : hasComma ? "{n,}" : "{n}";
    const template = QUANTIFIERS[key][locale] || QUANTIFIERS[key].en;
    // Substitute only the standalone placeholder letters. A word-boundary
    // regex is required: a plain .replace("n", ...) would hit the "n"
    // inside words such as "between".
    let text = template.replace(/\bn\b/, n);
    if (m !== undefined) {
      text = text.replace(/\bm\b/, m);
    }
    return text;
  }
  return token;
}
|
|
52
|
+
/**
 * Convert a regex pattern string into a human-readable description.
 * @param {string} pattern - Regex pattern string (without delimiters/flags).
 * @param {{locale?: 'en'|'tr'}} [options] - Humanization options; locale defaults to "en".
 * @returns {string} Human-readable description.
 */
function humanize(pattern, options = {}) {
  const locale = options.locale || "en";
  let result = "";
  let i = 0;
  while (i < pattern.length) {
    const char = pattern[i];
    // Escape sequence (\d, \w, ...): consume two characters.
    if (char === "\\") {
      if (i + 1 < pattern.length) {
        const nextChar = pattern[i + 1];
        const token2 = "\\" + nextChar;
        const human = humanizeToken(token2, locale);
        result += human !== token2 ? ` ${human} ` : token2;
        i += 2;
        continue;
      }
    }
    // Character class: consume through the closing bracket.
    if (char === "[") {
      const closingIndex = pattern.indexOf("]", i);
      if (closingIndex !== -1) {
        const charClass = pattern.slice(i, closingIndex + 1);
        const human = humanizeToken(charClass, locale);
        result += human !== charClass ? ` ${human} ` : charClass;
        i = closingIndex + 1;
        continue;
      }
    }
    // Counted quantifier like {2,5}: consume through the closing brace.
    if (char === "{") {
      const closingIndex = pattern.indexOf("}", i);
      if (closingIndex !== -1) {
        const quantifier = pattern.slice(i, closingIndex + 1);
        const human = humanizeToken(quantifier, locale);
        result += human !== quantifier ? ` ${human} ` : quantifier;
        i = closingIndex + 1;
        continue;
      }
    }
    // Groups. NOTE: indexOf(")") finds the FIRST closing paren, so nested
    // groups are truncated; preserved as-is from the original implementation.
    if (char === "(") {
      const closingIndex = pattern.indexOf(")", i);
      if (closingIndex !== -1) {
        const group = pattern.slice(i + 1, closingIndex);
        if (group.startsWith("?")) {
          if (group.startsWith("?:")) {
            result += " (non-capturing group: ";
            result += humanize(group.slice(2), options);
            result += ")";
          } else if (group.startsWith("?=")) {
            result += " (positive lookahead: ";
            result += humanize(group.slice(2), options);
            result += ")";
          } else if (group.startsWith("?!")) {
            result += " (negative lookahead: ";
            result += humanize(group.slice(2), options);
            result += ")";
          } else if (group.startsWith("?<=")) {
            // FIX: lookbehind groups were previously dropped silently.
            result += " (positive lookbehind: ";
            result += humanize(group.slice(3), options);
            result += ")";
          } else if (group.startsWith("?<!")) {
            result += " (negative lookbehind: ";
            result += humanize(group.slice(3), options);
            result += ")";
          } else {
            // FIX: named capturing groups ((?<name>...)) and any other "?"
            // form were previously dropped from the output entirely.
            const named = group.match(/^\?<([^>]+)>/);
            if (named) {
              result += ` (capturing group "${named[1]}": `;
              result += humanize(group.slice(named[0].length), options);
              result += ")";
            } else {
              result += " (group: ";
              result += humanize(group.slice(1), options);
              result += ")";
            }
          }
        } else {
          result += " (capturing group: ";
          result += humanize(group, options);
          result += ")";
        }
        i = closingIndex + 1;
        continue;
      }
    }
    // Single-character tokens; whitespace in the pattern is skipped,
    // anything else passes through literally.
    const token = TOKENS[char];
    if (token) {
      result += ` ${token[locale] || token.en} `;
    } else if (char !== " " && char !== "\t" && char !== "\n") {
      result += char;
    }
    i++;
  }
  return result.trim();
}
|
|
125
|
+
/**
 * Build a structured explanation for a regex pattern: the pattern itself,
 * its humanized description, and a token-by-token breakdown list.
 * @param {string} pattern - Regex pattern string.
 * @param {{locale?: 'en'|'tr'}} [options] - Humanization options; locale defaults to "en".
 * @returns {{pattern: string, description: string, breakdown: string[]}}
 */
function explain(pattern, options = {}) {
  const locale = options.locale || "en";
  const description = humanize(pattern, options);
  const breakdown = [];
  let pos = 0;
  while (pos < pattern.length) {
    const current = pattern[pos];
    // Escape sequence: consume two characters; only record it when a
    // translation exists.
    if (current === "\\" && pos + 1 < pattern.length) {
      const escaped = "\\" + pattern[pos + 1];
      const translated = humanizeToken(escaped, locale);
      if (translated !== escaped) {
        breakdown.push(`${escaped} \u2192 ${translated}`);
      }
      pos += 2;
      continue;
    }
    // Character class: consume through the closing bracket and record it.
    if (current === "[") {
      const end = pattern.indexOf("]", pos);
      if (end !== -1) {
        const cls = pattern.slice(pos, end + 1);
        breakdown.push(`${cls} \u2192 ${humanizeToken(cls, locale)}`);
        pos = end + 1;
        continue;
      }
    }
    // Single-character tokens with a known translation.
    const entry = TOKENS[current];
    if (entry) {
      breakdown.push(`${current} \u2192 ${entry[locale] || entry.en}`);
    }
    pos++;
  }
  return { pattern, description, breakdown };
}
|
|
161
|
+
export {
|
|
162
|
+
explain,
|
|
163
|
+
humanize
|
|
164
|
+
};
|
|
165
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["interface HumanizeOptions {\n locale?: 'en' | 'tr';\n}\n\nconst TOKENS: Record<string, { en: string; tr: string }> = {\n '^': { en: 'start of string', tr: 'satır başı' },\n '$': { en: 'end of string', tr: 'satır sonu' },\n '.': { en: 'any character', tr: 'herhangi bir karakter' },\n '*': { en: 'zero or more times', tr: 'sıfır veya daha fazla kez' },\n '+': { en: 'one or more times', tr: 'bir veya daha fazla kez' },\n '?': { en: 'zero or one time (optional)', tr: 'sıfır veya bir kez (opsiyonel)' },\n '|': { en: 'or', tr: 'veya' },\n '\\\\d': { en: 'any digit', tr: 'herhangi bir rakam' },\n '\\\\D': { en: 'any non-digit', tr: 'herhangi bir rakam olmayan' },\n '\\\\w': { en: 'any word character', tr: 'herhangi bir kelime karakteri' },\n '\\\\W': { en: 'any non-word character', tr: 'herhangi bir kelime karakteri olmayan' },\n '\\\\s': { en: 'any whitespace', tr: 'herhangi bir boşluk karakteri' },\n '\\\\S': { en: 'any non-whitespace', tr: 'herhangi bir boşluk olmayan' },\n '\\\\b': { en: 'word boundary', tr: 'kelime sınırı' },\n '\\\\B': { en: 'non-word boundary', tr: 'kelime sınırı değil' },\n '\\\\n': { en: 'newline', tr: 'yeni satır' },\n '\\\\r': { en: 'carriage return', tr: 'satır başı' },\n '\\\\t': { en: 'tab', tr: 'sekme' },\n};\n\nconst CHARACTER_CLASSES: Record<string, { en: string; tr: string }> = {\n '[a-z]': { en: 'any lowercase letter', tr: 'herhangi bir küçük harf' },\n '[A-Z]': { en: 'any uppercase letter', tr: 'herhangi bir büyük harf' },\n '[0-9]': { en: 'any digit', tr: 'herhangi bir rakam' },\n '[a-zA-Z]': { en: 'any letter', tr: 'herhangi bir harf' },\n '[0-9a-zA-Z]': { en: 'any letter or digit', tr: 'herhangi bir harf veya rakam' },\n};\n\nconst QUANTIFIERS: Record<string, { en: string; tr: string }> = {\n '{n}': { en: 'exactly n times', tr: 'tam n kez' },\n '{n,}': { en: 'n or more times', tr: 'n veya daha fazla kez' },\n '{n,m}': { en: 'between n and m times', tr: 'n ile m arası kez' 
},\n};\n\nfunction humanizeToken(token: string, locale: string): string {\n if (TOKENS[token]) {\n return TOKENS[token][locale as 'en' | 'tr'] || TOKENS[token].en;\n }\n\n if (CHARACTER_CLASSES[token]) {\n return CHARACTER_CLASSES[token][locale as 'en' | 'tr'] || CHARACTER_CLASSES[token].en;\n }\n\n const quantifierMatch = token.match(/^\\{(\\d+)(?:,(\\d+))?\\}$/);\n if (quantifierMatch) {\n const n = quantifierMatch[1];\n const m = quantifierMatch[2];\n if (m) {\n return `${QUANTIFIERS['{n,m}'][locale as 'en' | 'tr'].replace('n', n).replace('m', m)}`;\n }\n return `${QUANTIFIERS['{n,}'][locale as 'en' | 'tr'].replace('n', n)}`;\n }\n\n return token;\n}\n\n/**\n * Convert a regex pattern to a human readable description\n * @param pattern - Regex pattern string\n * @param options - Options for humanization\n * @returns Human readable description\n */\nexport function humanize(pattern: string, options: HumanizeOptions = {}): string {\n const locale = options.locale || 'en';\n let result = '';\n\n let i = 0;\n while (i < pattern.length) {\n const char = pattern[i];\n\n if (char === '\\\\') {\n if (i + 1 < pattern.length) {\n const nextChar = pattern[i + 1];\n const token = '\\\\' + nextChar;\n const human = humanizeToken(token, locale);\n result += human !== token ? ` ${human} ` : token;\n i += 2;\n continue;\n }\n }\n\n if (char === '[') {\n const closingIndex = pattern.indexOf(']', i);\n if (closingIndex !== -1) {\n const charClass = pattern.slice(i, closingIndex + 1);\n const human = humanizeToken(charClass, locale);\n result += human !== charClass ? ` ${human} ` : charClass;\n i = closingIndex + 1;\n continue;\n }\n }\n\n if (char === '{') {\n const closingIndex = pattern.indexOf('}', i);\n if (closingIndex !== -1) {\n const quantifier = pattern.slice(i, closingIndex + 1);\n const human = humanizeToken(quantifier, locale);\n result += human !== quantifier ? 
` ${human} ` : quantifier;\n i = closingIndex + 1;\n continue;\n }\n }\n\n if (char === '(') {\n const closingIndex = pattern.indexOf(')', i);\n if (closingIndex !== -1) {\n const group = pattern.slice(i + 1, closingIndex);\n if (group.startsWith('?')) {\n if (group.startsWith('?:')) {\n result += ' (non-capturing group: ';\n result += humanize(group.slice(2), options);\n result += ')';\n } else if (group.startsWith('?=')) {\n result += ' (positive lookahead: ';\n result += humanize(group.slice(2), options);\n result += ')';\n } else if (group.startsWith('?!')) {\n result += ' (negative lookahead: ';\n result += humanize(group.slice(2), options);\n result += ')';\n }\n } else {\n result += ' (capturing group: ';\n result += humanize(group, options);\n result += ')';\n }\n i = closingIndex + 1;\n continue;\n }\n }\n\n const token = TOKENS[char];\n if (token) {\n result += ` ${token[locale as 'en' | 'tr'] || token.en} `;\n } else if (char !== ' ' && char !== '\\t' && char !== '\\n') {\n result += char;\n }\n\n i++;\n }\n\n return result.trim();\n}\n\n/**\n * Create a human readable regex explanation with examples\n */\nexport function explain(pattern: string, options: HumanizeOptions = {}): {\n pattern: string;\n description: string;\n breakdown: string[];\n} {\n const locale = options.locale || 'en';\n const description = humanize(pattern, options);\n\n const breakdown: string[] = [];\n\n let i = 0;\n while (i < pattern.length) {\n const char = pattern[i];\n\n if (char === '\\\\') {\n if (i + 1 < pattern.length) {\n const token = '\\\\' + pattern[i + 1];\n const human = humanizeToken(token, locale);\n if (human !== token) {\n breakdown.push(`${token} → ${human}`);\n }\n i += 2;\n continue;\n }\n }\n\n if (char === '[') {\n const closingIndex = pattern.indexOf(']', i);\n if (closingIndex !== -1) {\n const charClass = pattern.slice(i, closingIndex + 1);\n const human = humanizeToken(charClass, locale);\n breakdown.push(`${charClass} → ${human}`);\n i = closingIndex + 
1;\n continue;\n }\n }\n\n const token = TOKENS[char];\n if (token) {\n breakdown.push(`${char} → ${token[locale as 'en' | 'tr'] || token.en}`);\n }\n\n i++;\n }\n\n return { pattern, description, breakdown };\n}\n"],"mappings":";AAIA,IAAM,SAAqD;AAAA,EACzD,KAAK,EAAE,IAAI,mBAAmB,IAAI,4BAAa;AAAA,EAC/C,KAAK,EAAE,IAAI,iBAAiB,IAAI,kBAAa;AAAA,EAC7C,KAAK,EAAE,IAAI,iBAAiB,IAAI,wBAAwB;AAAA,EACxD,KAAK,EAAE,IAAI,sBAAsB,IAAI,sCAA4B;AAAA,EACjE,KAAK,EAAE,IAAI,qBAAqB,IAAI,0BAA0B;AAAA,EAC9D,KAAK,EAAE,IAAI,+BAA+B,IAAI,2CAAiC;AAAA,EAC/E,KAAK,EAAE,IAAI,MAAM,IAAI,OAAO;AAAA,EAC5B,OAAO,EAAE,IAAI,aAAa,IAAI,qBAAqB;AAAA,EACnD,OAAO,EAAE,IAAI,iBAAiB,IAAI,6BAA6B;AAAA,EAC/D,OAAO,EAAE,IAAI,sBAAsB,IAAI,gCAAgC;AAAA,EACvE,OAAO,EAAE,IAAI,0BAA0B,IAAI,wCAAwC;AAAA,EACnF,OAAO,EAAE,IAAI,kBAAkB,IAAI,qCAAgC;AAAA,EACnE,OAAO,EAAE,IAAI,sBAAsB,IAAI,mCAA8B;AAAA,EACrE,OAAO,EAAE,IAAI,iBAAiB,IAAI,+BAAgB;AAAA,EAClD,OAAO,EAAE,IAAI,qBAAqB,IAAI,0CAAsB;AAAA,EAC5D,OAAO,EAAE,IAAI,WAAW,IAAI,kBAAa;AAAA,EACzC,OAAO,EAAE,IAAI,mBAAmB,IAAI,4BAAa;AAAA,EACjD,OAAO,EAAE,IAAI,OAAO,IAAI,QAAQ;AAClC;AAEA,IAAM,oBAAgE;AAAA,EACpE,SAAS,EAAE,IAAI,wBAAwB,IAAI,mCAA0B;AAAA,EACrE,SAAS,EAAE,IAAI,wBAAwB,IAAI,gCAA0B;AAAA,EACrE,SAAS,EAAE,IAAI,aAAa,IAAI,qBAAqB;AAAA,EACrD,YAAY,EAAE,IAAI,cAAc,IAAI,oBAAoB;AAAA,EACxD,eAAe,EAAE,IAAI,uBAAuB,IAAI,+BAA+B;AACjF;AAEA,IAAM,cAA0D;AAAA,EAC9D,OAAO,EAAE,IAAI,mBAAmB,IAAI,YAAY;AAAA,EAChD,QAAQ,EAAE,IAAI,mBAAmB,IAAI,wBAAwB;AAAA,EAC7D,SAAS,EAAE,IAAI,yBAAyB,IAAI,yBAAoB;AAClE;AAEA,SAAS,cAAc,OAAe,QAAwB;AAC5D,MAAI,OAAO,KAAK,GAAG;AACjB,WAAO,OAAO,KAAK,EAAE,MAAqB,KAAK,OAAO,KAAK,EAAE;AAAA,EAC/D;AAEA,MAAI,kBAAkB,KAAK,GAAG;AAC5B,WAAO,kBAAkB,KAAK,EAAE,MAAqB,KAAK,kBAAkB,KAAK,EAAE;AAAA,EACrF;AAEA,QAAM,kBAAkB,MAAM,MAAM,wBAAwB;AAC5D,MAAI,iBAAiB;AACnB,UAAM,IAAI,gBAAgB,CAAC;AAC3B,UAAM,IAAI,gBAAgB,CAAC;AAC3B,QAAI,GAAG;AACL,aAAO,GAAG,YAAY,OAAO,EAAE,MAAqB,EAAE,QAAQ,KAAK,CAAC,EAAE,QAAQ,KAAK,CAAC,CAAC;AAAA,IACvF;AACA,WAAO,GAAG,YAAY,MAAM,EAAE,MAAqB,EAAE,QAAQ,KAAK,CAAC,CAAC;AAAA,EACtE;AAEA,SAAO;AACT;AAQO,SAAS,SAAS,SAAiB,UAA2B,CAAC,GAAW;AAC/E,
QAAM,SAAS,QAAQ,UAAU;AACjC,MAAI,SAAS;AAEb,MAAI,IAAI;AACR,SAAO,IAAI,QAAQ,QAAQ;AACzB,UAAM,OAAO,QAAQ,CAAC;AAEtB,QAAI,SAAS,MAAM;AACjB,UAAI,IAAI,IAAI,QAAQ,QAAQ;AAC1B,cAAM,WAAW,QAAQ,IAAI,CAAC;AAC9B,cAAMA,SAAQ,OAAO;AACrB,cAAM,QAAQ,cAAcA,QAAO,MAAM;AACzC,kBAAU,UAAUA,SAAQ,IAAI,KAAK,MAAMA;AAC3C,aAAK;AACL;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,YAAY,QAAQ,MAAM,GAAG,eAAe,CAAC;AACnD,cAAM,QAAQ,cAAc,WAAW,MAAM;AAC7C,kBAAU,UAAU,YAAY,IAAI,KAAK,MAAM;AAC/C,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,aAAa,QAAQ,MAAM,GAAG,eAAe,CAAC;AACpD,cAAM,QAAQ,cAAc,YAAY,MAAM;AAC9C,kBAAU,UAAU,aAAa,IAAI,KAAK,MAAM;AAChD,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,KAAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,QAAQ,QAAQ,MAAM,IAAI,GAAG,YAAY;AAC/C,YAAI,MAAM,WAAW,GAAG,GAAG;AACzB,cAAI,MAAM,WAAW,IAAI,GAAG;AAC1B,sBAAU;AACV,sBAAU,SAAS,MAAM,MAAM,CAAC,GAAG,OAAO;AAC1C,sBAAU;AAAA,UACZ,WAAW,MAAM,WAAW,IAAI,GAAG;AACjC,sBAAU;AACV,sBAAU,SAAS,MAAM,MAAM,CAAC,GAAG,OAAO;AAC1C,sBAAU;AAAA,UACZ,WAAW,MAAM,WAAW,IAAI,GAAG;AACjC,sBAAU;AACV,sBAAU,SAAS,MAAM,MAAM,CAAC,GAAG,OAAO;AAC1C,sBAAU;AAAA,UACZ;AAAA,QACF,OAAO;AACL,oBAAU;AACV,oBAAU,SAAS,OAAO,OAAO;AACjC,oBAAU;AAAA,QACZ;AACA,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,UAAM,QAAQ,OAAO,IAAI;AACzB,QAAI,OAAO;AACT,gBAAU,IAAI,MAAM,MAAqB,KAAK,MAAM,EAAE;AAAA,IACxD,WAAW,SAAS,OAAO,SAAS,OAAQ,SAAS,MAAM;AACzD,gBAAU;AAAA,IACZ;AAEA;AAAA,EACF;AAEA,SAAO,OAAO,KAAK;AACrB;AAKO,SAAS,QAAQ,SAAiB,UAA2B,CAAC,GAInE;AACA,QAAM,SAAS,QAAQ,UAAU;AACjC,QAAM,cAAc,SAAS,SAAS,OAAO;AAE7C,QAAM,YAAsB,CAAC;AAE7B,MAAI,IAAI;AACR,SAAO,IAAI,QAAQ,QAAQ;AACzB,UAAM,OAAO,QAAQ,CAAC;AAEtB,QAAI,SAAS,MAAM;AACjB,UAAI,IAAI,IAAI,QAAQ,QAAQ;AAC1B,cAAMA,SAAQ,OAAO,QAAQ,IAAI,CAAC;AAClC,cAAM,QAAQ,cAAcA,QAAO,MAAM;AACzC,YAAI,UAAUA,QAAO;AACnB,oBAAU,KAAK,GAAGA,MAAK,WAAM,KAAK,EAAE;AAAA,QACtC;AACA,aAAK;AACL;AAAA,MACF;AAAA,IACF;AAEA,QAAI,SAAS,KAAK;AAChB,YAAM,eAAe,QAAQ,QAAQ,K
AAK,CAAC;AAC3C,UAAI,iBAAiB,IAAI;AACvB,cAAM,YAAY,QAAQ,MAAM,GAAG,eAAe,CAAC;AACnD,cAAM,QAAQ,cAAc,WAAW,MAAM;AAC7C,kBAAU,KAAK,GAAG,SAAS,WAAM,KAAK,EAAE;AACxC,YAAI,eAAe;AACnB;AAAA,MACF;AAAA,IACF;AAEA,UAAM,QAAQ,OAAO,IAAI;AACzB,QAAI,OAAO;AACT,gBAAU,KAAK,GAAG,IAAI,WAAM,MAAM,MAAqB,KAAK,MAAM,EAAE,EAAE;AAAA,IACxE;AAEA;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,aAAa,UAAU;AAC3C;","names":["token"]}
|
package/package.json
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@chaisser/regex-humanizer",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Convert regex patterns to human readable descriptions",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"module": "./dist/index.js",
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"exports": {
|
|
10
|
+
".": {
|
|
11
|
+
"types": "./dist/index.d.ts",
|
|
12
|
+
"import": "./dist/index.js",
|
|
13
|
+
"require": "./dist/index.cjs"
|
|
14
|
+
}
|
|
15
|
+
},
|
|
16
|
+
"files": [
|
|
17
|
+
"dist"
|
|
18
|
+
],
|
|
19
|
+
"scripts": {
|
|
20
|
+
"build": "tsup",
|
|
21
|
+
"dev": "tsup --watch",
|
|
22
|
+
"test": "vitest run",
|
|
23
|
+
"test:watch": "vitest",
|
|
24
|
+
"lint": "tsc --noEmit",
|
|
25
|
+
"prepublishOnly": "npm run build"
|
|
26
|
+
},
|
|
27
|
+
"keywords": [
|
|
28
|
+
"regex",
|
|
29
|
+
"regexp",
|
|
30
|
+
"human",
|
|
31
|
+
"readable",
|
|
32
|
+
"description",
|
|
33
|
+
"typescript"
|
|
34
|
+
],
|
|
35
|
+
"license": "MIT",
|
|
36
|
+
"devDependencies": {
|
|
37
|
+
"@types/node": "^22.10.2",
|
|
38
|
+
"tsup": "^8.3.5",
|
|
39
|
+
"typescript": "^5.7.2",
|
|
40
|
+
"vitest": "^2.1.8"
|
|
41
|
+
},
|
|
42
|
+
"engines": {
|
|
43
|
+
"node": ">=16.0.0"
|
|
44
|
+
},
|
|
45
|
+
"publishConfig": {
|
|
46
|
+
"access": "public"
|
|
47
|
+
}
|
|
48
|
+
}
|