@character-foundry/character-foundry 0.1.3 → 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +70 -0
- package/dist/app-framework.cjs +1742 -0
- package/dist/app-framework.cjs.map +1 -0
- package/dist/app-framework.d.cts +881 -0
- package/dist/app-framework.d.ts +881 -2
- package/dist/app-framework.js +1718 -1
- package/dist/app-framework.js.map +1 -1
- package/dist/charx.cjs +917 -0
- package/dist/charx.cjs.map +1 -0
- package/dist/charx.d.cts +640 -0
- package/dist/charx.d.ts +640 -2
- package/dist/charx.js +893 -1
- package/dist/charx.js.map +1 -1
- package/dist/core.cjs +668 -0
- package/dist/core.cjs.map +1 -0
- package/dist/core.d.cts +363 -0
- package/dist/core.d.ts +363 -2
- package/dist/core.js +644 -1
- package/dist/core.js.map +1 -1
- package/dist/exporter.cjs +7539 -0
- package/dist/exporter.cjs.map +1 -0
- package/dist/exporter.d.cts +681 -0
- package/dist/exporter.d.ts +681 -2
- package/dist/exporter.js +7522 -1
- package/dist/exporter.js.map +1 -1
- package/dist/federation.cjs +3915 -0
- package/dist/federation.cjs.map +1 -0
- package/dist/federation.d.cts +2951 -0
- package/dist/federation.d.ts +2951 -2
- package/dist/federation.js +3891 -1
- package/dist/federation.js.map +1 -1
- package/dist/index.cjs +9109 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1119 -0
- package/dist/index.d.ts +1113 -20
- package/dist/index.js +9092 -26
- package/dist/index.js.map +1 -1
- package/dist/loader.cjs +8923 -0
- package/dist/loader.cjs.map +1 -0
- package/dist/loader.d.cts +1037 -0
- package/dist/loader.d.ts +1037 -2
- package/dist/loader.js +8906 -1
- package/dist/loader.js.map +1 -1
- package/dist/lorebook.cjs +865 -0
- package/dist/lorebook.cjs.map +1 -0
- package/dist/lorebook.d.cts +1008 -0
- package/dist/lorebook.d.ts +1008 -2
- package/dist/lorebook.js +841 -1
- package/dist/lorebook.js.map +1 -1
- package/dist/media.cjs +6660 -0
- package/dist/media.cjs.map +1 -0
- package/dist/media.d.cts +87 -0
- package/dist/media.d.ts +87 -2
- package/dist/media.js +6643 -1
- package/dist/media.js.map +1 -1
- package/dist/normalizer.cjs +502 -0
- package/dist/normalizer.cjs.map +1 -0
- package/dist/normalizer.d.cts +1216 -0
- package/dist/normalizer.d.ts +1216 -2
- package/dist/normalizer.js +478 -1
- package/dist/normalizer.js.map +1 -1
- package/dist/png.cjs +778 -0
- package/dist/png.cjs.map +1 -0
- package/dist/png.d.cts +786 -0
- package/dist/png.d.ts +786 -2
- package/dist/png.js +754 -1
- package/dist/png.js.map +1 -1
- package/dist/schemas.cjs +799 -0
- package/dist/schemas.cjs.map +1 -0
- package/dist/schemas.d.cts +2178 -0
- package/dist/schemas.d.ts +2178 -2
- package/dist/schemas.js +775 -1
- package/dist/schemas.js.map +1 -1
- package/dist/tokenizers.cjs +153 -0
- package/dist/tokenizers.cjs.map +1 -0
- package/dist/tokenizers.d.cts +155 -0
- package/dist/tokenizers.d.ts +155 -2
- package/dist/tokenizers.js +129 -1
- package/dist/tokenizers.js.map +1 -1
- package/dist/voxta.cjs +7995 -0
- package/dist/voxta.cjs.map +1 -0
- package/dist/voxta.d.cts +1349 -0
- package/dist/voxta.d.ts +1349 -2
- package/dist/voxta.js +7978 -1
- package/dist/voxta.js.map +1 -1
- package/package.json +177 -45
- package/dist/app-framework.d.ts.map +0 -1
- package/dist/charx.d.ts.map +0 -1
- package/dist/core.d.ts.map +0 -1
- package/dist/exporter.d.ts.map +0 -1
- package/dist/federation.d.ts.map +0 -1
- package/dist/index.d.ts.map +0 -1
- package/dist/loader.d.ts.map +0 -1
- package/dist/lorebook.d.ts.map +0 -1
- package/dist/media.d.ts.map +0 -1
- package/dist/normalizer.d.ts.map +0 -1
- package/dist/png.d.ts.map +0 -1
- package/dist/schemas.d.ts.map +0 -1
- package/dist/tokenizers.d.ts.map +0 -1
- package/dist/voxta.d.ts.map +0 -1
package/dist/tokenizers.js
CHANGED
|
@@ -1,2 +1,130 @@
|
|
|
1
|
-
|
|
1
|
+
// ../tokenizers/dist/index.js
|
|
2
|
+
import { encode } from "gpt-tokenizer";
|
|
3
|
+
/**
 * Exact GPT-3.5/4 token counting via the cl100k_base encoder
 * provided by the "gpt-tokenizer" package (pure JS).
 */
var GptTokenizer = class {
  id = "gpt-4";
  name = "GPT-4 (cl100k_base)";
  /**
   * Count tokens in `text`. Empty/nullish input counts as 0.
   * If the encoder throws, degrades to a chars/4 approximation.
   */
  count(text) {
    if (!text) return 0;
    try {
      const tokens = encode(text);
      return tokens.length;
    } catch (error) {
      console.warn("GPT tokenization failed, falling back to approximation", error);
      return Math.ceil(text.length / 4);
    }
  }
  /** Count each string in `texts`, returning counts in the same order. */
  countMany(texts) {
    const counts = [];
    for (const item of texts) {
      counts.push(this.count(item));
    }
    return counts;
  }
  /** Encode `text` into its raw token-id array. */
  encode(text) {
    return encode(text);
  }
};
|
|
22
|
+
/**
 * Fast heuristic tokenizer: ~4 characters per token, with a small
 * per-word surcharge. Useful when exact counts are not required.
 */
var SimpleTokenizer = class {
  id = "simple";
  name = "Simple Approximation";
  /** Approximate token count for `text`; 0 for empty/nullish input. */
  count(text) {
    if (!text) return 0;
    const wordCount = text.split(/\s+/).filter((w) => w.length > 0).length;
    return Math.ceil(text.length / 4 + wordCount * 0.3);
  }
  /** Count each string in `texts`, returning counts in the same order. */
  countMany(texts) {
    const counts = [];
    for (const item of texts) {
      counts.push(this.count(item));
    }
    return counts;
  }
};
|
|
35
|
+
/**
 * LLaMA-style heuristic tokenizer: ~4.5 characters per token.
 */
var LlamaTokenizer = class {
  id = "llama";
  name = "LLaMA Approximation";
  /** Approximate token count for `text`; 0 for empty/nullish input. */
  count(text) {
    return text ? Math.ceil(text.length / 4.5) : 0;
  }
  /** Count each string in `texts`, returning counts in the same order. */
  countMany(texts) {
    return texts.map((value) => this.count(value));
  }
};
|
|
46
|
+
/**
 * Registry of tokenizer adapters, keyed by adapter id.
 * Pre-populated with the GPT-4, simple, and LLaMA tokenizers.
 */
var TokenizerRegistry = class {
  tokenizers = /* @__PURE__ */ new Map();
  defaultId = "gpt-4";
  constructor() {
    for (const adapter of [new GptTokenizer(), new SimpleTokenizer(), new LlamaTokenizer()]) {
      this.register(adapter);
    }
  }
  /** Add (or replace) an adapter under its own `id`. */
  register(tokenizer) {
    this.tokenizers.set(tokenizer.id, tokenizer);
  }
  /**
   * Look up an adapter by id. A nullish/empty id resolves to the default
   * ("gpt-4"); an unknown id falls back to the simple approximation.
   */
  get(id) {
    const found = this.tokenizers.get(id || this.defaultId);
    if (found) {
      return found;
    }
    return this.tokenizers.get("simple") || new SimpleTokenizer();
  }
  /** All registered adapters, in registration order. */
  list() {
    return [...this.tokenizers.values()];
  }
};
|
|
69
|
+
// Shared singleton registry plus thin convenience wrappers around it.
var registry = new TokenizerRegistry();
/** Count tokens in `text` using the tokenizer named by `tokenizerId` (default registry default). */
function countTokens(text, tokenizerId) {
  return getTokenizer(tokenizerId).count(text);
}
/** Resolve a tokenizer adapter by id from the shared registry. */
function getTokenizer(tokenizerId) {
  return registry.get(tokenizerId);
}
|
|
76
|
+
/**
 * Count tokens across all text-bearing fields of a character card.
 *
 * Accepts both wrapped ({ data: {...} }) and unwrapped (flat) card shapes.
 *
 * @param card - character card data (CCv2 or CCv3)
 * @param options - { tokenizer?: string, onlyEnabledLorebook?: boolean }
 * @returns per-field token counts plus a grand total
 */
function countCardTokens(card, options = {}) {
  const tokenizer = getTokenizer(options.tokenizer ?? "gpt-4");
  const onlyEnabled = options.onlyEnabledLorebook ?? true;
  // Wrapped cards keep fields under `data`; unwrapped cards are flat.
  const data = card.data ?? card;
  // Nullish fields count as zero tokens.
  const measure = (value) => tokenizer.count(value ?? "");

  const description = measure(data.description);
  const personality = measure(data.personality);
  const scenario = measure(data.scenario);
  const firstMes = measure(data.first_mes);
  const mesExample = measure(data.mes_example);
  const systemPrompt = measure(data.system_prompt);
  const postHistoryInstructions = measure(data.post_history_instructions);
  const creatorNotes = measure(data.creator_notes);

  let alternateGreetings = 0;
  for (const greeting of data.alternate_greetings ?? []) {
    alternateGreetings += measure(greeting);
  }

  let lorebook = 0;
  for (const entry of data.character_book?.entries ?? []) {
    // Entries explicitly disabled are skipped unless the caller opts out.
    if (onlyEnabled && entry.enabled === false) continue;
    lorebook += measure(entry.content);
  }

  const total =
    description +
    personality +
    scenario +
    firstMes +
    mesExample +
    systemPrompt +
    postHistoryInstructions +
    alternateGreetings +
    lorebook +
    creatorNotes;

  return {
    description,
    personality,
    scenario,
    firstMes,
    mesExample,
    systemPrompt,
    postHistoryInstructions,
    alternateGreetings,
    lorebook,
    creatorNotes,
    total
  };
}
|
|
115
|
+
/**
 * Count tokens in a single string.
 *
 * @param text - text to measure
 * @param tokenizerId - tokenizer id to use (default "gpt-4")
 * @returns token count
 */
function countText(text, tokenizerId = "gpt-4") {
  return getTokenizer(tokenizerId).count(text);
}
|
|
119
|
+
export {
|
|
120
|
+
GptTokenizer,
|
|
121
|
+
LlamaTokenizer,
|
|
122
|
+
SimpleTokenizer,
|
|
123
|
+
TokenizerRegistry,
|
|
124
|
+
countCardTokens,
|
|
125
|
+
countText,
|
|
126
|
+
countTokens,
|
|
127
|
+
getTokenizer,
|
|
128
|
+
registry
|
|
129
|
+
};
|
|
2
130
|
//# sourceMappingURL=tokenizers.js.map
|
package/dist/tokenizers.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"tokenizers.js","sourceRoot":"","sources":["../src/tokenizers.ts"],"names":[],"mappings":"AAAA,cAAc,+BAA+B,CAAC"}
|
|
1
|
+
{"version":3,"sources":["../../tokenizers/src/gpt.ts","../../tokenizers/src/simple.ts","../../tokenizers/src/registry.ts","../../tokenizers/src/card-counter.ts"],"sourcesContent":["import { encode } from 'gpt-tokenizer';\nimport type { TokenizerAdapter } from './types.js';\n\n/**\n * GPT-3.5/4 compatible tokenizer using cl100k_base encoding\n * Uses gpt-tokenizer (pure JS)\n */\nexport class GptTokenizer implements TokenizerAdapter {\n readonly id = 'gpt-4';\n readonly name = 'GPT-4 (cl100k_base)';\n\n count(text: string): number {\n if (!text) return 0;\n try {\n return encode(text).length;\n } catch (error) {\n console.warn('GPT tokenization failed, falling back to approximation', error);\n return Math.ceil(text.length / 4);\n }\n }\n\n countMany(texts: string[]): number[] {\n return texts.map(t => this.count(t));\n }\n\n encode(text: string): number[] {\n return encode(text);\n }\n}\n","import type { TokenizerAdapter } from './types.js';\n\n/**\n * Simple BPE-style tokenizer approximation (roughly GPT-2)\n * Useful when exact counts aren't critical or for performance\n */\nexport class SimpleTokenizer implements TokenizerAdapter {\n readonly id = 'simple';\n readonly name = 'Simple Approximation';\n\n count(text: string): number {\n if (!text) return 0;\n // Rough approximation: ~4 chars per token, but words count as tokens\n const words = text.split(/\\s+/).filter((w) => w.length > 0);\n const charCount = text.length;\n return Math.ceil(charCount / 4 + words.length * 0.3);\n }\n\n countMany(texts: string[]): number[] {\n return texts.map(t => this.count(t));\n }\n}\n\n/**\n * LLaMA-style tokenizer approximation\n */\nexport class LlamaTokenizer implements TokenizerAdapter {\n readonly id = 'llama';\n readonly name = 'LLaMA Approximation';\n\n count(text: string): number {\n if (!text) return 0;\n return Math.ceil(text.length / 4.5);\n }\n\n countMany(texts: string[]): number[] {\n return texts.map(t => this.count(t));\n }\n}\n","import type { TokenizerAdapter } 
from './types.js';\nimport { GptTokenizer } from './gpt.js';\nimport { SimpleTokenizer, LlamaTokenizer } from './simple.js';\n\nexport class TokenizerRegistry {\n private tokenizers = new Map<string, TokenizerAdapter>();\n private defaultId: string = 'gpt-4';\n\n constructor() {\n this.register(new GptTokenizer());\n this.register(new SimpleTokenizer());\n this.register(new LlamaTokenizer());\n }\n\n register(tokenizer: TokenizerAdapter): void {\n this.tokenizers.set(tokenizer.id, tokenizer);\n }\n\n get(id?: string): TokenizerAdapter {\n const targetId = id || this.defaultId;\n const tokenizer = this.tokenizers.get(targetId);\n if (!tokenizer) {\n // Fallback to simple if requested ID not found\n return this.tokenizers.get('simple') || new SimpleTokenizer();\n }\n return tokenizer;\n }\n\n list(): TokenizerAdapter[] {\n return Array.from(this.tokenizers.values());\n }\n}\n\nexport const registry = new TokenizerRegistry();\n\n// Helper for quick usage\nexport function countTokens(text: string, tokenizerId?: string): number {\n return registry.get(tokenizerId).count(text);\n}\n\n// Helper to get a tokenizer by ID\nexport function getTokenizer(tokenizerId?: string): TokenizerAdapter {\n return registry.get(tokenizerId);\n}\n","/**\n * Card Token Counter\n *\n * Count tokens in character card fields for consistent\n * cross-platform token reporting.\n */\n\nimport type { TokenizerAdapter } from './types.js';\nimport { getTokenizer } from './registry.js';\n\n/**\n * Token counts for a character card\n */\nexport interface CardTokenCounts {\n description: number;\n personality: number;\n scenario: number;\n firstMes: number;\n mesExample: number;\n systemPrompt: number;\n postHistoryInstructions: number;\n alternateGreetings: number;\n lorebook: number;\n creatorNotes: number;\n total: number;\n}\n\n/**\n * Card data structure for token counting\n * Accepts both CCv2 and CCv3 card structures\n */\nexport interface CardForCounting {\n data?: {\n name?: string;\n 
description?: string;\n personality?: string | null;\n scenario?: string;\n first_mes?: string;\n mes_example?: string | null;\n system_prompt?: string;\n post_history_instructions?: string;\n alternate_greetings?: string[];\n creator_notes?: string;\n character_book?: {\n entries?: Array<{\n content?: string;\n enabled?: boolean;\n }>;\n } | null;\n };\n // Support for unwrapped cards\n name?: string;\n description?: string;\n personality?: string | null;\n scenario?: string;\n first_mes?: string;\n mes_example?: string | null;\n system_prompt?: string;\n post_history_instructions?: string;\n alternate_greetings?: string[];\n creator_notes?: string;\n character_book?: {\n entries?: Array<{\n content?: string;\n enabled?: boolean;\n }>;\n } | null;\n}\n\n/**\n * Token counting options\n */\nexport interface TokenCountOptions {\n /** Tokenizer ID to use. Default: 'gpt-4' */\n tokenizer?: string;\n /** Only count enabled lorebook entries. Default: true */\n onlyEnabledLorebook?: boolean;\n}\n\n/**\n * Count tokens in a character card's fields.\n *\n * @param card - Character card data (CCv2 or CCv3)\n * @param options - Counting options\n * @returns Token counts per field and total\n */\nexport function countCardTokens(\n card: CardForCounting,\n options: TokenCountOptions = {}\n): CardTokenCounts {\n const tokenizer = getTokenizer(options.tokenizer ?? 'gpt-4');\n const onlyEnabled = options.onlyEnabledLorebook ?? true;\n\n // Handle both wrapped and unwrapped formats\n const data = card.data ?? card;\n\n // Count each field\n const description = tokenizer.count(data.description ?? '');\n const personality = tokenizer.count(data.personality ?? '');\n const scenario = tokenizer.count(data.scenario ?? '');\n const firstMes = tokenizer.count(data.first_mes ?? '');\n const mesExample = tokenizer.count(data.mes_example ?? '');\n const systemPrompt = tokenizer.count(data.system_prompt ?? 
'');\n const postHistoryInstructions = tokenizer.count(data.post_history_instructions ?? '');\n const creatorNotes = tokenizer.count(data.creator_notes ?? '');\n\n // Count alternate greetings\n const altGreetings = data.alternate_greetings ?? [];\n const alternateGreetings = altGreetings.reduce(\n (sum, greeting) => sum + tokenizer.count(greeting ?? ''),\n 0\n );\n\n // Count lorebook entries\n const entries = data.character_book?.entries ?? [];\n const lorebook = entries.reduce((sum, entry) => {\n if (onlyEnabled && entry.enabled === false) {\n return sum;\n }\n return sum + tokenizer.count(entry.content ?? '');\n }, 0);\n\n // Calculate total\n const total =\n description +\n personality +\n scenario +\n firstMes +\n mesExample +\n systemPrompt +\n postHistoryInstructions +\n alternateGreetings +\n lorebook +\n creatorNotes;\n\n return {\n description,\n personality,\n scenario,\n firstMes,\n mesExample,\n systemPrompt,\n postHistoryInstructions,\n alternateGreetings,\n lorebook,\n creatorNotes,\n total,\n };\n}\n\n/**\n * Count tokens in a text string using the specified tokenizer.\n *\n * @param text - Text to count tokens in\n * @param tokenizerId - Tokenizer ID to use. 
Default: 'gpt-4'\n * @returns Token count\n */\nexport function countText(text: string, tokenizerId = 'gpt-4'): number {\n const tokenizer = getTokenizer(tokenizerId);\n return tokenizer.count(text);\n}\n"],"mappings":";AAAA,SAAS,cAAc;AAOhB,IAAM,eAAN,MAA+C;EAC3C,KAAK;EACL,OAAO;EAEhB,MAAM,MAAsB;AAC1B,QAAI,CAAC,KAAM,QAAO;AAClB,QAAI;AACF,aAAO,OAAO,IAAI,EAAE;IACtB,SAAS,OAAO;AACd,cAAQ,KAAK,0DAA0D,KAAK;AAC5E,aAAO,KAAK,KAAK,KAAK,SAAS,CAAC;IAClC;EACF;EAEA,UAAU,OAA2B;AACnC,WAAO,MAAM,IAAI,CAAA,MAAK,KAAK,MAAM,CAAC,CAAC;EACrC;EAEA,OAAO,MAAwB;AAC7B,WAAO,OAAO,IAAI;EACpB;AACF;ACtBO,IAAM,kBAAN,MAAkD;EAC9C,KAAK;EACL,OAAO;EAEhB,MAAM,MAAsB;AAC1B,QAAI,CAAC,KAAM,QAAO;AAElB,UAAM,QAAQ,KAAK,MAAM,KAAK,EAAE,OAAO,CAAC,MAAM,EAAE,SAAS,CAAC;AAC1D,UAAM,YAAY,KAAK;AACvB,WAAO,KAAK,KAAK,YAAY,IAAI,MAAM,SAAS,GAAG;EACrD;EAEA,UAAU,OAA2B;AACnC,WAAO,MAAM,IAAI,CAAA,MAAK,KAAK,MAAM,CAAC,CAAC;EACrC;AACF;AAKO,IAAM,iBAAN,MAAiD;EAC7C,KAAK;EACL,OAAO;EAEhB,MAAM,MAAsB;AAC1B,QAAI,CAAC,KAAM,QAAO;AAClB,WAAO,KAAK,KAAK,KAAK,SAAS,GAAG;EACpC;EAEA,UAAU,OAA2B;AACnC,WAAO,MAAM,IAAI,CAAA,MAAK,KAAK,MAAM,CAAC,CAAC;EACrC;AACF;AClCO,IAAM,oBAAN,MAAwB;EACrB,aAAa,oBAAI,IAA8B;EAC/C,YAAoB;EAE5B,cAAc;AACZ,SAAK,SAAS,IAAI,aAAa,CAAC;AAChC,SAAK,SAAS,IAAI,gBAAgB,CAAC;AACnC,SAAK,SAAS,IAAI,eAAe,CAAC;EACpC;EAEA,SAAS,WAAmC;AAC1C,SAAK,WAAW,IAAI,UAAU,IAAI,SAAS;EAC7C;EAEA,IAAI,IAA+B;AACjC,UAAM,WAAW,MAAM,KAAK;AAC5B,UAAM,YAAY,KAAK,WAAW,IAAI,QAAQ;AAC9C,QAAI,CAAC,WAAW;AAEd,aAAO,KAAK,WAAW,IAAI,QAAQ,KAAK,IAAI,gBAAgB;IAC9D;AACA,WAAO;EACT;EAEA,OAA2B;AACzB,WAAO,MAAM,KAAK,KAAK,WAAW,OAAO,CAAC;EAC5C;AACF;AAEO,IAAM,WAAW,IAAI,kBAAkB;AAGvC,SAAS,YAAY,MAAc,aAA8B;AACtE,SAAO,SAAS,IAAI,WAAW,EAAE,MAAM,IAAI;AAC7C;AAGO,SAAS,aAAa,aAAwC;AACnE,SAAO,SAAS,IAAI,WAAW;AACjC;AC2CO,SAAS,gBACd,MACA,UAA6B,CAAC,GACb;AACjB,QAAM,YAAY,aAAa,QAAQ,aAAa,OAAO;AAC3D,QAAM,cAAc,QAAQ,uBAAuB;AAGnD,QAAM,OAAO,KAAK,QAAQ;AAG1B,QAAM,cAAc,UAAU,MAAM,KAAK,eAAe,EAAE;AAC1D,QAAM,cAAc,UAAU,MAAM,KAAK,eAAe,EAAE;AAC1D,QAAM,WAAW,UAAU,MAAM,KAAK,YAAY,EAAE;AACpD,QAAM,WAAW,UAAU,MAAM,KAAK,aAAa,EAAE;AACrD,QAAM,aAAa,UAA
U,MAAM,KAAK,eAAe,EAAE;AACzD,QAAM,eAAe,UAAU,MAAM,KAAK,iBAAiB,EAAE;AAC7D,QAAM,0BAA0B,UAAU,MAAM,KAAK,6BAA6B,EAAE;AACpF,QAAM,eAAe,UAAU,MAAM,KAAK,iBAAiB,EAAE;AAG7D,QAAM,eAAe,KAAK,uBAAuB,CAAC;AAClD,QAAM,qBAAqB,aAAa;IACtC,CAAC,KAAK,aAAa,MAAM,UAAU,MAAM,YAAY,EAAE;IACvD;EACF;AAGA,QAAM,UAAU,KAAK,gBAAgB,WAAW,CAAC;AACjD,QAAM,WAAW,QAAQ,OAAO,CAAC,KAAK,UAAU;AAC9C,QAAI,eAAe,MAAM,YAAY,OAAO;AAC1C,aAAO;IACT;AACA,WAAO,MAAM,UAAU,MAAM,MAAM,WAAW,EAAE;EAClD,GAAG,CAAC;AAGJ,QAAM,QACJ,cACA,cACA,WACA,WACA,aACA,eACA,0BACA,qBACA,WACA;AAEF,SAAO;IACL;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;IACA;EACF;AACF;AASO,SAAS,UAAU,MAAc,cAAc,SAAiB;AACrE,QAAM,YAAY,aAAa,WAAW;AAC1C,SAAO,UAAU,MAAM,IAAI;AAC7B;","names":[]}
|