ruvector 0.1.38 → 0.1.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/.claude-flow/metrics/agent-metrics.json +1 -0
  2. package/.claude-flow/metrics/performance.json +87 -0
  3. package/.claude-flow/metrics/task-metrics.json +10 -0
  4. package/PACKAGE_SUMMARY.md +409 -0
  5. package/README.md +1679 -508
  6. package/bin/cli.js +2427 -0
  7. package/dist/core/agentdb-fast.d.ts +149 -0
  8. package/dist/core/agentdb-fast.d.ts.map +1 -0
  9. package/dist/core/agentdb-fast.js +301 -0
  10. package/dist/core/attention-fallbacks.d.ts +221 -0
  11. package/dist/core/attention-fallbacks.d.ts.map +1 -0
  12. package/dist/core/attention-fallbacks.js +361 -0
  13. package/dist/core/gnn-wrapper.d.ts +143 -0
  14. package/dist/core/gnn-wrapper.d.ts.map +1 -0
  15. package/dist/core/gnn-wrapper.js +213 -0
  16. package/dist/core/index.d.ts +15 -0
  17. package/dist/core/index.d.ts.map +1 -0
  18. package/dist/core/index.js +39 -0
  19. package/dist/core/sona-wrapper.d.ts +215 -0
  20. package/dist/core/sona-wrapper.d.ts.map +1 -0
  21. package/dist/core/sona-wrapper.js +258 -0
  22. package/dist/index.d.ts +87 -82
  23. package/dist/index.d.ts.map +1 -0
  24. package/dist/index.js +169 -89
  25. package/dist/services/embedding-service.d.ts +136 -0
  26. package/dist/services/embedding-service.d.ts.map +1 -0
  27. package/dist/services/embedding-service.js +294 -0
  28. package/dist/services/index.d.ts +6 -0
  29. package/dist/services/index.d.ts.map +1 -0
  30. package/dist/services/index.js +26 -0
  31. package/dist/types.d.ts +145 -0
  32. package/dist/types.d.ts.map +1 -0
  33. package/dist/types.js +2 -0
  34. package/examples/api-usage.js +211 -0
  35. package/examples/cli-demo.sh +85 -0
  36. package/package.json +41 -93
  37. package/bin/ruvector.js +0 -1150
  38. package/dist/index.d.mts +0 -95
  39. package/dist/index.mjs +0 -5
package/dist/core/attention-fallbacks.js
@@ -0,0 +1,361 @@
+ "use strict";
+ /**
+  * Attention Fallbacks - Safe wrapper around @ruvector/attention with automatic array conversion
+  *
+  * @ruvector/attention requires Float32Array inputs. This wrapper performs the
+  * conversion automatically, allowing users to pass either regular arrays or
+  * Float32Arrays.
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MoEAttention = exports.LocalGlobalAttention = exports.LinearAttention = exports.HyperbolicAttention = exports.FlashAttention = exports.MultiHeadAttention = void 0;
+ exports.projectToPoincareBall = projectToPoincareBall;
+ exports.poincareDistance = poincareDistance;
+ exports.mobiusAddition = mobiusAddition;
+ exports.expMap = expMap;
+ exports.logMap = logMap;
+ exports.isAttentionAvailable = isAttentionAvailable;
+ exports.getAttentionVersion = getAttentionVersion;
+ // Lazy load to avoid import errors if not installed
+ let attentionModule = null;
+ let loadError = null;
+ function getAttentionModule() {
+     if (attentionModule)
+         return attentionModule;
+     if (loadError)
+         throw loadError;
+     try {
+         attentionModule = require('@ruvector/attention');
+         return attentionModule;
+     }
+     catch (e) {
+         loadError = new Error(`@ruvector/attention is not installed or failed to load: ${e.message}\n` +
+             `Install with: npm install @ruvector/attention`);
+         throw loadError;
+     }
+ }
+ /**
+  * Convert any array-like input to Float32Array
+  */
+ function toFloat32Array(input) {
+     if (input instanceof Float32Array) {
+         return input;
+     }
+     return new Float32Array(input);
+ }
+ /**
+  * Convert nested arrays to Float32Arrays
+  */
+ function toFloat32Arrays(inputs) {
+     return inputs.map(arr => toFloat32Array(arr));
+ }
+ /**
+  * Convert a Float32Array result back to a regular array
+  */
+ function fromFloat32Array(input) {
+     return Array.from(input);
+ }
+ /**
+  * Multi-head attention mechanism
+  *
+  * This wrapper automatically converts array inputs to Float32Array.
+  */
+ class MultiHeadAttention {
+     /**
+      * Create a new multi-head attention instance
+      *
+      * @param dim - Embedding dimension (must be divisible by numHeads)
+      * @param numHeads - Number of attention heads
+      */
+     constructor(dim, numHeads) {
+         const attention = getAttentionModule();
+         this.inner = new attention.MultiHeadAttention(dim, numHeads);
+         this.dim = dim;
+         this.numHeads = numHeads;
+     }
+     /**
+      * Compute multi-head attention
+      *
+      * @param query - Query vector
+      * @param keys - Array of key vectors
+      * @param values - Array of value vectors
+      * @returns Attention output
+      *
+      * @example
+      * ```typescript
+      * const mha = new MultiHeadAttention(64, 4);
+      *
+      * // Works with regular arrays (64 values each)
+      * const result1 = mha.compute(Array(64).fill(0), [Array(64).fill(0), Array(64).fill(0)], [Array(64).fill(0), Array(64).fill(0)]);
+      *
+      * // Also works with Float32Array
+      * const q = new Float32Array(64);
+      * const k = [new Float32Array(64)];
+      * const v = [new Float32Array(64)];
+      * const result2 = mha.compute(q, k, v);
+      * ```
+      */
+     compute(query, keys, values) {
+         const raw = this.inner.compute(toFloat32Array(query), toFloat32Arrays(keys), toFloat32Arrays(values));
+         return {
+             values: fromFloat32Array(raw),
+             raw
+         };
+     }
+     /**
+      * Compute and return the raw Float32Array (faster, no conversion)
+      */
+     computeRaw(query, keys, values) {
+         return this.inner.compute(query, keys, values);
+     }
+     get headDim() {
+         return this.dim / this.numHeads;
+     }
+ }
+ exports.MultiHeadAttention = MultiHeadAttention;
+ /**
+  * Flash attention with tiled computation
+  */
+ class FlashAttention {
+     /**
+      * Create a new flash attention instance
+      *
+      * @param dim - Embedding dimension
+      * @param blockSize - Block size for tiled computation (default: 512)
+      */
+     constructor(dim, blockSize = 512) {
+         const attention = getAttentionModule();
+         this.inner = new attention.FlashAttention(dim, blockSize);
+         this.dim = dim;
+         this.blockSize = blockSize;
+     }
+     /**
+      * Compute flash attention
+      */
+     compute(query, keys, values) {
+         const raw = this.inner.compute(toFloat32Array(query), toFloat32Arrays(keys), toFloat32Arrays(values));
+         return {
+             values: fromFloat32Array(raw),
+             raw
+         };
+     }
+     computeRaw(query, keys, values) {
+         return this.inner.compute(query, keys, values);
+     }
+ }
+ exports.FlashAttention = FlashAttention;
+ /**
+  * Hyperbolic attention in the Poincaré ball model
+  */
+ class HyperbolicAttention {
+     /**
+      * Create a new hyperbolic attention instance
+      *
+      * @param dim - Embedding dimension
+      * @param curvature - Hyperbolic curvature (typically 1.0)
+      */
+     constructor(dim, curvature = 1.0) {
+         const attention = getAttentionModule();
+         this.inner = new attention.HyperbolicAttention(dim, curvature);
+         this.dim = dim;
+         this.curvature = curvature;
+     }
+     /**
+      * Compute hyperbolic attention
+      */
+     compute(query, keys, values) {
+         const raw = this.inner.compute(toFloat32Array(query), toFloat32Arrays(keys), toFloat32Arrays(values));
+         return {
+             values: fromFloat32Array(raw),
+             raw
+         };
+     }
+     computeRaw(query, keys, values) {
+         return this.inner.compute(query, keys, values);
+     }
+ }
+ exports.HyperbolicAttention = HyperbolicAttention;
+ /**
+  * Linear attention (Performer-style) with O(n) complexity
+  */
+ class LinearAttention {
+     /**
+      * Create a new linear attention instance
+      *
+      * @param dim - Embedding dimension
+      * @param numFeatures - Number of random features
+      */
+     constructor(dim, numFeatures) {
+         const attention = getAttentionModule();
+         this.inner = new attention.LinearAttention(dim, numFeatures);
+         this.dim = dim;
+         this.numFeatures = numFeatures;
+     }
+     /**
+      * Compute linear attention
+      */
+     compute(query, keys, values) {
+         const raw = this.inner.compute(toFloat32Array(query), toFloat32Arrays(keys), toFloat32Arrays(values));
+         return {
+             values: fromFloat32Array(raw),
+             raw
+         };
+     }
+     computeRaw(query, keys, values) {
+         return this.inner.compute(query, keys, values);
+     }
+ }
+ exports.LinearAttention = LinearAttention;
+ /**
+  * Local-global attention (Longformer-style)
+  */
+ class LocalGlobalAttention {
+     /**
+      * Create a new local-global attention instance
+      *
+      * @param dim - Embedding dimension
+      * @param localWindow - Size of the local attention window
+      * @param globalTokens - Number of global attention tokens
+      */
+     constructor(dim, localWindow, globalTokens) {
+         const attention = getAttentionModule();
+         this.inner = new attention.LocalGlobalAttention(dim, localWindow, globalTokens);
+         this.dim = dim;
+         this.localWindow = localWindow;
+         this.globalTokens = globalTokens;
+     }
+     /**
+      * Compute local-global attention
+      */
+     compute(query, keys, values) {
+         const raw = this.inner.compute(toFloat32Array(query), toFloat32Arrays(keys), toFloat32Arrays(values));
+         return {
+             values: fromFloat32Array(raw),
+             raw
+         };
+     }
+     computeRaw(query, keys, values) {
+         return this.inner.compute(query, keys, values);
+     }
+ }
+ exports.LocalGlobalAttention = LocalGlobalAttention;
+ /**
+  * Mixture-of-Experts attention
+  */
+ class MoEAttention {
+     /**
+      * Create a new MoE attention instance
+      *
+      * @param config - MoE configuration
+      */
+     constructor(config) {
+         const attention = getAttentionModule();
+         this.inner = new attention.MoEAttention({
+             dim: config.dim,
+             num_experts: config.numExperts,
+             top_k: config.topK,
+             expert_capacity: config.expertCapacity ?? 1.25,
+         });
+         this.config = config;
+     }
+     /**
+      * Create with simple parameters
+      */
+     static simple(dim, numExperts, topK) {
+         return new MoEAttention({ dim, numExperts, topK });
+     }
+     /**
+      * Compute MoE attention
+      */
+     compute(query, keys, values) {
+         const raw = this.inner.compute(toFloat32Array(query), toFloat32Arrays(keys), toFloat32Arrays(values));
+         return {
+             values: fromFloat32Array(raw),
+             raw
+         };
+     }
+     computeRaw(query, keys, values) {
+         return this.inner.compute(query, keys, values);
+     }
+ }
+ exports.MoEAttention = MoEAttention;
+ // Hyperbolic math utilities
+ /**
+  * Project a vector into the Poincaré ball
+  */
+ function projectToPoincareBall(vector, curvature = 1.0) {
+     const attention = getAttentionModule();
+     const result = attention.projectToPoincareBall(toFloat32Array(vector), curvature);
+     return fromFloat32Array(result);
+ }
+ /**
+  * Compute the hyperbolic (Poincaré) distance between two points
+  */
+ function poincareDistance(a, b, curvature = 1.0) {
+     const attention = getAttentionModule();
+     return attention.poincareDistance(toFloat32Array(a), toFloat32Array(b), curvature);
+ }
+ /**
+  * Möbius addition in hyperbolic space
+  */
+ function mobiusAddition(a, b, curvature = 1.0) {
+     const attention = getAttentionModule();
+     const result = attention.mobiusAddition(toFloat32Array(a), toFloat32Array(b), curvature);
+     return fromFloat32Array(result);
+ }
+ /**
+  * Exponential map from tangent space to hyperbolic space
+  */
+ function expMap(base, tangent, curvature = 1.0) {
+     const attention = getAttentionModule();
+     const result = attention.expMap(toFloat32Array(base), toFloat32Array(tangent), curvature);
+     return fromFloat32Array(result);
+ }
+ /**
+  * Logarithmic map from hyperbolic space to tangent space
+  */
+ function logMap(base, point, curvature = 1.0) {
+     const attention = getAttentionModule();
+     const result = attention.logMap(toFloat32Array(base), toFloat32Array(point), curvature);
+     return fromFloat32Array(result);
+ }
+ /**
+  * Check whether the attention module is available
+  */
+ function isAttentionAvailable() {
+     try {
+         getAttentionModule();
+         return true;
+     }
+     catch {
+         return false;
+     }
+ }
+ /**
+  * Get the attention module version
+  */
+ function getAttentionVersion() {
+     try {
+         const attention = getAttentionModule();
+         return attention.version?.() ?? null;
+     }
+     catch {
+         return null;
+     }
+ }
+ exports.default = {
+     MultiHeadAttention,
+     FlashAttention,
+     HyperbolicAttention,
+     LinearAttention,
+     LocalGlobalAttention,
+     MoEAttention,
+     projectToPoincareBall,
+     poincareDistance,
+     mobiusAddition,
+     expMap,
+     logMap,
+     isAttentionAvailable,
+     getAttentionVersion,
+ };
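
For orientation, here is a minimal usage sketch of the fallback wrapper added above. It is not from the package docs: the deep-import path is an assumption (the module may instead be re-exported from the package's core index), and it presumes the optional @ruvector/attention dependency is installed.

```typescript
// Hypothetical usage sketch; the import path below is an assumption.
import {
  MultiHeadAttention,
  isAttentionAvailable,
} from 'ruvector/dist/core/attention-fallbacks';

if (isAttentionAvailable()) {
  // dim = 8 is divisible by numHeads = 2, as the constructor requires
  const mha = new MultiHeadAttention(8, 2);

  // Plain arrays are converted to Float32Array by the wrapper
  const query = [1, 0, 0, 0, 0, 0, 0, 0];
  const keys = [query, [0, 1, 0, 0, 0, 0, 0, 0]];
  const values = [[1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2]];

  // Per the wrapper above: `values` is a plain array, `raw` the Float32Array
  const { values: out, raw } = mha.compute(query, keys, values);
  console.log(Array.isArray(out), raw instanceof Float32Array); // true true
} else {
  console.warn('@ruvector/attention not installed; attention features unavailable');
}
```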
package/dist/core/gnn-wrapper.d.ts
@@ -0,0 +1,143 @@
+ /**
+  * GNN Wrapper - Safe wrapper around @ruvector/gnn with automatic array conversion
+  *
+  * The native @ruvector/gnn requires Float32Array for maximum performance. This
+  * wrapper converts any input type to Float32Array automatically, allowing users
+  * to pass either regular arrays or Float32Arrays.
+  *
+  * Performance Tips:
+  * - Pass Float32Array directly for zero-copy performance
+  * - Use toFloat32Array/toFloat32ArrayBatch for pre-conversion
+  * - Avoid repeated conversions in hot paths
+  */
+ /**
+  * Convert any array-like input to Float32Array (the native module requires Float32Array)
+  * Optimized paths:
+  * - Float32Array: zero-copy return
+  * - Float64Array: efficient typed array copy
+  * - Array: direct Float32Array construction
+  */
+ export declare function toFloat32Array(input: number[] | Float32Array | Float64Array): Float32Array;
+ /**
+  * Convert an array of arrays to an array of Float32Arrays
+  */
+ export declare function toFloat32ArrayBatch(input: (number[] | Float32Array | Float64Array)[]): Float32Array[];
+ /**
+  * Search result from differentiable search
+  */
+ export interface DifferentiableSearchResult {
+     /** Indices of top-k candidates */
+     indices: number[];
+     /** Soft weights for top-k candidates */
+     weights: number[];
+ }
+ /**
+  * Differentiable search using a soft attention mechanism
+  *
+  * This wrapper automatically converts array inputs to Float32Array.
+  *
+  * @param query - Query vector (array or Float32Array)
+  * @param candidates - List of candidate vectors (arrays or Float32Arrays)
+  * @param k - Number of top results to return
+  * @param temperature - Temperature for softmax (lower = sharper, higher = smoother)
+  * @returns Search result with indices and soft weights
+  *
+  * @example
+  * ```typescript
+  * import { differentiableSearch } from 'ruvector/core/gnn-wrapper';
+  *
+  * // Works with regular arrays (auto-converted to Float32Array)
+  * const result1 = differentiableSearch([1, 0, 0], [[1, 0, 0], [0, 1, 0]], 2, 1.0);
+  *
+  * // For best performance, use Float32Array directly (zero-copy)
+  * const query = new Float32Array([1, 0, 0]);
+  * const candidates = [new Float32Array([1, 0, 0]), new Float32Array([0, 1, 0])];
+  * const result2 = differentiableSearch(query, candidates, 2, 1.0);
+  * ```
+  */
+ export declare function differentiableSearch(query: number[] | Float32Array | Float64Array, candidates: (number[] | Float32Array | Float64Array)[], k: number, temperature?: number): DifferentiableSearchResult;
+ /**
+  * GNN layer for HNSW topology
+  */
+ export declare class RuvectorLayer {
+     private inner;
+     /**
+      * Create a new Ruvector GNN layer
+      *
+      * @param inputDim - Dimension of input node embeddings
+      * @param hiddenDim - Dimension of hidden representations
+      * @param heads - Number of attention heads
+      * @param dropout - Dropout rate (0.0 to 1.0)
+      */
+     constructor(inputDim: number, hiddenDim: number, heads: number, dropout?: number);
+     /**
+      * Forward pass through the GNN layer
+      *
+      * @param nodeEmbedding - Current node's embedding
+      * @param neighborEmbeddings - Embeddings of neighbor nodes
+      * @param edgeWeights - Weights of edges to neighbors
+      * @returns Updated node embedding as Float32Array
+      */
+     forward(nodeEmbedding: number[] | Float32Array, neighborEmbeddings: (number[] | Float32Array)[], edgeWeights: number[] | Float32Array): Float32Array;
+     /**
+      * Serialize the layer to JSON
+      */
+     toJson(): string;
+     /**
+      * Deserialize a layer from JSON
+      */
+     static fromJson(json: string): RuvectorLayer;
+ }
+ /**
+  * Tensor compressor with adaptive level selection
+  */
+ export declare class TensorCompress {
+     private inner;
+     constructor();
+     /**
+      * Compress an embedding based on access frequency
+      *
+      * @param embedding - Input embedding vector
+      * @param accessFreq - Access frequency (0.0 to 1.0)
+      * @returns Compressed tensor as a JSON string
+      */
+     compress(embedding: number[] | Float32Array, accessFreq: number): string;
+     /**
+      * Decompress a compressed tensor
+      *
+      * @param compressedJson - Compressed tensor JSON
+      * @returns Decompressed embedding
+      */
+     decompress(compressedJson: string): number[];
+ }
+ /**
+  * Hierarchical forward pass through GNN layers
+  *
+  * @param query - Query vector
+  * @param layerEmbeddings - Embeddings organized by layer
+  * @param gnnLayersJson - JSON array of serialized GNN layers
+  * @returns Final embedding after hierarchical processing, as Float32Array
+  */
+ export declare function hierarchicalForward(query: number[] | Float32Array, layerEmbeddings: (number[] | Float32Array)[][], gnnLayersJson: string[]): Float32Array;
+ /**
+  * Get the compression level for a given access frequency
+  */
+ export declare function getCompressionLevel(accessFreq: number): string;
+ /**
+  * Check whether the GNN module is available
+  */
+ export declare function isGnnAvailable(): boolean;
+ declare const _default: {
+     differentiableSearch: typeof differentiableSearch;
+     RuvectorLayer: typeof RuvectorLayer;
+     TensorCompress: typeof TensorCompress;
+     hierarchicalForward: typeof hierarchicalForward;
+     getCompressionLevel: typeof getCompressionLevel;
+     isGnnAvailable: typeof isGnnAvailable;
+     toFloat32Array: typeof toFloat32Array;
+     toFloat32ArrayBatch: typeof toFloat32ArrayBatch;
+ };
+ export default _default;
+ //# sourceMappingURL=gnn-wrapper.d.ts.map
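
Likewise, a short sketch of the differentiable search API declared above, reusing the import path from its @example. The printed result is illustrative only, and the calls assume @ruvector/gnn is installed.

```typescript
import { differentiableSearch, isGnnAvailable } from 'ruvector/core/gnn-wrapper';

if (isGnnAvailable()) {
  const query = new Float32Array([1, 0, 0]); // zero-copy fast path
  const candidates = [
    new Float32Array([1, 0, 0]),
    new Float32Array([0, 1, 0]),
    [0.6, 0.8, 0], // plain arrays are converted automatically
  ];
  // Lower temperature sharpens the soft weights toward the nearest candidate
  const { indices, weights } = differentiableSearch(query, candidates, 2, 0.5);
  console.log(indices, weights); // e.g. [0, 2] plus a soft weight per index
}
```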
package/dist/core/gnn-wrapper.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"gnn-wrapper.d.ts","sourceRoot":"","sources":["../../src/core/gnn-wrapper.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;GAaG;AAsBH;;;;;;GAMG;AACH,wBAAgB,cAAc,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,YAAY,GAAG,YAAY,GAAG,YAAY,CAK1F;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,KAAK,EAAE,CAAC,MAAM,EAAE,GAAG,YAAY,GAAG,YAAY,CAAC,EAAE,GAAG,YAAY,EAAE,CAMrG;AAED;;GAEG;AACH,MAAM,WAAW,0BAA0B;IACzC,kCAAkC;IAClC,OAAO,EAAE,MAAM,EAAE,CAAC;IAClB,wCAAwC;IACxC,OAAO,EAAE,MAAM,EAAE,CAAC;CACnB;AAED;;;;;;;;;;;;;;;;;;;;;;;GAuBG;AACH,wBAAgB,oBAAoB,CAClC,KAAK,EAAE,MAAM,EAAE,GAAG,YAAY,GAAG,YAAY,EAC7C,UAAU,EAAE,CAAC,MAAM,EAAE,GAAG,YAAY,GAAG,YAAY,CAAC,EAAE,EACtD,CAAC,EAAE,MAAM,EACT,WAAW,GAAE,MAAY,GACxB,0BAA0B,CAQ5B;AAED;;GAEG;AACH,qBAAa,aAAa;IACxB,OAAO,CAAC,KAAK,CAAM;IAEnB;;;;;;;OAOG;gBACS,QAAQ,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,EAAE,OAAO,GAAE,MAAY;IAKrF;;;;;;;OAOG;IACH,OAAO,CACL,aAAa,EAAE,MAAM,EAAE,GAAG,YAAY,EACtC,kBAAkB,EAAE,CAAC,MAAM,EAAE,GAAG,YAAY,CAAC,EAAE,EAC/C,WAAW,EAAE,MAAM,EAAE,GAAG,YAAY,GACnC,YAAY;IAQf;;OAEG;IACH,MAAM,IAAI,MAAM;IAIhB;;OAEG;IACH,MAAM,CAAC,QAAQ,CAAC,IAAI,EAAE,MAAM,GAAG,aAAa;CAM7C;AAED;;GAEG;AACH,qBAAa,cAAc;IACzB,OAAO,CAAC,KAAK,CAAM;;IAOnB;;;;;;OAMG;IACH,QAAQ,CAAC,SAAS,EAAE,MAAM,EAAE,GAAG,YAAY,EAAE,UAAU,EAAE,MAAM,GAAG,MAAM;IAIxE;;;;;OAKG;IACH,UAAU,CAAC,cAAc,EAAE,MAAM,GAAG,MAAM,EAAE;CAG7C;AAED;;;;;;;GAOG;AACH,wBAAgB,mBAAmB,CACjC,KAAK,EAAE,MAAM,EAAE,GAAG,YAAY,EAC9B,eAAe,EAAE,CAAC,MAAM,EAAE,GAAG,YAAY,CAAC,EAAE,EAAE,EAC9C,aAAa,EAAE,MAAM,EAAE,GACtB,YAAY,CAOd;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM,CAG9D;AAED;;GAEG;AACH,wBAAgB,cAAc,IAAI,OAAO,CAOxC;;;;;;;;;;;AAED,wBAUE"}