@sparkleideas/neural 3.5.2-patch.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122) hide show
  1. package/README.md +260 -0
  2. package/__tests__/README.md +235 -0
  3. package/__tests__/algorithms.test.ts +582 -0
  4. package/__tests__/patterns.test.ts +549 -0
  5. package/__tests__/sona.test.ts +445 -0
  6. package/docs/SONA_INTEGRATION.md +460 -0
  7. package/docs/SONA_QUICKSTART.md +168 -0
  8. package/examples/sona-usage.ts +318 -0
  9. package/package.json +23 -0
  10. package/src/algorithms/a2c.d.ts +86 -0
  11. package/src/algorithms/a2c.d.ts.map +1 -0
  12. package/src/algorithms/a2c.js +361 -0
  13. package/src/algorithms/a2c.js.map +1 -0
  14. package/src/algorithms/a2c.ts +478 -0
  15. package/src/algorithms/curiosity.d.ts +82 -0
  16. package/src/algorithms/curiosity.d.ts.map +1 -0
  17. package/src/algorithms/curiosity.js +392 -0
  18. package/src/algorithms/curiosity.js.map +1 -0
  19. package/src/algorithms/curiosity.ts +509 -0
  20. package/src/algorithms/decision-transformer.d.ts +82 -0
  21. package/src/algorithms/decision-transformer.d.ts.map +1 -0
  22. package/src/algorithms/decision-transformer.js +415 -0
  23. package/src/algorithms/decision-transformer.js.map +1 -0
  24. package/src/algorithms/decision-transformer.ts +521 -0
  25. package/src/algorithms/dqn.d.ts +72 -0
  26. package/src/algorithms/dqn.d.ts.map +1 -0
  27. package/src/algorithms/dqn.js +303 -0
  28. package/src/algorithms/dqn.js.map +1 -0
  29. package/src/algorithms/dqn.ts +382 -0
  30. package/src/algorithms/index.d.ts +32 -0
  31. package/src/algorithms/index.d.ts.map +1 -0
  32. package/src/algorithms/index.js +74 -0
  33. package/src/algorithms/index.js.map +1 -0
  34. package/src/algorithms/index.ts +122 -0
  35. package/src/algorithms/ppo.d.ts +72 -0
  36. package/src/algorithms/ppo.d.ts.map +1 -0
  37. package/src/algorithms/ppo.js +331 -0
  38. package/src/algorithms/ppo.js.map +1 -0
  39. package/src/algorithms/ppo.ts +429 -0
  40. package/src/algorithms/q-learning.d.ts +77 -0
  41. package/src/algorithms/q-learning.d.ts.map +1 -0
  42. package/src/algorithms/q-learning.js +259 -0
  43. package/src/algorithms/q-learning.js.map +1 -0
  44. package/src/algorithms/q-learning.ts +333 -0
  45. package/src/algorithms/sarsa.d.ts +82 -0
  46. package/src/algorithms/sarsa.d.ts.map +1 -0
  47. package/src/algorithms/sarsa.js +297 -0
  48. package/src/algorithms/sarsa.js.map +1 -0
  49. package/src/algorithms/sarsa.ts +383 -0
  50. package/src/algorithms/tmp.json +0 -0
  51. package/src/application/index.ts +11 -0
  52. package/src/application/services/neural-application-service.ts +217 -0
  53. package/src/domain/entities/pattern.ts +169 -0
  54. package/src/domain/index.ts +18 -0
  55. package/src/domain/services/learning-service.ts +256 -0
  56. package/src/index.d.ts +118 -0
  57. package/src/index.d.ts.map +1 -0
  58. package/src/index.js +201 -0
  59. package/src/index.js.map +1 -0
  60. package/src/index.ts +363 -0
  61. package/src/modes/balanced.d.ts +60 -0
  62. package/src/modes/balanced.d.ts.map +1 -0
  63. package/src/modes/balanced.js +234 -0
  64. package/src/modes/balanced.js.map +1 -0
  65. package/src/modes/balanced.ts +299 -0
  66. package/src/modes/base.ts +163 -0
  67. package/src/modes/batch.d.ts +82 -0
  68. package/src/modes/batch.d.ts.map +1 -0
  69. package/src/modes/batch.js +316 -0
  70. package/src/modes/batch.js.map +1 -0
  71. package/src/modes/batch.ts +434 -0
  72. package/src/modes/edge.d.ts +85 -0
  73. package/src/modes/edge.d.ts.map +1 -0
  74. package/src/modes/edge.js +310 -0
  75. package/src/modes/edge.js.map +1 -0
  76. package/src/modes/edge.ts +409 -0
  77. package/src/modes/index.d.ts +55 -0
  78. package/src/modes/index.d.ts.map +1 -0
  79. package/src/modes/index.js +83 -0
  80. package/src/modes/index.js.map +1 -0
  81. package/src/modes/index.ts +16 -0
  82. package/src/modes/real-time.d.ts +58 -0
  83. package/src/modes/real-time.d.ts.map +1 -0
  84. package/src/modes/real-time.js +196 -0
  85. package/src/modes/real-time.js.map +1 -0
  86. package/src/modes/real-time.ts +257 -0
  87. package/src/modes/research.d.ts +79 -0
  88. package/src/modes/research.d.ts.map +1 -0
  89. package/src/modes/research.js +389 -0
  90. package/src/modes/research.js.map +1 -0
  91. package/src/modes/research.ts +486 -0
  92. package/src/modes/tmp.json +0 -0
  93. package/src/pattern-learner.d.ts +117 -0
  94. package/src/pattern-learner.d.ts.map +1 -0
  95. package/src/pattern-learner.js +603 -0
  96. package/src/pattern-learner.js.map +1 -0
  97. package/src/pattern-learner.ts +757 -0
  98. package/src/reasoning-bank.d.ts +259 -0
  99. package/src/reasoning-bank.d.ts.map +1 -0
  100. package/src/reasoning-bank.js +993 -0
  101. package/src/reasoning-bank.js.map +1 -0
  102. package/src/reasoning-bank.ts +1279 -0
  103. package/src/reasoningbank-adapter.ts +697 -0
  104. package/src/sona-integration.d.ts +168 -0
  105. package/src/sona-integration.d.ts.map +1 -0
  106. package/src/sona-integration.js +316 -0
  107. package/src/sona-integration.js.map +1 -0
  108. package/src/sona-integration.ts +432 -0
  109. package/src/sona-manager.d.ts +147 -0
  110. package/src/sona-manager.d.ts.map +1 -0
  111. package/src/sona-manager.js +695 -0
  112. package/src/sona-manager.js.map +1 -0
  113. package/src/sona-manager.ts +835 -0
  114. package/src/tmp.json +0 -0
  115. package/src/types.d.ts +431 -0
  116. package/src/types.d.ts.map +1 -0
  117. package/src/types.js +11 -0
  118. package/src/types.js.map +1 -0
  119. package/src/types.ts +590 -0
  120. package/tmp.json +0 -0
  121. package/tsconfig.json +9 -0
  122. package/vitest.config.ts +19 -0
@@ -0,0 +1,434 @@
1
+ /**
2
+ * Batch Mode Implementation
3
+ *
4
+ * Optimized for high-throughput processing with:
5
+ * - Large batch sizes (128)
6
+ * - Rank-8 LoRA
7
+ * - Gradient accumulation
8
+ * - Async batch processing
9
+ * - 50ms latency budget
10
+ */
11
+
12
+ import type {
13
+ SONAModeConfig,
14
+ ModeOptimizations,
15
+ Trajectory,
16
+ Pattern,
17
+ PatternMatch,
18
+ LoRAWeights,
19
+ EWCState,
20
+ } from '../types.js';
21
+ import { BaseModeImplementation } from './base.js';
22
+
23
+ /**
24
+ * Batch mode for high-throughput processing
25
+ */
26
+ export class BatchMode extends BaseModeImplementation {
27
+ readonly mode = 'batch';
28
+
29
+ // Batch processing queues
30
+ private patternQueue: Array<{
31
+ embedding: Float32Array;
32
+ k: number;
33
+ resolve: (matches: PatternMatch[]) => void;
34
+ }> = [];
35
+ private learningQueue: Trajectory[] = [];
36
+
37
+ // Batch buffers
38
+ private embeddingBuffer: Float32Array | null = null;
39
+ private batchEmbeddings: Float32Array[] = [];
40
+
41
+ // Gradient accumulation
42
+ private accumulatedGradients: Map<string, Float32Array> = new Map();
43
+ private gradientSteps = 0;
44
+
45
+ // Batch processing state
46
+ private isBatchProcessing = false;
47
+ private batchTimer: ReturnType<typeof setTimeout> | null = null;
48
+
49
+ // Stats
50
+ private totalBatches = 0;
51
+ private totalItems = 0;
52
+ private totalBatchTime = 0;
53
+ private learnIterations = 0;
54
+
55
+ async initialize(): Promise<void> {
56
+ await super.initialize();
57
+ this.patternQueue = [];
58
+ this.learningQueue = [];
59
+ this.accumulatedGradients.clear();
60
+ this.gradientSteps = 0;
61
+ }
62
+
63
+ async cleanup(): Promise<void> {
64
+ if (this.batchTimer) {
65
+ clearTimeout(this.batchTimer);
66
+ }
67
+ this.patternQueue = [];
68
+ this.learningQueue = [];
69
+ this.accumulatedGradients.clear();
70
+ await super.cleanup();
71
+ }
72
+
73
+ /**
74
+ * Find patterns - queues for batch processing
75
+ */
76
+ async findPatterns(
77
+ embedding: Float32Array,
78
+ k: number,
79
+ patterns: Pattern[]
80
+ ): Promise<PatternMatch[]> {
81
+ // For immediate needs, process synchronously
82
+ if (patterns.length < 100) {
83
+ return this.findPatternsDirect(embedding, k, patterns);
84
+ }
85
+
86
+ // Queue for batch processing
87
+ return new Promise(resolve => {
88
+ this.patternQueue.push({ embedding, k, resolve });
89
+ this.scheduleBatchProcessing(patterns);
90
+ });
91
+ }
92
+
93
+ /**
94
+ * Learn from trajectories - accumulates for batch
95
+ */
96
+ async learn(
97
+ trajectories: Trajectory[],
98
+ config: SONAModeConfig,
99
+ ewcState: EWCState
100
+ ): Promise<number> {
101
+ const startTime = performance.now();
102
+
103
+ if (trajectories.length === 0) return 0;
104
+
105
+ // Add to learning queue
106
+ this.learningQueue.push(...trajectories);
107
+
108
+ // Process when queue is full
109
+ if (this.learningQueue.length >= config.batchSize) {
110
+ return this.processBatchLearning(config, ewcState);
111
+ }
112
+
113
+ // Return estimated improvement
114
+ const avgQuality = trajectories.reduce((s, t) => s + t.qualityScore, 0) / trajectories.length;
115
+
116
+ this.totalBatchTime += performance.now() - startTime;
117
+ return Math.max(0, avgQuality - 0.5) * 0.5; // Partial estimate
118
+ }
119
+
120
+ /**
121
+ * Apply LoRA with rank-8
122
+ */
123
+ async applyLoRA(
124
+ input: Float32Array,
125
+ weights?: LoRAWeights
126
+ ): Promise<Float32Array> {
127
+ if (!weights) {
128
+ return input;
129
+ }
130
+
131
+ // Batch mode can process multiple inputs efficiently
132
+ this.batchEmbeddings.push(new Float32Array(input));
133
+
134
+ // Process immediately for single requests
135
+ if (this.batchEmbeddings.length === 1) {
136
+ const output = await this.applyLoRADirect(input, weights);
137
+ this.batchEmbeddings = [];
138
+ return output;
139
+ }
140
+
141
+ // For multiple inputs, process as batch
142
+ const outputs = await this.applyLoRABatch(this.batchEmbeddings, weights);
143
+ this.batchEmbeddings = [];
144
+ return outputs[outputs.length - 1];
145
+ }
146
+
147
+ getStats(): Record<string, number> {
148
+ return {
149
+ totalBatches: this.totalBatches,
150
+ avgItemsPerBatch: this.totalBatches > 0 ? this.totalItems / this.totalBatches : 0,
151
+ avgBatchTimeMs: this.totalBatches > 0 ? this.totalBatchTime / this.totalBatches : 0,
152
+ pendingPatternRequests: this.patternQueue.length,
153
+ pendingTrajectories: this.learningQueue.length,
154
+ accumulatedGradientSteps: this.gradientSteps,
155
+ learnIterations: this.learnIterations,
156
+ };
157
+ }
158
+
159
+ // ========================================================================
160
+ // Direct processing (for small batches)
161
+ // ========================================================================
162
+
163
+ /**
164
+ * Direct pattern matching without batching
165
+ */
166
+ private findPatternsDirect(
167
+ embedding: Float32Array,
168
+ k: number,
169
+ patterns: Pattern[]
170
+ ): PatternMatch[] {
171
+ const matches: PatternMatch[] = [];
172
+
173
+ for (const pattern of patterns) {
174
+ const similarity = this.cosineSimilarity(embedding, pattern.embedding);
175
+ matches.push({
176
+ pattern,
177
+ similarity,
178
+ confidence: similarity * pattern.successRate,
179
+ latencyMs: 0,
180
+ });
181
+ }
182
+
183
+ matches.sort((a, b) => b.similarity - a.similarity);
184
+ return matches.slice(0, k);
185
+ }
186
+
187
+ /**
188
+ * Direct LoRA application
189
+ */
190
+ private async applyLoRADirect(
191
+ input: Float32Array,
192
+ weights: LoRAWeights
193
+ ): Promise<Float32Array> {
194
+ const output = new Float32Array(input.length);
195
+ output.set(input);
196
+
197
+ const rank = this.config.loraRank;
198
+
199
+ for (const module of ['q_proj', 'v_proj', 'k_proj', 'o_proj']) {
200
+ const A = weights.A.get(module);
201
+ const B = weights.B.get(module);
202
+
203
+ if (A && B) {
204
+ const adapted = this.applyLoRATransform(input, A, B, rank);
205
+ const alpha = 0.25;
206
+ for (let i = 0; i < output.length; i++) {
207
+ output[i] = output[i] * (1 - alpha) + adapted[i] * alpha;
208
+ }
209
+ }
210
+ }
211
+
212
+ return output;
213
+ }
214
+
215
+ // ========================================================================
216
+ // Batch processing
217
+ // ========================================================================
218
+
219
+ /**
220
+ * Schedule batch processing
221
+ */
222
+ private scheduleBatchProcessing(patterns: Pattern[]): void {
223
+ if (this.batchTimer) return;
224
+
225
+ this.batchTimer = setTimeout(() => {
226
+ this.processBatchPatterns(patterns);
227
+ }, 10); // Wait 10ms to accumulate requests
228
+ }
229
+
230
+ /**
231
+ * Process pattern requests in batch
232
+ */
233
+ private async processBatchPatterns(patterns: Pattern[]): Promise<void> {
234
+ this.batchTimer = null;
235
+ if (this.patternQueue.length === 0) return;
236
+
237
+ const startTime = performance.now();
238
+ this.isBatchProcessing = true;
239
+
240
+ const batch = this.patternQueue;
241
+ this.patternQueue = [];
242
+
243
+ // Pre-compute pattern embeddings matrix
244
+ const patternMatrix = patterns.map(p => p.embedding);
245
+
246
+ // Process all queries in batch
247
+ for (const request of batch) {
248
+ const matches = this.batchSimilaritySearch(
249
+ request.embedding,
250
+ request.k,
251
+ patterns,
252
+ patternMatrix
253
+ );
254
+ request.resolve(matches);
255
+ }
256
+
257
+ this.totalBatches++;
258
+ this.totalItems += batch.length;
259
+ this.totalBatchTime += performance.now() - startTime;
260
+ this.isBatchProcessing = false;
261
+ }
262
+
263
+ /**
264
+ * Batch similarity search
265
+ */
266
+ private batchSimilaritySearch(
267
+ query: Float32Array,
268
+ k: number,
269
+ patterns: Pattern[],
270
+ patternMatrix: Float32Array[]
271
+ ): PatternMatch[] {
272
+ const similarities: Array<{ idx: number; sim: number }> = [];
273
+
274
+ for (let i = 0; i < patternMatrix.length; i++) {
275
+ const sim = this.cosineSimilarity(query, patternMatrix[i]);
276
+ similarities.push({ idx: i, sim });
277
+ }
278
+
279
+ similarities.sort((a, b) => b.sim - a.sim);
280
+ const topK = similarities.slice(0, k);
281
+
282
+ return topK.map(s => ({
283
+ pattern: patterns[s.idx],
284
+ similarity: s.sim,
285
+ confidence: s.sim * patterns[s.idx].successRate,
286
+ latencyMs: 0,
287
+ }));
288
+ }
289
+
290
+ /**
291
+ * Process batch learning
292
+ */
293
+ private async processBatchLearning(
294
+ config: SONAModeConfig,
295
+ ewcState: EWCState
296
+ ): Promise<number> {
297
+ const startTime = performance.now();
298
+
299
+ const batch = this.learningQueue.slice(0, config.batchSize);
300
+ this.learningQueue = this.learningQueue.slice(config.batchSize);
301
+
302
+ const qualityThreshold = config.qualityThreshold;
303
+ const learningRate = config.learningRate;
304
+
305
+ // Separate by quality
306
+ const good = batch.filter(t => t.qualityScore >= qualityThreshold);
307
+ const bad = batch.filter(t => t.qualityScore < qualityThreshold);
308
+
309
+ if (good.length === 0) {
310
+ this.totalBatchTime += performance.now() - startTime;
311
+ return 0;
312
+ }
313
+
314
+ // Accumulate gradients
315
+ for (const trajectory of good) {
316
+ this.accumulateTrajectoryGradient(trajectory, learningRate);
317
+ }
318
+
319
+ // Contrastive learning from bad examples
320
+ for (const trajectory of bad.slice(0, good.length)) {
321
+ this.accumulateTrajectoryGradient(trajectory, -learningRate * 0.3);
322
+ }
323
+
324
+ this.gradientSteps++;
325
+
326
+ // Apply accumulated gradients every N steps
327
+ if (this.gradientSteps >= 4) {
328
+ await this.applyAccumulatedGradients(ewcState, config.ewcLambda);
329
+ this.gradientSteps = 0;
330
+ }
331
+
332
+ // Compute improvement
333
+ const avgQuality = good.reduce((s, t) => s + t.qualityScore, 0) / good.length;
334
+ const improvement = avgQuality - 0.5;
335
+
336
+ this.learnIterations++;
337
+ this.totalBatchTime += performance.now() - startTime;
338
+
339
+ return Math.max(0, improvement);
340
+ }
341
+
342
+ /**
343
+ * Accumulate gradient from trajectory
344
+ */
345
+ private accumulateTrajectoryGradient(
346
+ trajectory: Trajectory,
347
+ scale: number
348
+ ): void {
349
+ if (trajectory.steps.length === 0) return;
350
+
351
+ const key = trajectory.domain;
352
+ let gradient = this.accumulatedGradients.get(key);
353
+
354
+ if (!gradient) {
355
+ const dim = trajectory.steps[0].stateAfter.length;
356
+ gradient = new Float32Array(dim);
357
+ this.accumulatedGradients.set(key, gradient);
358
+ }
359
+
360
+ // Add trajectory contribution
361
+ const weight = trajectory.qualityScore * scale;
362
+ for (const step of trajectory.steps) {
363
+ for (let i = 0; i < Math.min(gradient.length, step.stateAfter.length); i++) {
364
+ gradient[i] += step.stateAfter[i] * weight * step.reward;
365
+ }
366
+ }
367
+ }
368
+
369
+ /**
370
+ * Apply accumulated gradients with EWC
371
+ */
372
+ private async applyAccumulatedGradients(
373
+ ewcState: EWCState,
374
+ ewcLambda: number
375
+ ): Promise<void> {
376
+ for (const [key, gradient] of this.accumulatedGradients) {
377
+ // Normalize gradient
378
+ const norm = Math.sqrt(gradient.reduce((s, v) => s + v * v, 0));
379
+ if (norm > 0) {
380
+ for (let i = 0; i < gradient.length; i++) {
381
+ gradient[i] /= norm;
382
+ }
383
+ }
384
+
385
+ // Apply EWC penalty
386
+ const fisher = ewcState.fisher.get(key);
387
+ const means = ewcState.means.get(key);
388
+
389
+ if (fisher && means) {
390
+ for (let i = 0; i < gradient.length; i++) {
391
+ const penalty = ewcLambda * fisher[i] * (gradient[i] - means[i]);
392
+ gradient[i] -= penalty;
393
+ }
394
+ }
395
+
396
+ // Clear gradient for next accumulation
397
+ gradient.fill(0);
398
+ }
399
+ }
400
+
401
+ /**
402
+ * Apply LoRA to batch of inputs
403
+ */
404
+ private async applyLoRABatch(
405
+ inputs: Float32Array[],
406
+ weights: LoRAWeights
407
+ ): Promise<Float32Array[]> {
408
+ const outputs: Float32Array[] = [];
409
+ const rank = this.config.loraRank;
410
+
411
+ // Process all inputs together for cache efficiency
412
+ for (const input of inputs) {
413
+ const output = new Float32Array(input.length);
414
+ output.set(input);
415
+
416
+ for (const module of ['q_proj', 'v_proj', 'k_proj', 'o_proj']) {
417
+ const A = weights.A.get(module);
418
+ const B = weights.B.get(module);
419
+
420
+ if (A && B) {
421
+ const adapted = this.applyLoRATransform(input, A, B, rank);
422
+ const alpha = 0.25;
423
+ for (let i = 0; i < output.length; i++) {
424
+ output[i] = output[i] * (1 - alpha) + adapted[i] * alpha;
425
+ }
426
+ }
427
+ }
428
+
429
+ outputs.push(output);
430
+ }
431
+
432
+ return outputs;
433
+ }
434
+ }
@@ -0,0 +1,85 @@
1
/**
 * Edge Mode Implementation
 *
 * Optimized for resource-constrained environments with:
 * - <5MB memory footprint
 * - Minimal latency (<1ms)
 * - Micro-LoRA (rank-1)
 * - Aggressive pruning
 * - Async updates
 */
import type { SONAModeConfig, Trajectory, Pattern, PatternMatch, LoRAWeights, EWCState } from '../types.js';
import { BaseModeImplementation } from './index.js';
/**
 * Edge mode for resource-constrained devices.
 *
 * Generated declaration file (edge.d.ts) for edge.ts; method bodies are
 * not visible here, so member notes below are inferred from names and
 * doc comments — verify against the implementation.
 */
export declare class EdgeMode extends BaseModeImplementation {
    readonly mode = "edge";
    // Patterns stored in a compressed 8-bit form (see createCompressedPattern).
    private compressedPatterns;
    // Cache of int8-quantized LoRA weights plus their scale factor
    // (see quantizeWeights / getOrQuantize).
    private quantizedWeights;
    private quantizationScale;
    // Work deferred for asynchronous application (see queueAsyncUpdate)
    // and the timer that drives processAsyncUpdates.
    private pendingUpdates;
    private updateTimer;
    // Counters backing getStats().
    private totalOps;
    private totalTime;
    initialize(): Promise<void>;
    cleanup(): Promise<void>;
    /**
     * Find patterns using compressed embeddings
     */
    findPatterns(embedding: Float32Array, k: number, patterns: Pattern[]): Promise<PatternMatch[]>;
    /**
     * Lightweight learning with async updates
     */
    learn(trajectories: Trajectory[], config: SONAModeConfig, ewcState: EWCState): Promise<number>;
    /**
     * Apply quantized LoRA
     */
    applyLoRA(input: Float32Array, weights?: LoRAWeights): Promise<Float32Array>;
    getStats(): Record<string, number>;
    /**
     * Compress embedding to 8-bit representation
     */
    private compressEmbedding;
    /**
     * Create compressed pattern representation
     */
    private createCompressedPattern;
    /**
     * Fast similarity on compressed embeddings
     */
    private compressedSimilarity;
    /**
     * Get or create quantized weights
     */
    private getOrQuantize;
    /**
     * Quantize float weights to int8
     */
    private quantizeWeights;
    /**
     * Apply LoRA with quantized weights
     */
    private applyQuantizedLoRA;
    /**
     * Queue an async update
     */
    private queueAsyncUpdate;
    /**
     * Process pending async updates
     */
    private processAsyncUpdates;
    /**
     * Perform lightweight parameter update
     */
    private performLightweightUpdate;
    /**
     * Find most similar compressed pattern
     */
    private findSimilarCompressedPattern;
    /**
     * Estimate memory usage in MB
     */
    private estimateMemoryUsage;
}
//# sourceMappingURL=edge.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"edge.d.ts","sourceRoot":"","sources":["edge.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG;AAEH,OAAO,KAAK,EACV,cAAc,EAEd,UAAU,EACV,OAAO,EACP,YAAY,EACZ,WAAW,EACX,QAAQ,EACT,MAAM,aAAa,CAAC;AACrB,OAAO,EAAE,sBAAsB,EAAE,MAAM,YAAY,CAAC;AAEpD;;GAEG;AACH,qBAAa,QAAS,SAAQ,sBAAsB;IAClD,QAAQ,CAAC,IAAI,UAAU;IAGvB,OAAO,CAAC,kBAAkB,CAA6C;IAGvE,OAAO,CAAC,gBAAgB,CAAqC;IAC7D,OAAO,CAAC,iBAAiB,CAAe;IAGxC,OAAO,CAAC,cAAc,CAAkC;IACxD,OAAO,CAAC,WAAW,CAA8C;IAGjE,OAAO,CAAC,QAAQ,CAAK;IACrB,OAAO,CAAC,SAAS,CAAK;IAEhB,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC;IAO3B,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAU9B;;OAEG;IACG,YAAY,CAChB,SAAS,EAAE,YAAY,EACvB,CAAC,EAAE,MAAM,EACT,QAAQ,EAAE,OAAO,EAAE,GAClB,OAAO,CAAC,YAAY,EAAE,CAAC;IAoC1B;;OAEG;IACG,KAAK,CACT,YAAY,EAAE,UAAU,EAAE,EAC1B,MAAM,EAAE,cAAc,EACtB,QAAQ,EAAE,QAAQ,GACjB,OAAO,CAAC,MAAM,CAAC;IAwBlB;;OAEG;IACG,SAAS,CACb,KAAK,EAAE,YAAY,EACnB,OAAO,CAAC,EAAE,WAAW,GACpB,OAAO,CAAC,YAAY,CAAC;IA6BxB,QAAQ,IAAI,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;IAclC;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAYzB;;OAEG;IACH,OAAO,CAAC,uBAAuB;IAS/B;;OAEG;IACH,OAAO,CAAC,oBAAoB;IAmB5B;;OAEG;IACH,OAAO,CAAC,aAAa;IASrB;;OAEG;IACH,OAAO,CAAC,eAAe;IAYvB;;OAEG;IACH,OAAO,CAAC,kBAAkB;IA6B1B;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAWxB;;OAEG;YACW,mBAAmB;IAwBjC;;OAEG;YACW,wBAAwB;IAuBtC;;OAEG;IACH,OAAO,CAAC,4BAA4B;IAgBpC;;OAEG;IACH,OAAO,CAAC,mBAAmB;CAkB5B"}