agentic-qe 2.2.0 → 2.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/qe-security-scanner.md +26 -0
- package/.claude/skills/agentic-quality-engineering/SKILL.md +4 -4
- package/.claude/skills/cicd-pipeline-qe-orchestrator/README.md +14 -11
- package/.claude/skills/skills-manifest.json +2 -2
- package/CHANGELOG.md +75 -0
- package/README.md +92 -214
- package/dist/agents/BaseAgent.d.ts +5 -1
- package/dist/agents/BaseAgent.d.ts.map +1 -1
- package/dist/agents/BaseAgent.js +32 -17
- package/dist/agents/BaseAgent.js.map +1 -1
- package/dist/agents/index.js +3 -3
- package/dist/agents/index.js.map +1 -1
- package/dist/cli/commands/improve/index.d.ts +8 -1
- package/dist/cli/commands/improve/index.d.ts.map +1 -1
- package/dist/cli/commands/improve/index.js +18 -16
- package/dist/cli/commands/improve/index.js.map +1 -1
- package/dist/cli/commands/learn/index.d.ts +10 -2
- package/dist/cli/commands/learn/index.d.ts.map +1 -1
- package/dist/cli/commands/learn/index.js +99 -63
- package/dist/cli/commands/learn/index.js.map +1 -1
- package/dist/cli/commands/patterns/index.d.ts +8 -1
- package/dist/cli/commands/patterns/index.d.ts.map +1 -1
- package/dist/cli/commands/patterns/index.js +79 -45
- package/dist/cli/commands/patterns/index.js.map +1 -1
- package/dist/cli/commands/routing/index.d.ts +5 -0
- package/dist/cli/commands/routing/index.d.ts.map +1 -1
- package/dist/cli/commands/routing/index.js +11 -10
- package/dist/cli/commands/routing/index.js.map +1 -1
- package/dist/cli/init/agents.d.ts +1 -1
- package/dist/cli/init/agents.js +2 -2
- package/dist/cli/init/database-init.d.ts +7 -0
- package/dist/cli/init/database-init.d.ts.map +1 -1
- package/dist/cli/init/database-init.js +29 -48
- package/dist/cli/init/database-init.js.map +1 -1
- package/dist/core/memory/HNSWVectorMemory.d.ts +261 -0
- package/dist/core/memory/HNSWVectorMemory.d.ts.map +1 -0
- package/dist/core/memory/HNSWVectorMemory.js +647 -0
- package/dist/core/memory/HNSWVectorMemory.js.map +1 -0
- package/dist/core/memory/SwarmMemoryManager.d.ts +7 -0
- package/dist/core/memory/SwarmMemoryManager.d.ts.map +1 -1
- package/dist/core/memory/SwarmMemoryManager.js +9 -0
- package/dist/core/memory/SwarmMemoryManager.js.map +1 -1
- package/dist/core/memory/index.d.ts +2 -0
- package/dist/core/memory/index.d.ts.map +1 -1
- package/dist/core/memory/index.js +11 -1
- package/dist/core/memory/index.js.map +1 -1
- package/dist/learning/ExplainableLearning.d.ts +191 -0
- package/dist/learning/ExplainableLearning.d.ts.map +1 -0
- package/dist/learning/ExplainableLearning.js +441 -0
- package/dist/learning/ExplainableLearning.js.map +1 -0
- package/dist/learning/GossipPatternSharingProtocol.d.ts +228 -0
- package/dist/learning/GossipPatternSharingProtocol.d.ts.map +1 -0
- package/dist/learning/GossipPatternSharingProtocol.js +590 -0
- package/dist/learning/GossipPatternSharingProtocol.js.map +1 -0
- package/dist/learning/LearningEngine.d.ts +4 -4
- package/dist/learning/LearningEngine.d.ts.map +1 -1
- package/dist/learning/LearningEngine.js +20 -13
- package/dist/learning/LearningEngine.js.map +1 -1
- package/dist/learning/PerformanceOptimizer.d.ts +268 -0
- package/dist/learning/PerformanceOptimizer.d.ts.map +1 -0
- package/dist/learning/PerformanceOptimizer.js +552 -0
- package/dist/learning/PerformanceOptimizer.js.map +1 -0
- package/dist/learning/PrivacyManager.d.ts +197 -0
- package/dist/learning/PrivacyManager.d.ts.map +1 -0
- package/dist/learning/PrivacyManager.js +551 -0
- package/dist/learning/PrivacyManager.js.map +1 -0
- package/dist/learning/TransferLearningManager.d.ts +212 -0
- package/dist/learning/TransferLearningManager.d.ts.map +1 -0
- package/dist/learning/TransferLearningManager.js +497 -0
- package/dist/learning/TransferLearningManager.js.map +1 -0
- package/dist/learning/algorithms/MAMLMetaLearner.d.ts +218 -0
- package/dist/learning/algorithms/MAMLMetaLearner.d.ts.map +1 -0
- package/dist/learning/algorithms/MAMLMetaLearner.js +532 -0
- package/dist/learning/algorithms/MAMLMetaLearner.js.map +1 -0
- package/dist/learning/algorithms/index.d.ts +4 -1
- package/dist/learning/algorithms/index.d.ts.map +1 -1
- package/dist/learning/algorithms/index.js +7 -1
- package/dist/learning/algorithms/index.js.map +1 -1
- package/dist/learning/index.d.ts +8 -0
- package/dist/learning/index.d.ts.map +1 -1
- package/dist/learning/index.js +17 -1
- package/dist/learning/index.js.map +1 -1
- package/dist/mcp/server-instructions.d.ts +1 -1
- package/dist/mcp/server-instructions.js +1 -1
- package/dist/providers/HybridRouter.d.ts +217 -0
- package/dist/providers/HybridRouter.d.ts.map +1 -0
- package/dist/providers/HybridRouter.js +679 -0
- package/dist/providers/HybridRouter.js.map +1 -0
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/index.d.ts.map +1 -1
- package/dist/providers/index.js +7 -1
- package/dist/providers/index.js.map +1 -1
- package/dist/telemetry/LearningTelemetry.d.ts +190 -0
- package/dist/telemetry/LearningTelemetry.d.ts.map +1 -0
- package/dist/telemetry/LearningTelemetry.js +403 -0
- package/dist/telemetry/LearningTelemetry.js.map +1 -0
- package/dist/telemetry/index.d.ts +1 -0
- package/dist/telemetry/index.d.ts.map +1 -1
- package/dist/telemetry/index.js +20 -2
- package/dist/telemetry/index.js.map +1 -1
- package/dist/telemetry/instrumentation/agent.d.ts +1 -1
- package/dist/telemetry/instrumentation/agent.js +1 -1
- package/dist/telemetry/instrumentation/index.d.ts +1 -1
- package/dist/telemetry/instrumentation/index.js +1 -1
- package/dist/utils/math.d.ts +11 -0
- package/dist/utils/math.d.ts.map +1 -0
- package/dist/utils/math.js +16 -0
- package/dist/utils/math.js.map +1 -0
- package/docs/reference/agents.md +1 -1
- package/docs/reference/skills.md +3 -3
- package/docs/reference/usage.md +4 -4
- package/package.json +14 -1
|
@@ -0,0 +1,532 @@
|
|
|
1
|
+
"use strict";
/**
 * MAMLMetaLearner - Model-Agnostic Meta-Learning for QE Agents
 *
 * Implements MAML-style meta-learning that enables agents to "learn how to learn".
 * Agents can quickly adapt to new testing domains with just 5-10 examples.
 *
 * Key Concepts:
 * - Inner Loop: Fast adaptation to new task with few gradient steps (5-10 examples)
 * - Outer Loop: Learn initialization parameters that enable fast adaptation
 * - Meta-Learning: After seeing few examples of new test pattern, agent performs well
 *
 * Algorithm:
 * 1. Initialize meta-parameters θ (Q-table initialization)
 * 2. For each task Ti in task distribution:
 *    a. Sample K examples from Ti (support set)
 *    b. Adapt: θ'i = θ - α∇Loss(θ, support) [inner loop]
 *    c. Evaluate on query set from Ti
 * 3. Update meta-parameters: θ = θ - β∇Loss(θ', query) [outer loop]
 * 4. Result: θ is optimized for fast adaptation to new tasks
 *
 * Use Cases:
 * - New testing framework adoption (5-10 examples → proficient)
 * - New project domain (few examples → effective testing strategy)
 * - API testing → UI testing transfer learning
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.MAMLMetaLearner = void 0;
// Function declaration hoists, so the binding below is valid before the
// definition appears later in the file.
exports.createDefaultMAMLConfig = createDefaultMAMLConfig;
const AbstractRLLearner_1 = require("./AbstractRLLearner");
/**
 * Default MAML configuration.
 *
 * NOTE(review): this object is shared module state; callers should treat it
 * as read-only (see createDefaultMAMLConfig).
 */
const DEFAULT_MAML_CONFIG = {
    // Base RL config (consumed by AbstractRLLearner)
    learningRate: 0.01, // Outer loop meta-learning rate (β)
    discountFactor: 0.95,
    explorationRate: 0.2, // Lower exploration for meta-learning
    explorationDecay: 0.99,
    minExplorationRate: 0.05,
    useExperienceReplay: false, // MAML uses episodic learning
    replayBufferSize: 1000,
    batchSize: 10, // Task batch size
    // MAML-specific
    innerLearningRate: 0.1, // α - higher for fast adaptation
    innerSteps: 5, // Few-shot learning (5 gradient steps)
    metaLearningRate: 0.01, // β - meta-parameter update
    minTaskExamples: 5, // Support set size (few-shot)
    maxTaskExamples: 10,
    taskBatchSize: 5, // Meta-train on 5 tasks per batch
    firstOrderApproximation: true // Faster computation
};
|
|
53
|
+
/**
 * MAMLMetaLearner - Model-Agnostic Meta-Learning for Fast Adaptation
 *
 * Learns an initialization of Q-values that enables rapid adaptation to new
 * testing tasks with minimal examples (5-10 shots).
 *
 * Example:
 * - Agent trained on Jest, Mocha, Jasmine unit testing
 * - Sees 5-10 examples of Vitest tests
 * - Immediately generates high-quality Vitest tests
 *
 * Meta-Learning Process:
 * 1. Sample batch of tasks (different testing scenarios)
 * 2. For each task:
 *    - Adapt Q-table with support set (inner loop)
 *    - Evaluate adapted Q-table on query set
 * 3. Compute meta-gradient from all tasks
 * 4. Update meta-parameters (Q-table initialization)
 */
class MAMLMetaLearner extends AbstractRLLearner_1.AbstractRLLearner {
    /**
     * @param {object} [config={}] Partial MAML config; merged over DEFAULT_MAML_CONFIG.
     */
    constructor(config = {}) {
        const fullConfig = { ...DEFAULT_MAML_CONFIG, ...config };
        super(fullConfig);
        this.mamlConfig = fullConfig;
        // Meta-parameters (learned Q-table initialization)
        this.metaQTable = new Map();
        // Initialize with base Q-table inherited from AbstractRLLearner
        this.copyQTable(this.qTable, this.metaQTable);
        // Task organization: experiences keyed by experience.taskType
        this.taskBuffer = new Map();
        this.metaEpisodes = [];
        this.metaStepCount = 0;
        this.logger.info('MAMLMetaLearner initialized for fast adaptation', {
            innerLearningRate: fullConfig.innerLearningRate,
            innerSteps: fullConfig.innerSteps,
            metaLearningRate: fullConfig.metaLearningRate,
            supportSetSize: fullConfig.minTaskExamples
        });
    }
    /**
     * Update Q-value using base algorithm (delegates to wrapped algorithm if available)
     * For MAML, this is called during inner loop adaptation.
     *
     * NOTE(review): this.baseAlgorithm is never assigned in this class — it is
     * presumably injected externally or provided by the base class; confirm.
     */
    update(experience, nextAction) {
        if (this.baseAlgorithm) {
            this.baseAlgorithm.update(experience, nextAction);
        }
        else {
            // Default: Q-Learning update
            this.qLearningUpdate(experience);
        }
        // Buffer experience by task type for meta-learning
        this.bufferExperience(experience);
        this.stepCount++;
    }
    /**
     * Q-Learning update rule (default inner loop algorithm).
     * Applies a standard TD(0) update to the main Q-table.
     */
    qLearningUpdate(experience) {
        const stateKey = this.encodeState(experience.state);
        const actionKey = this.encodeAction(experience.action);
        const nextStateKey = this.encodeState(experience.nextState);
        // Get current Q-value
        const currentQ = this.getQValueRaw(stateKey, actionKey);
        // Get max Q-value for next state (0 when the next state is unseen)
        const nextStateActions = this.qTable.get(nextStateKey);
        const maxNextQ = nextStateActions && nextStateActions.size > 0
            ? Math.max(...Array.from(nextStateActions.values()).map(qv => qv.value))
            : 0;
        // Q-learning update: Q ← Q + lr * (r + γ·maxQ' − Q)
        const tdTarget = experience.reward + this.config.discountFactor * maxNextQ;
        const tdError = tdTarget - currentQ;
        const newQ = currentQ + this.config.learningRate * tdError;
        this.setQValue(stateKey, actionKey, newQ);
    }
    /**
     * Buffer experience by task type for meta-learning.
     * Keeps at most maxTaskExamples * 10 experiences per task type (FIFO).
     */
    bufferExperience(experience) {
        const taskType = experience.taskType;
        if (!this.taskBuffer.has(taskType)) {
            this.taskBuffer.set(taskType, []);
        }
        const buffer = this.taskBuffer.get(taskType);
        buffer.push(experience);
        // Limit buffer size per task type
        const maxBufferSize = this.mamlConfig.maxTaskExamples * 10;
        if (buffer.length > maxBufferSize) {
            buffer.shift(); // Remove oldest
        }
    }
    /**
     * Perform meta-learning update (outer loop).
     * Learns Q-table initialization that enables fast adaptation.
     *
     * This should be called periodically after collecting enough task examples.
     *
     * @returns {object|null} The recorded meta-episode, or null when fewer than
     *          taskBatchSize tasks have enough buffered examples.
     */
    async performMetaUpdate() {
        // Sample batch of tasks from buffer
        const tasks = this.sampleMetaTasks();
        if (tasks.length < this.mamlConfig.taskBatchSize) {
            this.logger.debug(`Not enough tasks for meta-update (${tasks.length}/${this.mamlConfig.taskBatchSize})`);
            return null;
        }
        // Meta-gradients accumulator: stateKey -> (actionKey -> summed gradient)
        const metaGradients = new Map();
        let totalPreAdaptLoss = 0;
        let totalPostAdaptLoss = 0;
        // For each task in the batch
        for (const task of tasks) {
            // 1. Clone meta-parameters for this task
            const taskQTable = this.cloneQTable(this.metaQTable);
            // 2. Inner loop: Adapt to task using support set
            const preAdaptLoss = this.evaluateLoss(taskQTable, task.supportSet);
            for (let step = 0; step < this.mamlConfig.innerSteps; step++) {
                this.adaptToTask(taskQTable, task.supportSet, this.mamlConfig.innerLearningRate);
            }
            // 3. Evaluate adapted parameters on query set
            const postAdaptLoss = this.evaluateLoss(taskQTable, task.querySet);
            totalPreAdaptLoss += preAdaptLoss;
            totalPostAdaptLoss += postAdaptLoss;
            // 4. Compute meta-gradients
            if (this.mamlConfig.firstOrderApproximation) {
                // First-order MAML: ignore second derivatives
                this.computeFirstOrderGradients(metaGradients, taskQTable, task.querySet);
            }
            else {
                // Full MAML: compute through inner loop
                this.computeSecondOrderGradients(metaGradients, this.metaQTable, task);
            }
        }
        // 5. Update meta-parameters using accumulated gradients
        this.updateMetaParameters(metaGradients, tasks.length);
        // 6. Copy meta-parameters to main Q-table
        this.copyQTable(this.metaQTable, this.qTable);
        this.metaStepCount++;
        const avgPreAdaptLoss = totalPreAdaptLoss / tasks.length;
        const avgPostAdaptLoss = totalPostAdaptLoss / tasks.length;
        // Create meta-episode record.
        // Guard against division by zero: when the pre-adaptation loss is 0
        // (e.g. all TD errors vanish), report 0% improvement instead of NaN.
        const metaEpisode = {
            episodeId: `meta-${this.metaStepCount}-${Date.now()}`,
            tasks,
            preAdaptationLoss: avgPreAdaptLoss,
            postAdaptationLoss: avgPostAdaptLoss,
            metaLoss: avgPostAdaptLoss,
            improvement: totalPreAdaptLoss > 0
                ? ((totalPreAdaptLoss - totalPostAdaptLoss) / totalPreAdaptLoss) * 100
                : 0,
            timestamp: new Date()
        };
        this.metaEpisodes.push(metaEpisode);
        this.logger.info('Meta-update completed', {
            episode: metaEpisode.episodeId,
            tasks: tasks.length,
            preAdaptLoss: metaEpisode.preAdaptationLoss.toFixed(4),
            postAdaptLoss: metaEpisode.postAdaptationLoss.toFixed(4),
            improvement: `${metaEpisode.improvement.toFixed(2)}%`
        });
        return metaEpisode;
    }
    /**
     * Sample batch of meta-tasks from task buffer.
     * Each task contains support set (for adaptation) and query set (for evaluation).
     */
    sampleMetaTasks() {
        const tasks = [];
        const taskTypes = Array.from(this.taskBuffer.keys());
        // Sample up to taskBatchSize tasks
        const numTasks = Math.min(this.mamlConfig.taskBatchSize, taskTypes.length);
        for (let i = 0; i < numTasks; i++) {
            const taskType = taskTypes[i % taskTypes.length];
            const experiences = this.taskBuffer.get(taskType) || [];
            if (experiences.length < this.mamlConfig.minTaskExamples * 2) {
                continue; // Not enough examples for support + query sets
            }
            // Shuffle experiences with an unbiased Fisher–Yates shuffle.
            // (A `.sort(() => Math.random() - 0.5)` comparator is inconsistent,
            // which yields biased, engine-dependent orderings.)
            const shuffled = [...experiences];
            for (let j = shuffled.length - 1; j > 0; j--) {
                const k = Math.floor(Math.random() * (j + 1));
                [shuffled[j], shuffled[k]] = [shuffled[k], shuffled[j]];
            }
            // Split into support and query sets
            const supportSize = this.mamlConfig.minTaskExamples;
            const supportSet = shuffled.slice(0, supportSize);
            const querySet = shuffled.slice(supportSize, supportSize * 2);
            tasks.push({
                id: `task-${taskType}-${Date.now()}`,
                taskType,
                supportSet,
                querySet
            });
        }
        return tasks;
    }
    /**
     * Adapt Q-table to a specific task using support set (inner loop).
     * Mutates the given task-specific qTable in place.
     */
    adaptToTask(qTable, supportSet, learningRate) {
        for (const experience of supportSet) {
            const stateKey = this.encodeState(experience.state);
            const actionKey = this.encodeAction(experience.action);
            const nextStateKey = this.encodeState(experience.nextState);
            // Get current Q-value from task-specific table
            const currentQ = this.getQValueFromTable(qTable, stateKey, actionKey);
            // Get max Q-value for next state
            const nextStateActions = qTable.get(nextStateKey);
            const maxNextQ = nextStateActions && nextStateActions.size > 0
                ? Math.max(...Array.from(nextStateActions.values()).map(qv => qv.value))
                : 0;
            // Q-learning update with inner learning rate (α)
            const tdTarget = experience.reward + this.config.discountFactor * maxNextQ;
            const tdError = tdTarget - currentQ;
            const newQ = currentQ + learningRate * tdError;
            // Update task-specific Q-table
            this.setQValueInTable(qTable, stateKey, actionKey, newQ);
        }
    }
    /**
     * Evaluate loss (mean squared TD error) on a set of experiences.
     * Returns 0 for an empty experience set.
     */
    evaluateLoss(qTable, experiences) {
        let totalLoss = 0;
        for (const experience of experiences) {
            const stateKey = this.encodeState(experience.state);
            const actionKey = this.encodeAction(experience.action);
            const nextStateKey = this.encodeState(experience.nextState);
            const currentQ = this.getQValueFromTable(qTable, stateKey, actionKey);
            const nextStateActions = qTable.get(nextStateKey);
            const maxNextQ = nextStateActions && nextStateActions.size > 0
                ? Math.max(...Array.from(nextStateActions.values()).map(qv => qv.value))
                : 0;
            const tdTarget = experience.reward + this.config.discountFactor * maxNextQ;
            const tdError = tdTarget - currentQ;
            totalLoss += tdError * tdError; // Squared TD error
        }
        return experiences.length > 0 ? totalLoss / experiences.length : 0;
    }
    /**
     * Compute first-order meta-gradients (FOMAML).
     * Faster approximation that ignores second-order derivatives.
     * Accumulates into metaGradients (summed across query experiences).
     */
    computeFirstOrderGradients(metaGradients, adaptedQTable, querySet) {
        for (const experience of querySet) {
            const stateKey = this.encodeState(experience.state);
            const actionKey = this.encodeAction(experience.action);
            const nextStateKey = this.encodeState(experience.nextState);
            const currentQ = this.getQValueFromTable(adaptedQTable, stateKey, actionKey);
            const nextStateActions = adaptedQTable.get(nextStateKey);
            const maxNextQ = nextStateActions && nextStateActions.size > 0
                ? Math.max(...Array.from(nextStateActions.values()).map(qv => qv.value))
                : 0;
            const tdTarget = experience.reward + this.config.discountFactor * maxNextQ;
            const tdError = tdTarget - currentQ;
            // Gradient: ∂Loss/∂Q = -2 * TD-error
            const gradient = -2 * tdError;
            // Accumulate gradient
            if (!metaGradients.has(stateKey)) {
                metaGradients.set(stateKey, new Map());
            }
            const stateGradients = metaGradients.get(stateKey);
            const currentGradient = stateGradients.get(actionKey) || 0;
            stateGradients.set(actionKey, currentGradient + gradient);
        }
    }
    /**
     * Compute second-order meta-gradients (Full MAML).
     * More accurate but computationally expensive.
     *
     * NOTE: currently re-runs the inner loop and falls back to the first-order
     * approximation; a full second-order computation would require Hessians.
     */
    computeSecondOrderGradients(metaGradients, metaQTable, task) {
        const adaptedQTable = this.cloneQTable(metaQTable);
        for (let step = 0; step < this.mamlConfig.innerSteps; step++) {
            this.adaptToTask(adaptedQTable, task.supportSet, this.mamlConfig.innerLearningRate);
        }
        this.computeFirstOrderGradients(metaGradients, adaptedQTable, task.querySet);
    }
    /**
     * Update meta-parameters using accumulated gradients
     * (gradient descent with the meta learning rate β, averaged over tasks).
     */
    updateMetaParameters(metaGradients, numTasks) {
        for (const [stateKey, stateGradients] of metaGradients.entries()) {
            for (const [actionKey, gradient] of stateGradients.entries()) {
                const currentQ = this.getQValueFromTable(this.metaQTable, stateKey, actionKey);
                // Average gradient over tasks
                const avgGradient = gradient / numTasks;
                // Meta-gradient descent
                const newQ = currentQ - this.mamlConfig.metaLearningRate * avgGradient;
                this.setQValueInTable(this.metaQTable, stateKey, actionKey, newQ);
            }
        }
    }
    /**
     * Fast adaptation to new task (few-shot learning).
     * Given 5-10 examples, quickly adapt Q-table for new testing domain.
     *
     * @param examples Few examples of new task (5-10); warns (but proceeds)
     *                 when fewer than minTaskExamples are supplied.
     * @returns Adapted Q-table (a clone; meta-parameters are untouched)
     */
    async fastAdapt(examples) {
        if (examples.length < this.mamlConfig.minTaskExamples) {
            this.logger.warn(`Few-shot adaptation requires at least ${this.mamlConfig.minTaskExamples} examples, got ${examples.length}`);
        }
        // Clone meta-parameters
        const adaptedQTable = this.cloneQTable(this.metaQTable);
        // Perform inner loop adaptation
        for (let step = 0; step < this.mamlConfig.innerSteps; step++) {
            this.adaptToTask(adaptedQTable, examples, this.mamlConfig.innerLearningRate);
        }
        this.logger.info(`Fast adaptation completed with ${examples.length} examples in ${this.mamlConfig.innerSteps} steps`);
        return adaptedQTable;
    }
    /**
     * Get Q-value from specific Q-table (helper).
     * Returns 0 for unseen state/action pairs without creating an entry.
     */
    getQValueFromTable(qTable, stateKey, actionKey) {
        const stateActions = qTable.get(stateKey);
        if (!stateActions)
            return 0;
        const qValue = stateActions.get(actionKey);
        return qValue?.value ?? 0;
    }
    /**
     * Set Q-value in specific Q-table (helper).
     * Creates the state entry if needed and bumps updateCount.
     */
    setQValueInTable(qTable, stateKey, actionKey, value) {
        if (!qTable.has(stateKey)) {
            qTable.set(stateKey, new Map());
        }
        const stateActions = qTable.get(stateKey);
        const currentQValue = stateActions.get(actionKey);
        stateActions.set(actionKey, {
            state: stateKey,
            action: actionKey,
            value,
            updateCount: (currentQValue?.updateCount ?? 0) + 1,
            lastUpdated: Date.now()
        });
    }
    /**
     * Get Q-value from the main Q-table (raw, without creating entry).
     */
    getQValueRaw(stateKey, actionKey) {
        return this.getQValueFromTable(this.qTable, stateKey, actionKey);
    }
    /**
     * Clone Q-table (deep enough: per-entry shallow copies of Q-value records).
     */
    cloneQTable(source) {
        const cloned = new Map();
        for (const [stateKey, stateActions] of source.entries()) {
            const clonedActions = new Map();
            for (const [actionKey, qValue] of stateActions.entries()) {
                clonedActions.set(actionKey, { ...qValue });
            }
            cloned.set(stateKey, clonedActions);
        }
        return cloned;
    }
    /**
     * Copy Q-table from source to destination (destination is cleared first).
     */
    copyQTable(source, destination) {
        destination.clear();
        for (const [stateKey, stateActions] of source.entries()) {
            const copiedActions = new Map();
            for (const [actionKey, qValue] of stateActions.entries()) {
                copiedActions.set(actionKey, { ...qValue });
            }
            destination.set(stateKey, copiedActions);
        }
    }
    /**
     * Get meta-learning statistics (averages over the 10 most recent episodes).
     */
    getMetaStatistics() {
        const recentEpisodes = this.metaEpisodes.slice(-10);
        return {
            metaSteps: this.metaStepCount,
            metaEpisodes: this.metaEpisodes.length,
            avgPreAdaptLoss: recentEpisodes.length > 0
                ? recentEpisodes.reduce((sum, e) => sum + e.preAdaptationLoss, 0) / recentEpisodes.length
                : 0,
            avgPostAdaptLoss: recentEpisodes.length > 0
                ? recentEpisodes.reduce((sum, e) => sum + e.postAdaptationLoss, 0) / recentEpisodes.length
                : 0,
            avgImprovement: recentEpisodes.length > 0
                ? recentEpisodes.reduce((sum, e) => sum + e.improvement, 0) / recentEpisodes.length
                : 0,
            taskTypes: this.taskBuffer.size,
            bufferedExperiences: Array.from(this.taskBuffer.values()).reduce((sum, arr) => sum + arr.length, 0)
        };
    }
    /**
     * Get meta-episodes history (defensive copy of the array).
     */
    getMetaEpisodes() {
        return [...this.metaEpisodes];
    }
    /**
     * Clear task buffer.
     */
    clearTaskBuffer() {
        this.taskBuffer.clear();
    }
    /**
     * Get default exploration rate.
     */
    getDefaultExplorationRate() {
        return this.mamlConfig.explorationRate ?? 0.2;
    }
    /**
     * Get algorithm name.
     */
    getAlgorithmName() {
        return 'MAML';
    }
    /**
     * Override getStatistics to include meta-learning metrics.
     */
    getStatistics() {
        return {
            ...super.getStatistics(),
            maml: this.getMetaStatistics()
        };
    }
    /**
     * Export meta-learner state (plain-object serialization of Maps).
     */
    export() {
        const baseExport = super.export();
        // Serialize metaQTable
        const serializedMetaQTable = {};
        for (const [state, actions] of this.metaQTable.entries()) {
            serializedMetaQTable[state] = {};
            for (const [action, qValue] of actions.entries()) {
                serializedMetaQTable[state][action] = qValue;
            }
        }
        // Serialize taskBuffer
        const serializedTaskBuffer = {};
        for (const [taskType, experiences] of this.taskBuffer.entries()) {
            serializedTaskBuffer[taskType] = experiences;
        }
        return {
            ...baseExport,
            metaQTable: serializedMetaQTable,
            taskBuffer: serializedTaskBuffer,
            metaEpisodes: this.metaEpisodes,
            metaStepCount: this.metaStepCount
        };
    }
    /**
     * Import meta-learner state (inverse of export()).
     * Replaces metaQTable, taskBuffer, metaEpisodes and metaStepCount in place.
     */
    import(state) {
        super.import(state);
        // Deserialize metaQTable
        this.metaQTable.clear();
        for (const [stateKey, actions] of Object.entries(state.metaQTable)) {
            const actionMap = new Map();
            for (const [actionKey, qValue] of Object.entries(actions)) {
                actionMap.set(actionKey, qValue);
            }
            this.metaQTable.set(stateKey, actionMap);
        }
        // Deserialize taskBuffer
        this.taskBuffer.clear();
        for (const [taskType, experiences] of Object.entries(state.taskBuffer)) {
            this.taskBuffer.set(taskType, experiences);
        }
        this.metaEpisodes = state.metaEpisodes;
        this.metaStepCount = state.metaStepCount;
        this.logger.info('Imported MAML state', {
            metaSteps: this.metaStepCount,
            taskTypes: this.taskBuffer.size,
            metaTableSize: this.metaQTable.size
        });
    }
}
|
|
525
|
+
exports.MAMLMetaLearner = MAMLMetaLearner;
/**
 * Create default MAML configuration.
 *
 * Returns a shallow copy of DEFAULT_MAML_CONFIG so that callers mutating the
 * returned object cannot corrupt the module-level defaults shared by every
 * MAMLMetaLearner instance. (The previous implementation returned the shared
 * object directly.)
 *
 * @returns {object} A fresh config object with the default MAML settings.
 */
function createDefaultMAMLConfig() {
    return { ...DEFAULT_MAML_CONFIG };
}
|
|
532
|
+
//# sourceMappingURL=MAMLMetaLearner.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"MAMLMetaLearner.js","sourceRoot":"","sources":["../../../src/learning/algorithms/MAMLMetaLearner.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;;;;;;;;;;;;;;GAwBG;;;AA0rBH,0DAEC;AA1rBD,2DAA0E;AA0B1E;;GAEG;AACH,MAAM,mBAAmB,GAAwB;IAC/C,iBAAiB;IACjB,YAAY,EAAE,IAAI,EAAE,oCAAoC;IACxD,cAAc,EAAE,IAAI;IACpB,eAAe,EAAE,GAAG,EAAE,sCAAsC;IAC5D,gBAAgB,EAAE,IAAI;IACtB,kBAAkB,EAAE,IAAI;IACxB,mBAAmB,EAAE,KAAK,EAAE,8BAA8B;IAC1D,gBAAgB,EAAE,IAAI;IACtB,SAAS,EAAE,EAAE,EAAE,kBAAkB;IAEjC,gBAAgB;IAChB,iBAAiB,EAAE,GAAG,EAAE,iCAAiC;IACzD,UAAU,EAAE,CAAC,EAAE,uCAAuC;IACtD,gBAAgB,EAAE,IAAI,EAAE,4BAA4B;IACpD,eAAe,EAAE,CAAC,EAAE,8BAA8B;IAClD,eAAe,EAAE,EAAE;IACnB,aAAa,EAAE,CAAC,EAAE,kCAAkC;IACpD,uBAAuB,EAAE,IAAI,CAAC,qBAAqB;CACpD,CAAC;AA0BF;;;;;;;;;;;;;;;;;;GAkBG;AACH,MAAa,eAAgB,SAAQ,qCAAiB;IAQpD,YAAY,SAA8B,EAAE;QAC1C,MAAM,UAAU,GAAG,EAAE,GAAG,mBAAmB,EAAE,GAAG,MAAM,EAAgB,CAAC;QACvE,KAAK,CAAC,UAAU,CAAC,CAAC;QAClB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAE7B,mDAAmD;QACnD,IAAI,CAAC,UAAU,GAAG,IAAI,GAAG,EAAE,CAAC;QAE5B,+BAA+B;QAC/B,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,UAAU,CAAC,CAAC;QAE9C,oBAAoB;QACpB,IAAI,CAAC,UAAU,GAAG,IAAI,GAAG,EAAE,CAAC;QAC5B,IAAI,CAAC,YAAY,GAAG,EAAE,CAAC;QACvB,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC;QAEvB,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,iDAAiD,EAAE;YAClE,iBAAiB,EAAE,UAAU,CAAC,iBAAiB;YAC/C,UAAU,EAAE,UAAU,CAAC,UAAU;YACjC,gBAAgB,EAAE,UAAU,CAAC,gBAAgB;YAC7C,cAAc,EAAE,UAAU,CAAC,eAAe;SAC3C,CAAC,CAAC;IACL,CAAC;IAED;;;OAGG;IACH,MAAM,CAAC,UAA0B,EAAE,UAAwB;QACzD,IAAI,IAAI,CAAC,aAAa,EAAE,CAAC;YACvB,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,UAAU,EAAE,UAAU,CAAC,CAAC;QACpD,CAAC;aAAM,CAAC;YACN,6BAA6B;YAC7B,IAAI,CAAC,eAAe,CAAC,UAAU,CAAC,CAAC;QACnC,CAAC;QAED,mDAAmD;QACnD,IAAI,CAAC,gBAAgB,CAAC,UAAU,CAAC,CAAC;QAElC,IAAI,CAAC,SAAS,EAAE,CAAC;IACnB,CAAC;IAED;;OAEG;IACK,eAAe,CAAC,UAA0B;QAChD,MAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;QACpD,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;QACvD,MAAM,YAAY,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;QAE5D,sBAAsB;QACtB,MAAM,Q
AAQ,GAAG,IAAI,CAAC,YAAY,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC;QAExD,iCAAiC;QACjC,MAAM,gBAAgB,GAAG,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC;QACvD,MAAM,QAAQ,GAAG,gBAAgB,IAAI,gBAAgB,CAAC,IAAI,GAAG,CAAC;YAC5D,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,gBAAgB,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC;YACxE,CAAC,CAAC,CAAC,CAAC;QAEN,oBAAoB;QACpB,MAAM,QAAQ,GAAG,UAAU,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,cAAc,GAAG,QAAQ,CAAC;QAC3E,MAAM,OAAO,GAAG,QAAQ,GAAG,QAAQ,CAAC;QACpC,MAAM,IAAI,GAAG,QAAQ,GAAG,IAAI,CAAC,MAAM,CAAC,YAAY,GAAG,OAAO,CAAC;QAE3D,IAAI,CAAC,SAAS,CAAC,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAC;IAC5C,CAAC;IAED;;OAEG;IACK,gBAAgB,CAAC,UAA0B;QACjD,MAAM,QAAQ,GAAG,UAAU,CAAC,QAAQ,CAAC;QAErC,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC;YACnC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,EAAE,EAAE,CAAC,CAAC;QACpC,CAAC;QAED,MAAM,MAAM,GAAG,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAE,CAAC;QAC9C,MAAM,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;QAExB,kCAAkC;QAClC,MAAM,aAAa,GAAG,IAAI,CAAC,UAAU,CAAC,eAAe,GAAG,EAAE,CAAC;QAC3D,IAAI,MAAM,CAAC,MAAM,GAAG,aAAa,EAAE,CAAC;YAClC,MAAM,CAAC,KAAK,EAAE,CAAC,CAAC,gBAAgB;QAClC,CAAC;IACH,CAAC;IAED;;;;;OAKG;IACH,KAAK,CAAC,iBAAiB;QACrB,oCAAoC;QACpC,MAAM,KAAK,GAAG,IAAI,CAAC,eAAe,EAAE,CAAC;QAErC,IAAI,KAAK,CAAC,MAAM,GAAG,IAAI,CAAC,UAAU,CAAC,aAAa,EAAE,CAAC;YACjD,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,qCAAqC,KAAK,CAAC,MAAM,IAAI,IAAI,CAAC,UAAU,CAAC,aAAa,GAAG,CAAC,CAAC;YACzG,OAAO,IAAI,CAAC;QACd,CAAC;QAED,6BAA6B;QAC7B,MAAM,aAAa,GAAG,IAAI,GAAG,EAA+B,CAAC;QAE7D,IAAI,iBAAiB,GAAG,CAAC,CAAC;QAC1B,IAAI,kBAAkB,GAAG,CAAC,CAAC;QAE3B,6BAA6B;QAC7B,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE,CAAC;YACzB,yCAAyC;YACzC,MAAM,UAAU,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;YAErD,iDAAiD;YACjD,MAAM,YAAY,GAAG,IAAI,CAAC,YAAY,CAAC,UAAU,EAAE,IAAI,CAAC,UAAU,CAAC,CAAC;YAEpE,KAAK,IAAI,IAAI,GAAG,CAAC,EAAE,IAAI,GAAG,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,EAAE,EAAE,CAAC;gBAC7D,IAAI,CAAC,WAAW,CAAC,UAAU,EAAE,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,UAAU,CAAC,iBAAiB,CAAC,CAAC;YACnF,CAAC;YAED,8CAA8C;Y
AC9C,MAAM,aAAa,GAAG,IAAI,CAAC,YAAY,CAAC,UAAU,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;YAEnE,iBAAiB,IAAI,YAAY,CAAC;YAClC,kBAAkB,IAAI,aAAa,CAAC;YAEpC,4BAA4B;YAC5B,IAAI,IAAI,CAAC,UAAU,CAAC,uBAAuB,EAAE,CAAC;gBAC5C,8CAA8C;gBAC9C,IAAI,CAAC,0BAA0B,CAAC,aAAa,EAAE,UAAU,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;YAC5E,CAAC;iBAAM,CAAC;gBACN,wCAAwC;gBACxC,IAAI,CAAC,2BAA2B,CAAC,aAAa,EAAE,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,CAAC;YACzE,CAAC;QACH,CAAC;QAED,wDAAwD;QACxD,IAAI,CAAC,oBAAoB,CAAC,aAAa,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC;QAEvD,0CAA0C;QAC1C,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC;QAE9C,IAAI,CAAC,aAAa,EAAE,CAAC;QAErB,6BAA6B;QAC7B,MAAM,WAAW,GAAgB;YAC/B,SAAS,EAAE,QAAQ,IAAI,CAAC,aAAa,IAAI,IAAI,CAAC,GAAG,EAAE,EAAE;YACrD,KAAK;YACL,iBAAiB,EAAE,iBAAiB,GAAG,KAAK,CAAC,MAAM;YACnD,kBAAkB,EAAE,kBAAkB,GAAG,KAAK,CAAC,MAAM;YACrD,QAAQ,EAAE,kBAAkB,GAAG,KAAK,CAAC,MAAM;YAC3C,WAAW,EAAE,CAAC,CAAC,iBAAiB,GAAG,kBAAkB,CAAC,GAAG,iBAAiB,CAAC,GAAG,GAAG;YACjF,SAAS,EAAE,IAAI,IAAI,EAAE;SACtB,CAAC;QAEF,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;QAEpC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,uBAAuB,EAAE;YACxC,OAAO,EAAE,WAAW,CAAC,SAAS;YAC9B,KAAK,EAAE,KAAK,CAAC,MAAM;YACnB,YAAY,EAAE,WAAW,CAAC,iBAAiB,CAAC,OAAO,CAAC,CAAC,CAAC;YACtD,aAAa,EAAE,WAAW,CAAC,kBAAkB,CAAC,OAAO,CAAC,CAAC,CAAC;YACxD,WAAW,EAAE,GAAG,WAAW,CAAC,WAAW,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;SACtD,CAAC,CAAC;QAEH,OAAO,WAAW,CAAC;IACrB,CAAC;IAED;;;OAGG;IACK,eAAe;QACrB,MAAM,KAAK,GAAe,EAAE,CAAC;QAC7B,MAAM,SAAS,GAAG,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,IAAI,EAAE,CAAC,CAAC;QAErD,mCAAmC;QACnC,MAAM,QAAQ,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,UAAU,CAAC,aAAa,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC;QAE3E,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,EAAE,CAAC,EAAE,EAAE,CAAC;YAClC,MAAM,QAAQ,GAAG,SAAS,CAAC,CAAC,GAAG,SAAS,CAAC,MAAM,CAAC,CAAC;YACjD,MAAM,WAAW,GAAG,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC;YAExD,IAAI,WAAW,CAAC,MAAM,GAAG,IAAI,CAAC,UAAU,CAAC,eAAe,GAAG,CAAC,EAAE,CAAC;gBAC7D,SAAS,CAAC,+CAA+C;YAC3D,CAAC;YAED,sBAAsB;YACtB,MAAM,QAAQ,GAAG,CAAC,GAAG,WAAW,CAAC,CAAC,IAAI,CAAC,GAAG,EAA
E,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,GAAG,CAAC,CAAC;YAElE,oCAAoC;YACpC,MAAM,WAAW,GAAG,IAAI,CAAC,UAAU,CAAC,eAAe,CAAC;YACpD,MAAM,UAAU,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC;YAClD,MAAM,QAAQ,GAAG,QAAQ,CAAC,KAAK,CAAC,WAAW,EAAE,WAAW,GAAG,CAAC,CAAC,CAAC;YAE9D,KAAK,CAAC,IAAI,CAAC;gBACT,EAAE,EAAE,QAAQ,QAAQ,IAAI,IAAI,CAAC,GAAG,EAAE,EAAE;gBACpC,QAAQ;gBACR,UAAU;gBACV,QAAQ;aACT,CAAC,CAAC;QACL,CAAC;QAED,OAAO,KAAK,CAAC;IACf,CAAC;IAED;;OAEG;IACK,WAAW,CACjB,MAAwC,EACxC,UAA4B,EAC5B,YAAoB;QAEpB,KAAK,MAAM,UAAU,IAAI,UAAU,EAAE,CAAC;YACpC,MAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;YACpD,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;YACvD,MAAM,YAAY,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;YAE5D,+CAA+C;YAC/C,MAAM,QAAQ,GAAG,IAAI,CAAC,kBAAkB,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;YAEtE,iCAAiC;YACjC,MAAM,gBAAgB,GAAG,MAAM,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC;YAClD,MAAM,QAAQ,GAAG,gBAAgB,IAAI,gBAAgB,CAAC,IAAI,GAAG,CAAC;gBAC5D,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,gBAAgB,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC;gBACxE,CAAC,CAAC,CAAC,CAAC;YAEN,6CAA6C;YAC7C,MAAM,QAAQ,GAAG,UAAU,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,cAAc,GAAG,QAAQ,CAAC;YAC3E,MAAM,OAAO,GAAG,QAAQ,GAAG,QAAQ,CAAC;YACpC,MAAM,IAAI,GAAG,QAAQ,GAAG,YAAY,GAAG,OAAO,CAAC;YAE/C,+BAA+B;YAC/B,IAAI,CAAC,gBAAgB,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAC;QAC3D,CAAC;IACH,CAAC;IAED;;OAEG;IACK,YAAY,CAClB,MAAwC,EACxC,WAA6B;QAE7B,IAAI,SAAS,GAAG,CAAC,CAAC;QAElB,KAAK,MAAM,UAAU,IAAI,WAAW,EAAE,CAAC;YACrC,MAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;YACpD,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;YACvD,MAAM,YAAY,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;YAE5D,MAAM,QAAQ,GAAG,IAAI,CAAC,kBAAkB,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;YAEtE,MAAM,gBAAgB,GAAG,MAAM,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC;YAClD,MAAM,QAAQ,GAAG,gBAAgB,IAAI,gBAAgB,CAAC,IAAI,GAAG,CAAC;gBAC5D,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,gBAAgB,CAAC,MAAM,
EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC;gBACxE,CAAC,CAAC,CAAC,CAAC;YAEN,MAAM,QAAQ,GAAG,UAAU,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,cAAc,GAAG,QAAQ,CAAC;YAC3E,MAAM,OAAO,GAAG,QAAQ,GAAG,QAAQ,CAAC;YAEpC,SAAS,IAAI,OAAO,GAAG,OAAO,CAAC,CAAC,mBAAmB;QACrD,CAAC;QAED,OAAO,WAAW,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,SAAS,GAAG,WAAW,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;IACrE,CAAC;IAED;;;OAGG;IACK,0BAA0B,CAChC,aAA+C,EAC/C,aAA+C,EAC/C,QAA0B;QAE1B,KAAK,MAAM,UAAU,IAAI,QAAQ,EAAE,CAAC;YAClC,MAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;YACpD,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;YACvD,MAAM,YAAY,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;YAE5D,MAAM,QAAQ,GAAG,IAAI,CAAC,kBAAkB,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;YAE7E,MAAM,gBAAgB,GAAG,aAAa,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC;YACzD,MAAM,QAAQ,GAAG,gBAAgB,IAAI,gBAAgB,CAAC,IAAI,GAAG,CAAC;gBAC5D,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,gBAAgB,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,KAAK,CAAC,CAAC;gBACxE,CAAC,CAAC,CAAC,CAAC;YAEN,MAAM,QAAQ,GAAG,UAAU,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,cAAc,GAAG,QAAQ,CAAC;YAC3E,MAAM,OAAO,GAAG,QAAQ,GAAG,QAAQ,CAAC;YAEpC,qCAAqC;YACrC,MAAM,QAAQ,GAAG,CAAC,CAAC,GAAG,OAAO,CAAC;YAE9B,sBAAsB;YACtB,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC;gBACjC,aAAa,CAAC,GAAG,CAAC,QAAQ,EAAE,IAAI,GAAG,EAAE,CAAC,CAAC;YACzC,CAAC;YACD,MAAM,cAAc,GAAG,aAAa,CAAC,GAAG,CAAC,QAAQ,CAAE,CAAC;YACpD,MAAM,eAAe,GAAG,cAAc,CAAC,GAAG,CAAC,SAAS,CAAC,IAAI,CAAC,CAAC;YAC3D,cAAc,CAAC,GAAG,CAAC,SAAS,EAAE,eAAe,GAAG,QAAQ,CAAC,CAAC;QAC5D,CAAC;IACH,CAAC;IAED;;;OAGG;IACK,2BAA2B,CACjC,aAA+C,EAC/C,UAA4C,EAC5C,IAAc;QAEd,gDAAgD;QAChD,4DAA4D;QAC5D,MAAM,aAAa,GAAG,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,CAAC;QAEnD,KAAK,IAAI,IAAI,GAAG,CAAC,EAAE,IAAI,GAAG,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,EAAE,EAAE,CAAC;YAC7D,IAAI,CAAC,WAAW,CAAC,aAAa,EAAE,IAAI,CAAC,UAAU,EAAE,IAAI,CAAC,UAAU,CAAC,iBAAiB,CAAC,CAAC;QACtF,CAAC;QAED,IAAI,CAAC,0BAA0B,CAAC,aAAa,EAAE,aAAa,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;IAC/
E,CAAC;IAED;;OAEG;IACK,oBAAoB,CAC1B,aAA+C,EAC/C,QAAgB;QAEhB,KAAK,MAAM,CAAC,QAAQ,EAAE,cAAc,CAAC,IAAI,aAAa,CAAC,OAAO,EAAE,EAAE,CAAC;YACjE,KAAK,MAAM,CAAC,SAAS,EAAE,QAAQ,CAAC,IAAI,cAAc,CAAC,OAAO,EAAE,EAAE,CAAC;gBAC7D,MAAM,QAAQ,GAAG,IAAI,CAAC,kBAAkB,CAAC,IAAI,CAAC,UAAU,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;gBAE/E,8BAA8B;gBAC9B,MAAM,WAAW,GAAG,QAAQ,GAAG,QAAQ,CAAC;gBAExC,wBAAwB;gBACxB,MAAM,IAAI,GAAG,QAAQ,GAAG,IAAI,CAAC,UAAU,CAAC,gBAAgB,GAAG,WAAW,CAAC;gBAEvE,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,UAAU,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAC;YACpE,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;;;OAMG;IACH,KAAK,CAAC,SAAS,CAAC,QAA0B;QACxC,IAAI,QAAQ,CAAC,MAAM,GAAG,IAAI,CAAC,UAAU,CAAC,eAAe,EAAE,CAAC;YACtD,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,yCAAyC,IAAI,CAAC,UAAU,CAAC,eAAe,kBAAkB,QAAQ,CAAC,MAAM,EAAE,CAAC,CAAC;QAChI,CAAC;QAED,wBAAwB;QACxB,MAAM,aAAa,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;QAExD,gCAAgC;QAChC,KAAK,IAAI,IAAI,GAAG,CAAC,EAAE,IAAI,GAAG,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,IAAI,EAAE,EAAE,CAAC;YAC7D,IAAI,CAAC,WAAW,CAAC,aAAa,EAAE,QAAQ,EAAE,IAAI,CAAC,UAAU,CAAC,iBAAiB,CAAC,CAAC;QAC/E,CAAC;QAED,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,kCAAkC,QAAQ,CAAC,MAAM,gBAAgB,IAAI,CAAC,UAAU,CAAC,UAAU,QAAQ,CAAC,CAAC;QAEtH,OAAO,aAAa,CAAC;IACvB,CAAC;IAED;;OAEG;IACK,kBAAkB,CACxB,MAAwC,EACxC,QAAgB,EAChB,SAAiB;QAEjB,MAAM,YAAY,GAAG,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC;QAC1C,IAAI,CAAC,YAAY;YAAE,OAAO,CAAC,CAAC;QAE5B,MAAM,MAAM,GAAG,YAAY,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC;QAC3C,OAAO,MAAM,EAAE,KAAK,IAAI,CAAC,CAAC;IAC5B,CAAC;IAED;;OAEG;IACK,gBAAgB,CACtB,MAAwC,EACxC,QAAgB,EAChB,SAAiB,EACjB,KAAa;QAEb,IAAI,CAAC,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC,EAAE,CAAC;YAC1B,MAAM,CAAC,GAAG,CAAC,QAAQ,EAAE,IAAI,GAAG,EAAE,CAAC,CAAC;QAClC,CAAC;QACD,MAAM,YAAY,GAAG,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAE,CAAC;QAE3C,MAAM,aAAa,GAAG,YAAY,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC;QAClD,YAAY,CAAC,GAAG,CAAC,SAAS,EAAE;YAC1B,KAAK,EAAE,QAAQ;YACf,MAAM,EAAE,SAAS;YACjB,KAAK;YACL,WAAW,EAAE,CAAC,aAAa,EAAE,WAAW,IAAI,CAAC,CAAC,GAAG,CAAC;YAClD,WAAW,EAAE,IAAI,CAAC,GAAG,EAAE;SACxB,CAAC,CAAC;IACL,CAAC;IAED;;OAEG;IACK,YAA
Y,CAAC,QAAgB,EAAE,SAAiB;QACtD,OAAO,IAAI,CAAC,kBAAkB,CAAC,IAAI,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,CAAC;IACnE,CAAC;IAED;;OAEG;IACK,WAAW,CAAC,MAAwC;QAC1D,MAAM,MAAM,GAAG,IAAI,GAAG,EAA+B,CAAC;QAEtD,KAAK,MAAM,CAAC,QAAQ,EAAE,YAAY,CAAC,IAAI,MAAM,CAAC,OAAO,EAAE,EAAE,CAAC;YACxD,MAAM,aAAa,GAAG,IAAI,GAAG,EAAkB,CAAC;YAChD,KAAK,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,IAAI,YAAY,CAAC,OAAO,EAAE,EAAE,CAAC;gBACzD,aAAa,CAAC,GAAG,CAAC,SAAS,EAAE,EAAE,GAAG,MAAM,EAAE,CAAC,CAAC;YAC9C,CAAC;YACD,MAAM,CAAC,GAAG,CAAC,QAAQ,EAAE,aAAa,CAAC,CAAC;QACtC,CAAC;QAED,OAAO,MAAM,CAAC;IAChB,CAAC;IAED;;OAEG;IACK,UAAU,CAChB,MAAwC,EACxC,WAA6C;QAE7C,WAAW,CAAC,KAAK,EAAE,CAAC;QAEpB,KAAK,MAAM,CAAC,QAAQ,EAAE,YAAY,CAAC,IAAI,MAAM,CAAC,OAAO,EAAE,EAAE,CAAC;YACxD,MAAM,aAAa,GAAG,IAAI,GAAG,EAAkB,CAAC;YAChD,KAAK,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,IAAI,YAAY,CAAC,OAAO,EAAE,EAAE,CAAC;gBACzD,aAAa,CAAC,GAAG,CAAC,SAAS,EAAE,EAAE,GAAG,MAAM,EAAE,CAAC,CAAC;YAC9C,CAAC;YACD,WAAW,CAAC,GAAG,CAAC,QAAQ,EAAE,aAAa,CAAC,CAAC;QAC3C,CAAC;IACH,CAAC;IAED;;OAEG;IACH,iBAAiB;QASf,MAAM,cAAc,GAAG,IAAI,CAAC,YAAY,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QAEpD,OAAO;YACL,SAAS,EAAE,IAAI,CAAC,aAAa;YAC7B,YAAY,EAAE,IAAI,CAAC,YAAY,CAAC,MAAM;YACtC,eAAe,EAAE,cAAc,CAAC,MAAM,GAAG,CAAC;gBACxC,CAAC,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC,GAAG,GAAG,CAAC,CAAC,iBAAiB,EAAE,CAAC,CAAC,GAAG,cAAc,CAAC,MAAM;gBACzF,CAAC,CAAC,CAAC;YACL,gBAAgB,EAAE,cAAc,CAAC,MAAM,GAAG,CAAC;gBACzC,CAAC,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC,GAAG,GAAG,CAAC,CAAC,kBAAkB,EAAE,CAAC,CAAC,GAAG,cAAc,CAAC,MAAM;gBAC1F,CAAC,CAAC,CAAC;YACL,cAAc,EAAE,cAAc,CAAC,MAAM,GAAG,CAAC;gBACvC,CAAC,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC,GAAG,GAAG,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,GAAG,cAAc,CAAC,MAAM;gBACnF,CAAC,CAAC,CAAC;YACL,SAAS,EAAE,IAAI,CAAC,UAAU,CAAC,IAAI;YAC/B,mBAAmB,EAAE,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,MAAM,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC;SACpG,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,eAAe;QACb,OAAO,CAAC,GAAG,I
AAI,CAAC,YAAY,CAAC,CAAC;IAChC,CAAC;IAED;;OAEG;IACH,eAAe;QACb,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;IAC1B,CAAC;IAED;;OAEG;IACO,yBAAyB;QACjC,OAAO,IAAI,CAAC,UAAU,CAAC,eAAe,IAAI,GAAG,CAAC;IAChD,CAAC;IAED;;OAEG;IACH,gBAAgB;QACd,OAAO,MAAM,CAAC;IAChB,CAAC;IAED;;OAEG;IACH,aAAa;QAGX,OAAO;YACL,GAAG,KAAK,CAAC,aAAa,EAAE;YACxB,IAAI,EAAE,IAAI,CAAC,iBAAiB,EAAE;SAC/B,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,MAAM;QAMJ,MAAM,UAAU,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QAElC,uBAAuB;QACvB,MAAM,oBAAoB,GAA2C,EAAE,CAAC;QACxE,KAAK,MAAM,CAAC,KAAK,EAAE,OAAO,CAAC,IAAI,IAAI,CAAC,UAAU,CAAC,OAAO,EAAE,EAAE,CAAC;YACzD,oBAAoB,CAAC,KAAK,CAAC,GAAG,EAAE,CAAC;YACjC,KAAK,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,IAAI,OAAO,CAAC,OAAO,EAAE,EAAE,CAAC;gBACjD,oBAAoB,CAAC,KAAK,CAAC,CAAC,MAAM,CAAC,GAAG,MAAM,CAAC;YAC/C,CAAC;QACH,CAAC;QAED,uBAAuB;QACvB,MAAM,oBAAoB,GAAqC,EAAE,CAAC;QAClE,KAAK,MAAM,CAAC,QAAQ,EAAE,WAAW,CAAC,IAAI,IAAI,CAAC,UAAU,CAAC,OAAO,EAAE,EAAE,CAAC;YAChE,oBAAoB,CAAC,QAAQ,CAAC,GAAG,WAAW,CAAC;QAC/C,CAAC;QAED,OAAO;YACL,GAAG,UAAU;YACb,UAAU,EAAE,oBAAoB;YAChC,UAAU,EAAE,oBAAoB;YAChC,YAAY,EAAE,IAAI,CAAC,YAAY;YAC/B,aAAa,EAAE,IAAI,CAAC,aAAa;SAClC,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,MAAM,CAAC,KAA4C;QACjD,KAAK,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QAEpB,yBAAyB;QACzB,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACxB,KAAK,MAAM,CAAC,QAAQ,EAAE,OAAO,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,KAAK,CAAC,UAAU,CAAC,EAAE,CAAC;YACnE,MAAM,SAAS,GAAG,IAAI,GAAG,EAAkB,CAAC;YAC5C,KAAK,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC;gBAC1D,SAAS,CAAC,GAAG,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;YACnC,CAAC;YACD,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC;QAC3C,CAAC;QAED,yBAAyB;QACzB,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACxB,KAAK,MAAM,CAAC,QAAQ,EAAE,WAAW,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,KAAK,CAAC,UAAU,CAAC,EAAE,CAAC;YACvE,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAC;QAC7C,CAAC;QAED,IAAI,CAAC,YAAY,GAAG,KAAK,CAAC,YAAY,CAAC;QACvC,IAAI,CAAC,aAAa,GAAG,KAAK,CAAC,aAAa,CAAC;QAEzC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,qBAAqB,EAAE;YACtC,SAAS,EAAE,IAAI,CAAC,aAAa;YAC7B,SAAS,EAAE,IAAI,
CAAC,UAAU,CAAC,IAAI;YAC/B,aAAa,EAAE,IAAI,CAAC,UAAU,CAAC,IAAI;SACpC,CAAC,CAAC;IACL,CAAC;CACF;AAtlBD,0CAslBC;AAED;;GAEG;AACH,SAAgB,uBAAuB;IACrC,OAAO,mBAAiC,CAAC;AAC3C,CAAC"}
|
|
@@ -7,21 +7,24 @@
|
|
|
7
7
|
* - SARSALearner: On-policy TD(0) algorithm
|
|
8
8
|
* - ActorCriticLearner: Advantage Actor-Critic (A2C) algorithm
|
|
9
9
|
* - PPOLearner: Proximal Policy Optimization (PPO-Clip) algorithm
|
|
10
|
+
* - MAMLMetaLearner: Model-Agnostic Meta-Learning for fast adaptation
|
|
10
11
|
*/
|
|
11
12
|
import { AbstractRLLearner, RLConfig, QValue } from './AbstractRLLearner';
|
|
12
13
|
import { QLearning, QLearningConfig } from '../QLearning';
|
|
13
14
|
import { SARSALearner, SARSAConfig } from './SARSALearner';
|
|
14
15
|
import { ActorCriticLearner, ActorCriticConfig, createDefaultActorCriticConfig } from './ActorCriticLearner';
|
|
15
16
|
import { PPOLearner, PPOConfig, createDefaultPPOConfig } from './PPOLearner';
|
|
17
|
+
import { MAMLMetaLearner, MAMLConfig, createDefaultMAMLConfig } from './MAMLMetaLearner';
|
|
16
18
|
export { AbstractRLLearner, RLConfig, QValue };
|
|
17
19
|
export { QLearning, QLearningConfig };
|
|
18
20
|
export { SARSALearner, SARSAConfig };
|
|
19
21
|
export { ActorCriticLearner, ActorCriticConfig, createDefaultActorCriticConfig };
|
|
20
22
|
export { PPOLearner, PPOConfig, createDefaultPPOConfig };
|
|
23
|
+
export { MAMLMetaLearner, MAMLConfig, createDefaultMAMLConfig };
|
|
21
24
|
/**
|
|
22
25
|
* Supported RL algorithm types
|
|
23
26
|
*/
|
|
24
|
-
export type RLAlgorithmType = 'q-learning' | 'sarsa' | 'actor-critic' | 'ppo';
|
|
27
|
+
export type RLAlgorithmType = 'q-learning' | 'sarsa' | 'actor-critic' | 'ppo' | 'maml' | 'legacy';
|
|
25
28
|
/**
|
|
26
29
|
* Factory function to create RL algorithm instances
|
|
27
30
|
*/
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/learning/algorithms/index.ts"],"names":[],"mappings":"AAAA
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/learning/algorithms/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAEH,OAAO,EAAE,iBAAiB,EAAE,QAAQ,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAC1E,OAAO,EAAE,SAAS,EAAE,eAAe,EAAE,MAAM,cAAc,CAAC;AAC1D,OAAO,EAAE,YAAY,EAAE,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAC3D,OAAO,EAAE,kBAAkB,EAAE,iBAAiB,EAAE,8BAA8B,EAAE,MAAM,sBAAsB,CAAC;AAC7G,OAAO,EAAE,UAAU,EAAE,SAAS,EAAE,sBAAsB,EAAE,MAAM,cAAc,CAAC;AAC7E,OAAO,EAAE,eAAe,EAAE,UAAU,EAAE,uBAAuB,EAAE,MAAM,mBAAmB,CAAC;AAEzF,OAAO,EAAE,iBAAiB,EAAE,QAAQ,EAAE,MAAM,EAAE,CAAC;AAC/C,OAAO,EAAE,SAAS,EAAE,eAAe,EAAE,CAAC;AACtC,OAAO,EAAE,YAAY,EAAE,WAAW,EAAE,CAAC;AACrC,OAAO,EAAE,kBAAkB,EAAE,iBAAiB,EAAE,8BAA8B,EAAE,CAAC;AACjF,OAAO,EAAE,UAAU,EAAE,SAAS,EAAE,sBAAsB,EAAE,CAAC;AACzD,OAAO,EAAE,eAAe,EAAE,UAAU,EAAE,uBAAuB,EAAE,CAAC;AAEhE;;GAEG;AACH,MAAM,MAAM,eAAe,GAAG,YAAY,GAAG,OAAO,GAAG,cAAc,GAAG,KAAK,GAAG,MAAM,GAAG,QAAQ,CAAC;AAElG;;GAEG;AACH,wBAAgB,iBAAiB,CAC/B,IAAI,EAAE,eAAe,EACrB,MAAM,CAAC,EAAE,GAAG,GACX,iBAAiB,CAenB"}
|
|
@@ -8,9 +8,10 @@
|
|
|
8
8
|
* - SARSALearner: On-policy TD(0) algorithm
|
|
9
9
|
* - ActorCriticLearner: Advantage Actor-Critic (A2C) algorithm
|
|
10
10
|
* - PPOLearner: Proximal Policy Optimization (PPO-Clip) algorithm
|
|
11
|
+
* - MAMLMetaLearner: Model-Agnostic Meta-Learning for fast adaptation
|
|
11
12
|
*/
|
|
12
13
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
13
|
-
exports.createDefaultPPOConfig = exports.PPOLearner = exports.createDefaultActorCriticConfig = exports.ActorCriticLearner = exports.SARSALearner = exports.QLearning = exports.AbstractRLLearner = void 0;
|
|
14
|
+
exports.createDefaultMAMLConfig = exports.MAMLMetaLearner = exports.createDefaultPPOConfig = exports.PPOLearner = exports.createDefaultActorCriticConfig = exports.ActorCriticLearner = exports.SARSALearner = exports.QLearning = exports.AbstractRLLearner = void 0;
|
|
14
15
|
exports.createRLAlgorithm = createRLAlgorithm;
|
|
15
16
|
const AbstractRLLearner_1 = require("./AbstractRLLearner");
|
|
16
17
|
Object.defineProperty(exports, "AbstractRLLearner", { enumerable: true, get: function () { return AbstractRLLearner_1.AbstractRLLearner; } });
|
|
@@ -24,6 +25,9 @@ Object.defineProperty(exports, "createDefaultActorCriticConfig", { enumerable: t
|
|
|
24
25
|
const PPOLearner_1 = require("./PPOLearner");
|
|
25
26
|
Object.defineProperty(exports, "PPOLearner", { enumerable: true, get: function () { return PPOLearner_1.PPOLearner; } });
|
|
26
27
|
Object.defineProperty(exports, "createDefaultPPOConfig", { enumerable: true, get: function () { return PPOLearner_1.createDefaultPPOConfig; } });
|
|
28
|
+
const MAMLMetaLearner_1 = require("./MAMLMetaLearner");
|
|
29
|
+
Object.defineProperty(exports, "MAMLMetaLearner", { enumerable: true, get: function () { return MAMLMetaLearner_1.MAMLMetaLearner; } });
|
|
30
|
+
Object.defineProperty(exports, "createDefaultMAMLConfig", { enumerable: true, get: function () { return MAMLMetaLearner_1.createDefaultMAMLConfig; } });
|
|
27
31
|
/**
|
|
28
32
|
* Factory function to create RL algorithm instances
|
|
29
33
|
*/
|
|
@@ -37,6 +41,8 @@ function createRLAlgorithm(type, config) {
|
|
|
37
41
|
return new ActorCriticLearner_1.ActorCriticLearner(config ?? (0, ActorCriticLearner_1.createDefaultActorCriticConfig)());
|
|
38
42
|
case 'ppo':
|
|
39
43
|
return new PPOLearner_1.PPOLearner(config ?? (0, PPOLearner_1.createDefaultPPOConfig)());
|
|
44
|
+
case 'maml':
|
|
45
|
+
return new MAMLMetaLearner_1.MAMLMetaLearner(config ?? (0, MAMLMetaLearner_1.createDefaultMAMLConfig)());
|
|
40
46
|
default:
|
|
41
47
|
throw new Error(`Unknown RL algorithm type: ${type}`);
|
|
42
48
|
}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/learning/algorithms/index.ts"],"names":[],"mappings":";AAAA
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/learning/algorithms/index.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;GAUG;;;AAwBH,8CAkBC;AAxCD,2DAA0E;AAOjE,kGAPA,qCAAiB,OAOA;AAN1B,4CAA0D;AAOjD,0FAPA,qBAAS,OAOA;AANlB,iDAA2D;AAOlD,6FAPA,2BAAY,OAOA;AANrB,6DAA6G;AAOpG,mGAPA,uCAAkB,OAOA;AAAqB,+GAPA,mDAA8B,OAOA;AAN9E,6CAA6E;AAOpE,2FAPA,uBAAU,OAOA;AAAa,uGAPA,mCAAsB,OAOA;AANtD,uDAAyF;AAOhF,gGAPA,iCAAe,OAOA;AAAc,wGAPA,yCAAuB,OAOA;AAO7D;;GAEG;AACH,SAAgB,iBAAiB,CAC/B,IAAqB,EACrB,MAAY;IAEZ,QAAQ,IAAI,EAAE,CAAC;QACb,KAAK,YAAY;YACf,OAAO,IAAI,qBAAS,CAAC,MAAM,CAAC,CAAC;QAC/B,KAAK,OAAO;YACV,OAAO,IAAI,2BAAY,CAAC,MAAM,CAAC,CAAC;QAClC,KAAK,cAAc;YACjB,OAAO,IAAI,uCAAkB,CAAC,MAAM,IAAI,IAAA,mDAA8B,GAAE,CAAC,CAAC;QAC5E,KAAK,KAAK;YACR,OAAO,IAAI,uBAAU,CAAC,MAAM,IAAI,IAAA,mCAAsB,GAAE,CAAC,CAAC;QAC5D,KAAK,MAAM;YACT,OAAO,IAAI,iCAAe,CAAC,MAAM,IAAI,IAAA,yCAAuB,GAAE,CAAC,CAAC;QAClE;YACE,MAAM,IAAI,KAAK,CAAC,8BAA8B,IAAI,EAAE,CAAC,CAAC;IAC1D,CAAC;AACH,CAAC"}
|
package/dist/learning/index.d.ts
CHANGED
|
@@ -20,4 +20,12 @@ export * from './SwarmIntegration';
|
|
|
20
20
|
export { FixRecommendationEngine } from './FixRecommendationEngine';
|
|
21
21
|
export { AbstractRLLearner, RLConfig, QValue, SARSALearner, SARSAConfig, ActorCriticLearner, ActorCriticConfig, createDefaultActorCriticConfig, PPOLearner, PPOConfig, createDefaultPPOConfig, createRLAlgorithm } from './algorithms';
|
|
22
22
|
export { ExperienceSharingProtocol, ExperienceSharingConfig, SharedExperience, SharingStats, PeerConnection, SharingEvent } from './ExperienceSharingProtocol';
|
|
23
|
+
export { ExplainableLearning, ActionExplanation, ActionAlternative, ContributingExperience, DecisionFactor, StructuredExplanation, NaturalLanguageExplanation, DecisionType } from './ExplainableLearning';
|
|
24
|
+
export { GossipPatternSharingProtocol, SharedPattern, PatternSharingConfig, PatternSharingStats, AntiEntropyResult, PatternSharingEvent } from './GossipPatternSharingProtocol';
|
|
25
|
+
export { TransferLearningManager, TransferConfig, TransferMetrics, TransferMapping, FineTuningResult } from './TransferLearningManager';
|
|
26
|
+
export type { QEDomain, DomainFeatures } from './TransferLearningManager';
|
|
27
|
+
export { PerformanceOptimizer } from './PerformanceOptimizer';
|
|
28
|
+
export type { PerformanceOptimizerConfig } from './PerformanceOptimizer';
|
|
29
|
+
export { PrivacyManager, PrivacyLevel } from './PrivacyManager';
|
|
30
|
+
export type { PrivacyConfig, SanitizedExperience, RetentionPolicyResult } from './PrivacyManager';
|
|
23
31
|
//# sourceMappingURL=index.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/learning/index.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAEH,cAAc,SAAS,CAAC;AACxB,cAAc,kBAAkB,CAAC;AACjC,cAAc,aAAa,CAAC;AAC5B,cAAc,0BAA0B,CAAC;AACzC,cAAc,sBAAsB,CAAC;AACrC,cAAc,mBAAmB,CAAC;AAClC,cAAc,qBAAqB,CAAC;AACpC,cAAc,qBAAqB,CAAC;AACpC,cAAc,wBAAwB,CAAC;AACvC,cAAc,2BAA2B,CAAC;AAC1C,cAAc,uBAAuB,CAAC;AACtC,cAAc,oBAAoB,CAAC;AAGnC,OAAO,EACL,uBAAuB,EACxB,MAAM,2BAA2B,CAAC;AAInC,OAAO,EACL,iBAAiB,EACjB,QAAQ,EACR,MAAM,EACN,YAAY,EACZ,WAAW,EACX,kBAAkB,EAClB,iBAAiB,EACjB,8BAA8B,EAC9B,UAAU,EACV,SAAS,EACT,sBAAsB,EACtB,iBAAiB,EAClB,MAAM,cAAc,CAAC;AAGtB,OAAO,EACL,yBAAyB,EACzB,uBAAuB,EACvB,gBAAgB,EAChB,YAAY,EACZ,cAAc,EACd,YAAY,EACb,MAAM,6BAA6B,CAAC"}
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/learning/index.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAEH,cAAc,SAAS,CAAC;AACxB,cAAc,kBAAkB,CAAC;AACjC,cAAc,aAAa,CAAC;AAC5B,cAAc,0BAA0B,CAAC;AACzC,cAAc,sBAAsB,CAAC;AACrC,cAAc,mBAAmB,CAAC;AAClC,cAAc,qBAAqB,CAAC;AACpC,cAAc,qBAAqB,CAAC;AACpC,cAAc,wBAAwB,CAAC;AACvC,cAAc,2BAA2B,CAAC;AAC1C,cAAc,uBAAuB,CAAC;AACtC,cAAc,oBAAoB,CAAC;AAGnC,OAAO,EACL,uBAAuB,EACxB,MAAM,2BAA2B,CAAC;AAInC,OAAO,EACL,iBAAiB,EACjB,QAAQ,EACR,MAAM,EACN,YAAY,EACZ,WAAW,EACX,kBAAkB,EAClB,iBAAiB,EACjB,8BAA8B,EAC9B,UAAU,EACV,SAAS,EACT,sBAAsB,EACtB,iBAAiB,EAClB,MAAM,cAAc,CAAC;AAGtB,OAAO,EACL,yBAAyB,EACzB,uBAAuB,EACvB,gBAAgB,EAChB,YAAY,EACZ,cAAc,EACd,YAAY,EACb,MAAM,6BAA6B,CAAC;AAGrC,OAAO,EACL,mBAAmB,EACnB,iBAAiB,EACjB,iBAAiB,EACjB,sBAAsB,EACtB,cAAc,EACd,qBAAqB,EACrB,0BAA0B,EAC1B,YAAY,EACb,MAAM,uBAAuB,CAAC;AAG/B,OAAO,EACL,4BAA4B,EAC5B,aAAa,EACb,oBAAoB,EACpB,mBAAmB,EACnB,iBAAiB,EACjB,mBAAmB,EACpB,MAAM,gCAAgC,CAAC;AAGxC,OAAO,EACL,uBAAuB,EACvB,cAAc,EACd,eAAe,EACf,eAAe,EACf,gBAAgB,EACjB,MAAM,2BAA2B,CAAC;AACnC,YAAY,EAAE,QAAQ,EAAE,cAAc,EAAE,MAAM,2BAA2B,CAAC;AAG1E,OAAO,EACL,oBAAoB,EACrB,MAAM,wBAAwB,CAAC;AAChC,YAAY,EAAE,0BAA0B,EAAE,MAAM,wBAAwB,CAAC;AAGzE,OAAO,EACL,cAAc,EACd,YAAY,EACb,MAAM,kBAAkB,CAAC;AAC1B,YAAY,EACV,aAAa,EACb,mBAAmB,EACnB,qBAAqB,EACtB,MAAM,kBAAkB,CAAC"}
|