agentic-qe 2.1.2 → 2.2.1
This diff shows the changes between publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
- package/.claude/skills/agentic-quality-engineering/SKILL.md +4 -4
- package/.claude/skills/cicd-pipeline-qe-orchestrator/README.md +14 -11
- package/.claude/skills/skills-manifest.json +2 -2
- package/CHANGELOG.md +138 -0
- package/README.md +92 -214
- package/dist/agents/BaseAgent.d.ts +5 -1
- package/dist/agents/BaseAgent.d.ts.map +1 -1
- package/dist/agents/BaseAgent.js +32 -17
- package/dist/agents/BaseAgent.js.map +1 -1
- package/dist/agents/index.d.ts.map +1 -1
- package/dist/agents/index.js +5 -1
- package/dist/agents/index.js.map +1 -1
- package/dist/cli/commands/improve/index.d.ts +8 -1
- package/dist/cli/commands/improve/index.d.ts.map +1 -1
- package/dist/cli/commands/improve/index.js +18 -16
- package/dist/cli/commands/improve/index.js.map +1 -1
- package/dist/cli/commands/learn/index.d.ts +10 -2
- package/dist/cli/commands/learn/index.d.ts.map +1 -1
- package/dist/cli/commands/learn/index.js +99 -63
- package/dist/cli/commands/learn/index.js.map +1 -1
- package/dist/cli/commands/patterns/index.d.ts +8 -1
- package/dist/cli/commands/patterns/index.d.ts.map +1 -1
- package/dist/cli/commands/patterns/index.js +79 -45
- package/dist/cli/commands/patterns/index.js.map +1 -1
- package/dist/cli/commands/routing/index.d.ts +5 -0
- package/dist/cli/commands/routing/index.d.ts.map +1 -1
- package/dist/cli/commands/routing/index.js +11 -10
- package/dist/cli/commands/routing/index.js.map +1 -1
- package/dist/cli/init/agents.d.ts +1 -1
- package/dist/cli/init/agents.js +2 -2
- package/dist/cli/init/database-init.d.ts +7 -0
- package/dist/cli/init/database-init.d.ts.map +1 -1
- package/dist/cli/init/database-init.js +29 -48
- package/dist/cli/init/database-init.js.map +1 -1
- package/dist/core/di/AgentDependencies.d.ts +127 -0
- package/dist/core/di/AgentDependencies.d.ts.map +1 -0
- package/dist/core/di/AgentDependencies.js +251 -0
- package/dist/core/di/AgentDependencies.js.map +1 -0
- package/dist/core/di/DIContainer.d.ts +149 -0
- package/dist/core/di/DIContainer.d.ts.map +1 -0
- package/dist/core/di/DIContainer.js +333 -0
- package/dist/core/di/DIContainer.js.map +1 -0
- package/dist/core/di/index.d.ts +11 -0
- package/dist/core/di/index.d.ts.map +1 -0
- package/dist/core/di/index.js +22 -0
- package/dist/core/di/index.js.map +1 -0
- package/dist/core/index.d.ts +1 -0
- package/dist/core/index.d.ts.map +1 -1
- package/dist/core/index.js +11 -1
- package/dist/core/index.js.map +1 -1
- package/dist/core/memory/HNSWVectorMemory.d.ts +261 -0
- package/dist/core/memory/HNSWVectorMemory.d.ts.map +1 -0
- package/dist/core/memory/HNSWVectorMemory.js +647 -0
- package/dist/core/memory/HNSWVectorMemory.js.map +1 -0
- package/dist/core/memory/SwarmMemoryManager.d.ts +7 -0
- package/dist/core/memory/SwarmMemoryManager.d.ts.map +1 -1
- package/dist/core/memory/SwarmMemoryManager.js +9 -0
- package/dist/core/memory/SwarmMemoryManager.js.map +1 -1
- package/dist/core/memory/index.d.ts +2 -0
- package/dist/core/memory/index.d.ts.map +1 -1
- package/dist/core/memory/index.js +11 -1
- package/dist/core/memory/index.js.map +1 -1
- package/dist/learning/ExperienceSharingProtocol.d.ts +243 -0
- package/dist/learning/ExperienceSharingProtocol.d.ts.map +1 -0
- package/dist/learning/ExperienceSharingProtocol.js +538 -0
- package/dist/learning/ExperienceSharingProtocol.js.map +1 -0
- package/dist/learning/ExplainableLearning.d.ts +191 -0
- package/dist/learning/ExplainableLearning.d.ts.map +1 -0
- package/dist/learning/ExplainableLearning.js +441 -0
- package/dist/learning/ExplainableLearning.js.map +1 -0
- package/dist/learning/GossipPatternSharingProtocol.d.ts +228 -0
- package/dist/learning/GossipPatternSharingProtocol.d.ts.map +1 -0
- package/dist/learning/GossipPatternSharingProtocol.js +590 -0
- package/dist/learning/GossipPatternSharingProtocol.js.map +1 -0
- package/dist/learning/LearningEngine.d.ts +104 -4
- package/dist/learning/LearningEngine.d.ts.map +1 -1
- package/dist/learning/LearningEngine.js +350 -16
- package/dist/learning/LearningEngine.js.map +1 -1
- package/dist/learning/PerformanceOptimizer.d.ts +268 -0
- package/dist/learning/PerformanceOptimizer.d.ts.map +1 -0
- package/dist/learning/PerformanceOptimizer.js +552 -0
- package/dist/learning/PerformanceOptimizer.js.map +1 -0
- package/dist/learning/PrivacyManager.d.ts +197 -0
- package/dist/learning/PrivacyManager.d.ts.map +1 -0
- package/dist/learning/PrivacyManager.js +551 -0
- package/dist/learning/PrivacyManager.js.map +1 -0
- package/dist/learning/QLearning.d.ts +38 -125
- package/dist/learning/QLearning.d.ts.map +1 -1
- package/dist/learning/QLearning.js +46 -267
- package/dist/learning/QLearning.js.map +1 -1
- package/dist/learning/QLearningLegacy.d.ts +154 -0
- package/dist/learning/QLearningLegacy.d.ts.map +1 -0
- package/dist/learning/QLearningLegacy.js +337 -0
- package/dist/learning/QLearningLegacy.js.map +1 -0
- package/dist/learning/TransferLearningManager.d.ts +212 -0
- package/dist/learning/TransferLearningManager.d.ts.map +1 -0
- package/dist/learning/TransferLearningManager.js +497 -0
- package/dist/learning/TransferLearningManager.js.map +1 -0
- package/dist/learning/algorithms/AbstractRLLearner.d.ts +162 -0
- package/dist/learning/algorithms/AbstractRLLearner.d.ts.map +1 -0
- package/dist/learning/algorithms/AbstractRLLearner.js +300 -0
- package/dist/learning/algorithms/AbstractRLLearner.js.map +1 -0
- package/dist/learning/algorithms/ActorCriticLearner.d.ts +201 -0
- package/dist/learning/algorithms/ActorCriticLearner.d.ts.map +1 -0
- package/dist/learning/algorithms/ActorCriticLearner.js +447 -0
- package/dist/learning/algorithms/ActorCriticLearner.js.map +1 -0
- package/dist/learning/algorithms/MAMLMetaLearner.d.ts +218 -0
- package/dist/learning/algorithms/MAMLMetaLearner.d.ts.map +1 -0
- package/dist/learning/algorithms/MAMLMetaLearner.js +532 -0
- package/dist/learning/algorithms/MAMLMetaLearner.js.map +1 -0
- package/dist/learning/algorithms/PPOLearner.d.ts +207 -0
- package/dist/learning/algorithms/PPOLearner.d.ts.map +1 -0
- package/dist/learning/algorithms/PPOLearner.js +490 -0
- package/dist/learning/algorithms/PPOLearner.js.map +1 -0
- package/dist/learning/algorithms/QLearning.d.ts +68 -0
- package/dist/learning/algorithms/QLearning.d.ts.map +1 -0
- package/dist/learning/algorithms/QLearning.js +116 -0
- package/dist/learning/algorithms/QLearning.js.map +1 -0
- package/dist/learning/algorithms/SARSALearner.d.ts +107 -0
- package/dist/learning/algorithms/SARSALearner.d.ts.map +1 -0
- package/dist/learning/algorithms/SARSALearner.js +252 -0
- package/dist/learning/algorithms/SARSALearner.js.map +1 -0
- package/dist/learning/algorithms/index.d.ts +32 -0
- package/dist/learning/algorithms/index.d.ts.map +1 -0
- package/dist/learning/algorithms/index.js +50 -0
- package/dist/learning/algorithms/index.js.map +1 -0
- package/dist/learning/index.d.ts +11 -0
- package/dist/learning/index.d.ts.map +1 -1
- package/dist/learning/index.js +31 -1
- package/dist/learning/index.js.map +1 -1
- package/dist/learning/types.d.ts +2 -0
- package/dist/learning/types.d.ts.map +1 -1
- package/dist/mcp/server-instructions.d.ts +1 -1
- package/dist/mcp/server-instructions.js +1 -1
- package/dist/memory/DistributedPatternLibrary.d.ts +159 -0
- package/dist/memory/DistributedPatternLibrary.d.ts.map +1 -0
- package/dist/memory/DistributedPatternLibrary.js +370 -0
- package/dist/memory/DistributedPatternLibrary.js.map +1 -0
- package/dist/memory/PatternQualityScorer.d.ts +169 -0
- package/dist/memory/PatternQualityScorer.d.ts.map +1 -0
- package/dist/memory/PatternQualityScorer.js +327 -0
- package/dist/memory/PatternQualityScorer.js.map +1 -0
- package/dist/memory/PatternReplicationService.d.ts +187 -0
- package/dist/memory/PatternReplicationService.d.ts.map +1 -0
- package/dist/memory/PatternReplicationService.js +392 -0
- package/dist/memory/PatternReplicationService.js.map +1 -0
- package/dist/providers/ClaudeProvider.d.ts +98 -0
- package/dist/providers/ClaudeProvider.d.ts.map +1 -0
- package/dist/providers/ClaudeProvider.js +418 -0
- package/dist/providers/ClaudeProvider.js.map +1 -0
- package/dist/providers/HybridRouter.d.ts +217 -0
- package/dist/providers/HybridRouter.d.ts.map +1 -0
- package/dist/providers/HybridRouter.js +679 -0
- package/dist/providers/HybridRouter.js.map +1 -0
- package/dist/providers/ILLMProvider.d.ts +287 -0
- package/dist/providers/ILLMProvider.d.ts.map +1 -0
- package/dist/providers/ILLMProvider.js +33 -0
- package/dist/providers/ILLMProvider.js.map +1 -0
- package/dist/providers/LLMProviderFactory.d.ts +154 -0
- package/dist/providers/LLMProviderFactory.d.ts.map +1 -0
- package/dist/providers/LLMProviderFactory.js +426 -0
- package/dist/providers/LLMProviderFactory.js.map +1 -0
- package/dist/providers/RuvllmProvider.d.ts +107 -0
- package/dist/providers/RuvllmProvider.d.ts.map +1 -0
- package/dist/providers/RuvllmProvider.js +417 -0
- package/dist/providers/RuvllmProvider.js.map +1 -0
- package/dist/providers/index.d.ts +32 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +75 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/telemetry/LearningTelemetry.d.ts +190 -0
- package/dist/telemetry/LearningTelemetry.d.ts.map +1 -0
- package/dist/telemetry/LearningTelemetry.js +403 -0
- package/dist/telemetry/LearningTelemetry.js.map +1 -0
- package/dist/telemetry/index.d.ts +1 -0
- package/dist/telemetry/index.d.ts.map +1 -1
- package/dist/telemetry/index.js +20 -2
- package/dist/telemetry/index.js.map +1 -1
- package/dist/telemetry/instrumentation/agent.d.ts +1 -1
- package/dist/telemetry/instrumentation/agent.js +1 -1
- package/dist/telemetry/instrumentation/index.d.ts +1 -1
- package/dist/telemetry/instrumentation/index.js +1 -1
- package/dist/utils/math.d.ts +11 -0
- package/dist/utils/math.d.ts.map +1 -0
- package/dist/utils/math.js +16 -0
- package/dist/utils/math.js.map +1 -0
- package/docs/reference/agents.md +1 -1
- package/docs/reference/skills.md +3 -3
- package/docs/reference/usage.md +4 -4
- package/package.json +1 -1

package/dist/learning/ExplainableLearning.d.ts
@@ -0,0 +1,191 @@
/**
 * ExplainableLearning - Explainable AI for Reinforcement Learning Decisions
 *
 * Provides human-readable explanations for RL agent decisions, tracking:
 * - Action selection rationale (Q-values, exploration vs exploitation)
 * - Confidence scores based on experience history
 * - Contributing experiences that influenced decisions
 * - Decision factors and alternative actions
 *
 * Supports transparency and trust in agent decision-making for issue #118
 */
import { TaskState, AgentAction, TaskExperience } from './types';
/**
 * Decision type: exploration or exploitation
 */
export type DecisionType = 'exploration' | 'exploitation';
/**
 * Explanation for action selection
 */
export interface ActionExplanation {
    /** The selected action */
    selectedAction: AgentAction;
    /** Decision type (exploration or exploitation) */
    decisionType: DecisionType;
    /** Q-value of the selected action */
    qValue: number;
    /** Confidence score (0-1) based on experience */
    confidence: number;
    /** Human-readable reasoning */
    reasoning: string;
    /** Alternative actions considered */
    alternatives: ActionAlternative[];
    /** Contributing experiences that led to this decision */
    contributingExperiences: ContributingExperience[];
    /** Decision factors that influenced selection */
    decisionFactors: DecisionFactor[];
    /** Timestamp of decision */
    timestamp: Date;
}
/**
 * Alternative action that was considered
 */
export interface ActionAlternative {
    /** The alternative action */
    action: AgentAction;
    /** Q-value of this alternative */
    qValue: number;
    /** Confidence score for this alternative */
    confidence: number;
    /** Why it wasn't selected */
    reason: string;
}
/**
 * Experience that contributed to the decision
 */
export interface ContributingExperience {
    /** Experience ID */
    experienceId: string;
    /** Task type */
    taskType: string;
    /** Reward received */
    reward: number;
    /** How similar this experience is to current state (0-1) */
    similarity: number;
    /** Timestamp of experience */
    timestamp: Date;
}
/**
 * Factor that influenced the decision
 */
export interface DecisionFactor {
    /** Factor name */
    name: string;
    /** Factor value */
    value: number | string;
    /** Impact on decision (0-1) */
    impact: number;
    /** Description of this factor */
    description: string;
}
/**
 * Structured explanation format for programmatic use
 */
export interface StructuredExplanation {
    /** Action explanation */
    explanation: ActionExplanation;
    /** State representation */
    state: TaskState;
    /** All available actions */
    availableActions: AgentAction[];
    /** Exploration rate at time of decision */
    explorationRate: number;
}
/**
 * Natural language explanation format
 */
export interface NaturalLanguageExplanation {
    /** Summary sentence */
    summary: string;
    /** Detailed explanation paragraphs */
    details: string[];
    /** Key metrics */
    metrics: Record<string, string>;
    /** Recommendations */
    recommendations: string[];
}
/**
 * ExplainableLearning - Generates explanations for RL decisions
 */
export declare class ExplainableLearning {
    private readonly logger;
    constructor();
    /**
     * Generate explanation for action selection
     *
     * @param state - Current task state
     * @param selectedAction - The action that was selected
     * @param availableActions - All available actions
     * @param qValues - Q-values for state-action pairs
     * @param explorationRate - Current exploration rate
     * @param experiences - Historical experiences for this agent
     * @param wasExploration - Whether this was an exploration decision
     * @returns Complete action explanation
     */
    explainAction(state: TaskState, selectedAction: AgentAction, availableActions: AgentAction[], qValues: Map<string, number>, explorationRate: number, experiences: TaskExperience[], wasExploration: boolean): ActionExplanation;
    /**
     * Calculate confidence score based on experience history
     * Higher confidence when:
     * - More experiences with similar states
     * - Higher success rate
     * - More recent positive experiences
     */
    private calculateConfidence;
    /**
     * Get experience statistics for state-action pair
     */
    private getExperienceStats;
    /**
     * Generate human-readable reasoning
     */
    private generateReasoning;
    /**
     * Identify alternative actions and explain why they weren't selected
     */
    private identifyAlternatives;
    /**
     * Find experiences that contributed to this decision
     */
    private findContributingExperiences;
    /**
     * Calculate similarity between two states (0-1)
     */
    private calculateStateSimilarity;
    /**
     * Analyze decision factors
     */
    private analyzeDecisionFactors;
    /**
     * Export explanation in structured format
     */
    exportStructured(explanation: ActionExplanation, state: TaskState, availableActions: AgentAction[], explorationRate: number): StructuredExplanation;
    /**
     * Export explanation in natural language format
     */
    exportNaturalLanguage(explanation: ActionExplanation): NaturalLanguageExplanation;
    /**
     * Generate summary sentence
     */
    private generateSummary;
    /**
     * Generate detailed explanation paragraphs
     */
    private generateDetails;
    /**
     * Generate key metrics
     */
    private generateMetrics;
    /**
     * Generate recommendations for user
     */
    private generateRecommendations;
    /**
     * Encode state to string key (matches LearningEngine encoding)
     */
    private encodeState;
    /**
     * Encode action to string key (matches LearningEngine encoding)
     */
    private encodeAction;
}
//# sourceMappingURL=ExplainableLearning.d.ts.map
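The declaration above is the whole public surface of the new module. A minimal usage sketch follows; the TaskState and AgentAction field shapes are inferred from this diff (the real definitions live in ./types, which this hunk does not include), the deep import path is an assumption, and every value is hypothetical.

// Minimal sketch, assuming the deep import path below is reachable; object
// shapes are inferred from this diff and all values are hypothetical.
import { ExplainableLearning } from 'agentic-qe/dist/learning/ExplainableLearning';

const explainer = new ExplainableLearning();

const state = {
    taskComplexity: 0.6,        // 0 = simple, 1 = complex
    requiredCapabilities: ['unit-testing'],
    contextFeatures: {},
    previousAttempts: 1,
    availableResources: 0.8
};
const chosen = { strategy: 'parallel', parallelization: 0.5, retryPolicy: 'retry' };

const explanation = explainer.explainAction(
    state,
    chosen,
    [chosen],                                  // all actions that were available
    new Map([['parallel:0.5:retry', 0.42]]),   // Q-values keyed by encoded action
    0.1,                                       // exploration rate at decision time
    [],                                        // no TaskExperience history yet
    false                                      // exploitation, not exploration
);

console.log(explainer.exportNaturalLanguage(explanation).summary);
// -> 'Selected "parallel" strategy with 10% confidence based on Q-value of 0.420.'

exportStructured returns the same explanation with the raw state and action list attached, for programmatic consumers.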
package/dist/learning/ExplainableLearning.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"ExplainableLearning.d.ts","sourceRoot":"","sources":["../../src/learning/ExplainableLearning.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAGH,OAAO,EAAE,SAAS,EAAE,WAAW,EAAE,cAAc,EAAE,MAAM,SAAS,CAAC;AAEjE;;GAEG;AACH,MAAM,MAAM,YAAY,GAAG,aAAa,GAAG,cAAc,CAAC;AAE1D;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAChC,0BAA0B;IAC1B,cAAc,EAAE,WAAW,CAAC;IAE5B,kDAAkD;IAClD,YAAY,EAAE,YAAY,CAAC;IAE3B,qCAAqC;IACrC,MAAM,EAAE,MAAM,CAAC;IAEf,iDAAiD;IACjD,UAAU,EAAE,MAAM,CAAC;IAEnB,+BAA+B;IAC/B,SAAS,EAAE,MAAM,CAAC;IAElB,qCAAqC;IACrC,YAAY,EAAE,iBAAiB,EAAE,CAAC;IAElC,yDAAyD;IACzD,uBAAuB,EAAE,sBAAsB,EAAE,CAAC;IAElD,iDAAiD;IACjD,eAAe,EAAE,cAAc,EAAE,CAAC;IAElC,4BAA4B;IAC5B,SAAS,EAAE,IAAI,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAChC,6BAA6B;IAC7B,MAAM,EAAE,WAAW,CAAC;IAEpB,kCAAkC;IAClC,MAAM,EAAE,MAAM,CAAC;IAEf,4CAA4C;IAC5C,UAAU,EAAE,MAAM,CAAC;IAEnB,6BAA6B;IAC7B,MAAM,EAAE,MAAM,CAAC;CAChB;AAED;;GAEG;AACH,MAAM,WAAW,sBAAsB;IACrC,oBAAoB;IACpB,YAAY,EAAE,MAAM,CAAC;IAErB,gBAAgB;IAChB,QAAQ,EAAE,MAAM,CAAC;IAEjB,sBAAsB;IACtB,MAAM,EAAE,MAAM,CAAC;IAEf,4DAA4D;IAC5D,UAAU,EAAE,MAAM,CAAC;IAEnB,8BAA8B;IAC9B,SAAS,EAAE,IAAI,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC7B,kBAAkB;IAClB,IAAI,EAAE,MAAM,CAAC;IAEb,mBAAmB;IACnB,KAAK,EAAE,MAAM,GAAG,MAAM,CAAC;IAEvB,+BAA+B;IAC/B,MAAM,EAAE,MAAM,CAAC;IAEf,iCAAiC;IACjC,WAAW,EAAE,MAAM,CAAC;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC,yBAAyB;IACzB,WAAW,EAAE,iBAAiB,CAAC;IAE/B,2BAA2B;IAC3B,KAAK,EAAE,SAAS,CAAC;IAEjB,4BAA4B;IAC5B,gBAAgB,EAAE,WAAW,EAAE,CAAC;IAEhC,2CAA2C;IAC3C,eAAe,EAAE,MAAM,CAAC;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,0BAA0B;IACzC,uBAAuB;IACvB,OAAO,EAAE,MAAM,CAAC;IAEhB,sCAAsC;IACtC,OAAO,EAAE,MAAM,EAAE,CAAC;IAElB,kBAAkB;IAClB,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAEhC,sBAAsB;IACtB,eAAe,EAAE,MAAM,EAAE,CAAC;CAC3B;AAYD;;GAEG;AACH,qBAAa,mBAAmB;IAC9B,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAS;;IAOhC;;;;;;;;;;;OAWG;IACH,aAAa,CACX,KAAK,EAAE,SAAS,EAChB,cAAc,EAAE,WAAW,EAC3B,gBAAgB,EAAE,WAAW,EAAE,EAC/B,OAAO,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,EAC5B,eAAe,EAAE,MAAM,EACvB,WAAW,EAAE,cAAc,EAAE,EAC7B,cAAc,EAAE,OAAO,GACtB,iBAAiB;IA+DpB;;;;;;OAMG;IACH,OAAO,CAAC,mBAAmB;IAyB3B;;OAEG;IACH,OAAO,CAAC,kBAAkB;IAsC1B;;OAEG;IACH,OAAO,CAAC,iBAAiB;IA+BzB;;OAEG;IACH,OAAO,CAAC,oBAAoB;IAqD5B;;OAEG;IACH,OAAO,CAAC,2BAA2B;IA0CnC;;OAEG;IACH,OAAO,CAAC,wBAAwB;IA+BhC;;OAEG;IACH,OAAO,CAAC,sBAAsB;IAwE9B;;OAEG;IACH,gBAAgB,CACd,WAAW,EAAE,iBAAiB,EAC9B,KAAK,EAAE,SAAS,EAChB,gBAAgB,EAAE,WAAW,EAAE,EAC/B,eAAe,EAAE,MAAM,GACtB,qBAAqB;IASxB;;OAEG;IACH,qBAAqB,CAAC,WAAW,EAAE,iBAAiB,GAAG,0BAA0B;IAcjF;;OAEG;IACH,OAAO,CAAC,eAAe;IAYvB;;OAEG;IACH,OAAO,CAAC,eAAe;IA2CvB;;OAEG;IACH,OAAO,CAAC,eAAe;IAiBvB;;OAEG;IACH,OAAO,CAAC,uBAAuB;IA4C/B;;OAEG;IACH,OAAO,CAAC,WAAW;IAWnB;;OAEG;IACH,OAAO,CAAC,YAAY;CAGrB"}

package/dist/learning/ExplainableLearning.js
@@ -0,0 +1,441 @@
"use strict";
/**
 * ExplainableLearning - Explainable AI for Reinforcement Learning Decisions
 *
 * Provides human-readable explanations for RL agent decisions, tracking:
 * - Action selection rationale (Q-values, exploration vs exploitation)
 * - Confidence scores based on experience history
 * - Contributing experiences that influenced decisions
 * - Decision factors and alternative actions
 *
 * Supports transparency and trust in agent decision-making for issue #118
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.ExplainableLearning = void 0;
const Logger_1 = require("../utils/Logger");
/**
 * ExplainableLearning - Generates explanations for RL decisions
 */
class ExplainableLearning {
    constructor() {
        this.logger = Logger_1.Logger.getInstance();
        this.logger.info('ExplainableLearning initialized');
    }
    /**
     * Generate explanation for action selection
     *
     * @param state - Current task state
     * @param selectedAction - The action that was selected
     * @param availableActions - All available actions
     * @param qValues - Q-values for state-action pairs
     * @param explorationRate - Current exploration rate
     * @param experiences - Historical experiences for this agent
     * @param wasExploration - Whether this was an exploration decision
     * @returns Complete action explanation
     */
    explainAction(state, selectedAction, availableActions, qValues, explorationRate, experiences, wasExploration) {
        const selectedActionKey = this.encodeAction(selectedAction);
        const selectedQValue = qValues.get(selectedActionKey) ?? 0;
        // Determine decision type
        const decisionType = wasExploration ? 'exploration' : 'exploitation';
        // Calculate confidence based on experience
        const confidence = this.calculateConfidence(state, selectedAction, experiences);
        // Generate reasoning
        const reasoning = this.generateReasoning(decisionType, selectedQValue, confidence, explorationRate, availableActions.length, experiences);
        // Identify alternatives
        const alternatives = this.identifyAlternatives(selectedAction, availableActions, qValues, experiences, decisionType);
        // Find contributing experiences
        const contributingExperiences = this.findContributingExperiences(state, selectedAction, experiences);
        // Analyze decision factors
        const decisionFactors = this.analyzeDecisionFactors(state, selectedAction, selectedQValue, explorationRate, confidence, wasExploration);
        return {
            selectedAction,
            decisionType,
            qValue: selectedQValue,
            confidence,
            reasoning,
            alternatives,
            contributingExperiences,
            decisionFactors,
            timestamp: new Date()
        };
    }
    /**
     * Calculate confidence score based on experience history
     * Higher confidence when:
     * - More experiences with similar states
     * - Higher success rate
     * - More recent positive experiences
     */
    calculateConfidence(state, action, experiences) {
        const stats = this.getExperienceStats(state, action, experiences);
        if (stats.totalCount === 0) {
            return 0.1; // Low confidence with no experience
        }
        // Base confidence from experience count (logarithmic scale)
        const experienceConfidence = Math.min(0.4, Math.log10(stats.totalCount + 1) / 2);
        // Success rate confidence
        const successConfidence = stats.successCount > 0
            ? (stats.successCount / stats.totalCount) * 0.3
            : 0;
        // Recent performance confidence
        const recentConfidence = Math.max(0, stats.recentPerformance) * 0.3;
        return Math.min(0.95, experienceConfidence + successConfidence + recentConfidence);
    }
    /**
     * Get experience statistics for state-action pair
     */
    getExperienceStats(state, action, experiences) {
        const stateKey = this.encodeState(state);
        const actionKey = this.encodeAction(action);
        // Find similar experiences
        const similarExperiences = experiences.filter(exp => {
            const expStateKey = this.encodeState(exp.state);
            const expActionKey = this.encodeAction(exp.action);
            // Exact match for now (could use similarity threshold)
            return expStateKey === stateKey && expActionKey === actionKey;
        });
        const totalCount = similarExperiences.length;
        const successCount = similarExperiences.filter(exp => exp.reward > 0).length;
        const averageReward = totalCount > 0
            ? similarExperiences.reduce((sum, exp) => sum + exp.reward, 0) / totalCount
            : 0;
        // Recent performance (last 10 experiences)
        const recentExperiences = similarExperiences.slice(-10);
        const recentPerformance = recentExperiences.length > 0
            ? recentExperiences.reduce((sum, exp) => sum + exp.reward, 0) / recentExperiences.length
            : 0;
        return {
            totalCount,
            successCount,
            averageReward,
            recentPerformance
        };
    }
    /**
     * Generate human-readable reasoning
     */
    generateReasoning(decisionType, qValue, confidence, explorationRate, numActions, experiences) {
        if (decisionType === 'exploration') {
            const explorationPercent = (explorationRate * 100).toFixed(1);
            return `Explored this action (confidence: ${confidence.toFixed(2)}) to gather more information. ` +
                `Current exploration rate is ${explorationPercent}%, balancing learning with exploitation.`;
        }
        // Exploitation
        const qValueFormatted = qValue.toFixed(3);
        const confidencePercent = (confidence * 100).toFixed(0);
        const experienceCount = experiences.length;
        if (qValue > 0.5) {
            return `Selected action with highest Q-value (${qValueFormatted}) among ${numActions} alternatives. ` +
                `High confidence (${confidencePercent}%) based on ${experienceCount} past experiences with strong positive results.`;
        }
        else if (qValue > 0) {
            return `Selected action with Q-value ${qValueFormatted} (moderate positive expectation). ` +
                `Medium confidence (${confidencePercent}%) from ${experienceCount} experiences. More data will improve decision quality.`;
        }
        else {
            return `Selected best available action (Q-value: ${qValueFormatted}) among ${numActions} options. ` +
                `Lower confidence (${confidencePercent}%) suggests limited experience in this state.`;
        }
    }
    /**
     * Identify alternative actions and explain why they weren't selected
     */
    identifyAlternatives(selectedAction, availableActions, qValues, experiences, decisionType) {
        const selectedActionKey = this.encodeAction(selectedAction);
        const alternatives = [];
        for (const action of availableActions) {
            const actionKey = this.encodeAction(action);
            // Skip the selected action
            if (actionKey === selectedActionKey) {
                continue;
            }
            const qValue = qValues.get(actionKey) ?? 0;
            const confidence = this.calculateConfidence({ taskComplexity: 0, requiredCapabilities: [], contextFeatures: {}, previousAttempts: 0, availableResources: 1 }, action, experiences);
            // Determine why it wasn't selected
            let reason;
            if (decisionType === 'exploration') {
                reason = 'Random exploration selected different action';
            }
            else {
                const selectedQValue = qValues.get(selectedActionKey) ?? 0;
                if (qValue < selectedQValue) {
                    reason = `Lower Q-value (${qValue.toFixed(3)} vs ${selectedQValue.toFixed(3)})`;
                }
                else {
                    reason = 'Similar Q-value but other action selected during exploitation';
                }
            }
            alternatives.push({
                action,
                qValue,
                confidence,
                reason
            });
        }
        // Sort by Q-value descending
        alternatives.sort((a, b) => b.qValue - a.qValue);
        // Return top 3 alternatives
        return alternatives.slice(0, 3);
    }
    /**
     * Find experiences that contributed to this decision
     */
    findContributingExperiences(state, action, experiences) {
        const stateKey = this.encodeState(state);
        const actionKey = this.encodeAction(action);
        const contributingExps = [];
        for (const exp of experiences) {
            const expStateKey = this.encodeState(exp.state);
            const expActionKey = this.encodeAction(exp.action);
            // Calculate similarity
            const similarity = this.calculateStateSimilarity(state, exp.state);
            // Include if same action and similar state
            if (expActionKey === actionKey && similarity > 0.5) {
                contributingExps.push({
                    experienceId: exp.taskId,
                    taskType: exp.taskType,
                    reward: exp.reward,
                    similarity,
                    timestamp: exp.timestamp
                });
            }
        }
        // Sort by similarity and recency
        contributingExps.sort((a, b) => {
            const similarityDiff = b.similarity - a.similarity;
            if (Math.abs(similarityDiff) > 0.1) {
                return similarityDiff;
            }
            return b.timestamp.getTime() - a.timestamp.getTime();
        });
        // Return top 5
        return contributingExps.slice(0, 5);
    }
    /**
     * Calculate similarity between two states (0-1)
     */
    calculateStateSimilarity(state1, state2) {
        let similarity = 0;
        let factors = 0;
        // Task complexity similarity
        similarity += 1 - Math.abs(state1.taskComplexity - state2.taskComplexity);
        factors++;
        // Available resources similarity
        similarity += 1 - Math.abs(state1.availableResources - state2.availableResources);
        factors++;
        // Previous attempts similarity
        const attemptDiff = Math.abs(state1.previousAttempts - state2.previousAttempts);
        similarity += Math.max(0, 1 - attemptDiff / 5);
        factors++;
        // Required capabilities overlap
        const capabilities1 = new Set(state1.requiredCapabilities);
        const capabilities2 = new Set(state2.requiredCapabilities);
        const intersection = new Set([...capabilities1].filter(x => capabilities2.has(x)));
        const union = new Set([...capabilities1, ...capabilities2]);
        if (union.size > 0) {
            similarity += intersection.size / union.size;
            factors++;
        }
        return factors > 0 ? similarity / factors : 0;
    }
    /**
     * Analyze decision factors
     */
    analyzeDecisionFactors(state, action, qValue, explorationRate, confidence, wasExploration) {
        const factors = [];
        // Q-value factor
        factors.push({
            name: 'Q-Value',
            value: qValue.toFixed(3),
            impact: wasExploration ? 0.3 : 0.9,
            description: 'Expected cumulative reward for this state-action pair'
        });
        // Exploration rate factor
        factors.push({
            name: 'Exploration Rate',
            value: `${(explorationRate * 100).toFixed(1)}%`,
            impact: wasExploration ? 0.9 : 0.1,
            description: 'Probability of selecting random action for exploration'
        });
        // Confidence factor
        factors.push({
            name: 'Confidence',
            value: `${(confidence * 100).toFixed(0)}%`,
            impact: 0.7,
            description: 'Based on number and quality of similar past experiences'
        });
        // Task complexity factor
        factors.push({
            name: 'Task Complexity',
            value: state.taskComplexity.toFixed(2),
            impact: 0.5,
            description: 'Complexity of current task (0=simple, 1=complex)'
        });
        // Resource availability factor
        factors.push({
            name: 'Available Resources',
            value: `${(state.availableResources * 100).toFixed(0)}%`,
            impact: 0.4,
            description: 'Resources available for task execution'
        });
        // Strategy factor
        factors.push({
            name: 'Strategy',
            value: action.strategy,
            impact: 0.8,
            description: 'Selected execution strategy'
        });
        // Parallelization factor
        factors.push({
            name: 'Parallelization',
            value: `${(action.parallelization * 100).toFixed(0)}%`,
            impact: 0.5,
            description: 'Degree of parallel execution'
        });
        // Sort by impact
        factors.sort((a, b) => b.impact - a.impact);
        return factors;
    }
    /**
     * Export explanation in structured format
     */
    exportStructured(explanation, state, availableActions, explorationRate) {
        return {
            explanation,
            state,
            availableActions,
            explorationRate
        };
    }
    /**
     * Export explanation in natural language format
     */
    exportNaturalLanguage(explanation) {
        const summary = this.generateSummary(explanation);
        const details = this.generateDetails(explanation);
        const metrics = this.generateMetrics(explanation);
        const recommendations = this.generateRecommendations(explanation);
        return {
            summary,
            details,
            metrics,
            recommendations
        };
    }
    /**
     * Generate summary sentence
     */
    generateSummary(explanation) {
        const action = explanation.selectedAction.strategy;
        const type = explanation.decisionType;
        const confidence = (explanation.confidence * 100).toFixed(0);
        if (type === 'exploration') {
            return `Explored "${action}" strategy with ${confidence}% confidence to gather more experience.`;
        }
        else {
            return `Selected "${action}" strategy with ${confidence}% confidence based on Q-value of ${explanation.qValue.toFixed(3)}.`;
        }
    }
    /**
     * Generate detailed explanation paragraphs
     */
    generateDetails(explanation) {
        const details = [];
        // Main reasoning
        details.push(explanation.reasoning);
        // Contributing experiences
        if (explanation.contributingExperiences.length > 0) {
            const exp = explanation.contributingExperiences[0];
            const successRate = explanation.contributingExperiences.filter(e => e.reward > 0).length /
                explanation.contributingExperiences.length;
            details.push(`This decision is based on ${explanation.contributingExperiences.length} similar past experiences ` +
                `with a ${(successRate * 100).toFixed(0)}% success rate. The most similar experience was from ` +
                `"${exp.taskType}" which had a reward of ${exp.reward.toFixed(2)}.`);
        }
        else {
            details.push('This decision is based on limited historical data. As the agent gains more experience, ' +
                'decision quality will improve.');
        }
        // Top decision factors
        const topFactors = explanation.decisionFactors.slice(0, 3);
        if (topFactors.length > 0) {
            const factorList = topFactors.map(f => `${f.name} (${f.value})`).join(', ');
            details.push(`Key decision factors: ${factorList}.`);
        }
        // Alternatives
        if (explanation.alternatives.length > 0) {
            const alt = explanation.alternatives[0];
            details.push(`The next best alternative was "${alt.action.strategy}" with Q-value ${alt.qValue.toFixed(3)}. ` +
                `It wasn't selected because: ${alt.reason}.`);
        }
        return details;
    }
    /**
     * Generate key metrics
     */
    generateMetrics(explanation) {
        const metrics = {};
        metrics['Decision Type'] = explanation.decisionType === 'exploration' ? 'Exploration' : 'Exploitation';
        metrics['Q-Value'] = explanation.qValue.toFixed(3);
        metrics['Confidence'] = `${(explanation.confidence * 100).toFixed(0)}%`;
        metrics['Strategy'] = explanation.selectedAction.strategy;
        metrics['Similar Experiences'] = explanation.contributingExperiences.length.toString();
        if (explanation.contributingExperiences.length > 0) {
            const successCount = explanation.contributingExperiences.filter(e => e.reward > 0).length;
            metrics['Success Rate'] = `${((successCount / explanation.contributingExperiences.length) * 100).toFixed(0)}%`;
        }
        return metrics;
    }
    /**
     * Generate recommendations for user
     */
    generateRecommendations(explanation) {
        const recommendations = [];
        // Low confidence recommendation
        if (explanation.confidence < 0.3) {
            recommendations.push('Low confidence detected. Consider providing feedback to help the agent learn faster.');
        }
        // Limited experience recommendation
        if (explanation.contributingExperiences.length < 3) {
            recommendations.push('Limited experience in this scenario. The agent will improve with more similar tasks.');
        }
        // Exploration recommendation
        if (explanation.decisionType === 'exploration') {
            recommendations.push('This was an exploratory action. If it performs well, it will be favored in future decisions.');
        }
        // High confidence recommendation
        if (explanation.confidence > 0.8) {
            recommendations.push('High confidence in this decision based on extensive past experience.');
        }
        // Alternative suggestion
        if (explanation.alternatives.length > 0) {
            const alt = explanation.alternatives[0];
            if (Math.abs(alt.qValue - explanation.qValue) < 0.1) {
                recommendations.push(`Alternative strategy "${alt.action.strategy}" has similar expected performance and could also work well.`);
            }
        }
        return recommendations;
    }
    /**
     * Encode state to string key (matches LearningEngine encoding)
     */
    encodeState(state) {
        const features = [
            state.taskComplexity,
            state.requiredCapabilities.length / 10,
            state.previousAttempts / 5,
            state.availableResources,
            state.timeConstraint ? Math.min(state.timeConstraint / 300000, 1) : 1
        ];
        return features.map(f => Math.round(f * 10) / 10).join(',');
    }
    /**
     * Encode action to string key (matches LearningEngine encoding)
     */
    encodeAction(action) {
        return `${action.strategy}:${action.parallelization.toFixed(1)}:${action.retryPolicy}`;
    }
}
exports.ExplainableLearning = ExplainableLearning;
//# sourceMappingURL=ExplainableLearning.js.map
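One implementation detail worth calling out from the hunk above: the confidence score is a capped sum of three terms, an experience-count term on a log10 scale (at most 0.4), a success-rate term (at most 0.3), and a recent-performance term (at most 0.3), with the total clamped to 0.95. A worked instance with hypothetical stats:

// Worked instance of the calculateConfidence arithmetic shown above, using
// hypothetical stats: 9 matching experiences, 6 successes, recent mean reward 0.5.
const totalCount = 9;
const successCount = 6;
const recentPerformance = 0.5;

const experienceConfidence = Math.min(0.4, Math.log10(totalCount + 1) / 2);         // log10(10)/2 = 0.5, capped at 0.4
const successConfidence = successCount > 0 ? (successCount / totalCount) * 0.3 : 0; // 0.667 * 0.3 = 0.2
const recentConfidence = Math.max(0, recentPerformance) * 0.3;                      // 0.15

console.log(Math.min(0.95, experienceConfidence + successConfidence + recentConfidence)); // ~0.75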
package/dist/learning/ExplainableLearning.js.map
@@ -0,0 +1 @@
{"version":3,"file":"ExplainableLearning.js","sourceRoot":"","sources":["../../src/learning/ExplainableLearning.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;GAUG;;;AAEH,4CAAyC;AA0IzC;;GAEG;AACH,MAAa,mBAAmB;IAG9B;QACE,IAAI,CAAC,MAAM,GAAG,eAAM,CAAC,WAAW,EAAE,CAAC;QACnC,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,iCAAiC,CAAC,CAAC;IACtD,CAAC;IAED;;;;;;;;;;;OAWG;IACH,aAAa,CACX,KAAgB,EAChB,cAA2B,EAC3B,gBAA+B,EAC/B,OAA4B,EAC5B,eAAuB,EACvB,WAA6B,EAC7B,cAAuB;QAEvB,MAAM,iBAAiB,GAAG,IAAI,CAAC,YAAY,CAAC,cAAc,CAAC,CAAC;QAC5D,MAAM,cAAc,GAAG,OAAO,CAAC,GAAG,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC;QAE3D,0BAA0B;QAC1B,MAAM,YAAY,GAAiB,cAAc,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,cAAc,CAAC;QAEnF,2CAA2C;QAC3C,MAAM,UAAU,GAAG,IAAI,CAAC,mBAAmB,CACzC,KAAK,EACL,cAAc,EACd,WAAW,CACZ,CAAC;QAEF,qBAAqB;QACrB,MAAM,SAAS,GAAG,IAAI,CAAC,iBAAiB,CACtC,YAAY,EACZ,cAAc,EACd,UAAU,EACV,eAAe,EACf,gBAAgB,CAAC,MAAM,EACvB,WAAW,CACZ,CAAC;QAEF,wBAAwB;QACxB,MAAM,YAAY,GAAG,IAAI,CAAC,oBAAoB,CAC5C,cAAc,EACd,gBAAgB,EAChB,OAAO,EACP,WAAW,EACX,YAAY,CACb,CAAC;QAEF,gCAAgC;QAChC,MAAM,uBAAuB,GAAG,IAAI,CAAC,2BAA2B,CAC9D,KAAK,EACL,cAAc,EACd,WAAW,CACZ,CAAC;QAEF,2BAA2B;QAC3B,MAAM,eAAe,GAAG,IAAI,CAAC,sBAAsB,CACjD,KAAK,EACL,cAAc,EACd,cAAc,EACd,eAAe,EACf,UAAU,EACV,cAAc,CACf,CAAC;QAEF,OAAO;YACL,cAAc;YACd,YAAY;YACZ,MAAM,EAAE,cAAc;YACtB,UAAU;YACV,SAAS;YACT,YAAY;YACZ,uBAAuB;YACvB,eAAe;YACf,SAAS,EAAE,IAAI,IAAI,EAAE;SACtB,CAAC;IACJ,CAAC;IAED;;;;;;OAMG;IACK,mBAAmB,CACzB,KAAgB,EAChB,MAAmB,EACnB,WAA6B;QAE7B,MAAM,KAAK,GAAG,IAAI,CAAC,kBAAkB,CAAC,KAAK,EAAE,MAAM,EAAE,WAAW,CAAC,CAAC;QAElE,IAAI,KAAK,CAAC,UAAU,KAAK,CAAC,EAAE,CAAC;YAC3B,OAAO,GAAG,CAAC,CAAC,oCAAoC;QAClD,CAAC;QAED,4DAA4D;QAC5D,MAAM,oBAAoB,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;QAEjF,0BAA0B;QAC1B,MAAM,iBAAiB,GAAG,KAAK,CAAC,YAAY,GAAG,CAAC;YAC9C,CAAC,CAAC,CAAC,KAAK,CAAC,YAAY,GAAG,KAAK,CAAC,UAAU,CAAC,GAAG,GAAG;YAC/C,CAAC,CAAC,CAAC,CAAC;QAEN,gCAAgC;QAChC,MAAM,gBAAgB,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,KAAK,CAAC,iBAAiB,CAAC,GAAG,GAAG,CAAC;QAEpE,OAAO,IAAI,CAAC,GAAG,CAAC,IAAI,EAAE,oBAAoB,GAAG,iBAAiB,GAAG,gBAAgB,CAAC,CAAC;IACrF,CAAC;IAED;;OAEG;IACK,kBAAkB,CACxB,KAAgB,EAChB,MAAmB,EACnB,WAA6B;QAE7B,MAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC;QACzC,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;QAE5C,2BAA2B;QAC3B,MAAM,kBAAkB,GAAG,WAAW,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE;YAClD,MAAM,WAAW,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;YAChD,MAAM,YAAY,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAEnD,uDAAuD;YACvD,OAAO,WAAW,KAAK,QAAQ,IAAI,YAAY,KAAK,SAAS,CAAC;QAChE,CAAC,CAAC,CAAC;QAEH,MAAM,UAAU,GAAG,kBAAkB,CAAC,MAAM,CAAC;QAC7C,MAAM,YAAY,GAAG,kBAAkB,CAAC,MAAM,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC;QAE7E,MAAM,aAAa,GAAG,UAAU,GAAG,CAAC;YAClC,CAAC,CAAC,kBAAkB,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,UAAU;YAC3E,CAAC,CAAC,CAAC,CAAC;QAEN,2CAA2C;QAC3C,MAAM,iBAAiB,GAAG,kBAAkB,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QACxD,MAAM,iBAAiB,GAAG,iBAAiB,CAAC,MAAM,GAAG,CAAC;YACpD,CAAC,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,iBAAiB,CAAC,MAAM;YACxF,CAAC,CAAC,CAAC,CAAC;QAEN,OAAO;YACL,UAAU;YACV,YAAY;YACZ,aAAa;YACb,iBAAiB;SAClB,CAAC;IACJ,CAAC;IAED;;OAEG;IACK,iBAAiB,CACvB,YAA0B,EAC1B,MAAc,EACd,UAAkB,EAClB,eAAuB,EACvB,UAAkB,EAClB,WAA6B;QAE7B,IAAI,YAAY,KAAK,aAAa,EAAE,CAAC;YACnC,MAAM,kBAAkB,GAAG,CAAC,eAAe,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;YAC9D,OAAO,qCAAqC,UAAU,CAAC,OAAO,CAAC,CAAC,CAAC,gCAAgC;gBAC/F,+BAA+B,kBAAkB,0CAA0C,CAAC;QAChG,CAAC;QAED,eAAe;QACf,MA
AM,eAAe,GAAG,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;QAC1C,MAAM,iBAAiB,GAAG,CAAC,UAAU,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;QACxD,MAAM,eAAe,GAAG,WAAW,CAAC,MAAM,CAAC;QAE3C,IAAI,MAAM,GAAG,GAAG,EAAE,CAAC;YACjB,OAAO,yCAAyC,eAAe,WAAW,UAAU,iBAAiB;gBACnG,oBAAoB,iBAAiB,eAAe,eAAe,iDAAiD,CAAC;QACzH,CAAC;aAAM,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;YACtB,OAAO,gCAAgC,eAAe,oCAAoC;gBACxF,sBAAsB,iBAAiB,WAAW,eAAe,wDAAwD,CAAC;QAC9H,CAAC;aAAM,CAAC;YACN,OAAO,4CAA4C,eAAe,WAAW,UAAU,YAAY;gBACjG,qBAAqB,iBAAiB,+CAA+C,CAAC;QAC1F,CAAC;IACH,CAAC;IAED;;OAEG;IACK,oBAAoB,CAC1B,cAA2B,EAC3B,gBAA+B,EAC/B,OAA4B,EAC5B,WAA6B,EAC7B,YAA0B;QAE1B,MAAM,iBAAiB,GAAG,IAAI,CAAC,YAAY,CAAC,cAAc,CAAC,CAAC;QAC5D,MAAM,YAAY,GAAwB,EAAE,CAAC;QAE7C,KAAK,MAAM,MAAM,IAAI,gBAAgB,EAAE,CAAC;YACtC,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;YAE5C,2BAA2B;YAC3B,IAAI,SAAS,KAAK,iBAAiB,EAAE,CAAC;gBACpC,SAAS;YACX,CAAC;YAED,MAAM,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC,SAAS,CAAC,IAAI,CAAC,CAAC;YAC3C,MAAM,UAAU,GAAG,IAAI,CAAC,mBAAmB,CACzC,EAAE,cAAc,EAAE,CAAC,EAAE,oBAAoB,EAAE,EAAE,EAAE,eAAe,EAAE,EAAE,EAAE,gBAAgB,EAAE,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,EAChH,MAAM,EACN,WAAW,CACZ,CAAC;YAEF,mCAAmC;YACnC,IAAI,MAAc,CAAC;YACnB,IAAI,YAAY,KAAK,aAAa,EAAE,CAAC;gBACnC,MAAM,GAAG,8CAA8C,CAAC;YAC1D,CAAC;iBAAM,CAAC;gBACN,MAAM,cAAc,GAAG,OAAO,CAAC,GAAG,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC;gBAC3D,IAAI,MAAM,GAAG,cAAc,EAAE,CAAC;oBAC5B,MAAM,GAAG,kBAAkB,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,cAAc,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC;gBAClF,CAAC;qBAAM,CAAC;oBACN,MAAM,GAAG,+DAA+D,CAAC;gBAC3E,CAAC;YACH,CAAC;YAED,YAAY,CAAC,IAAI,CAAC;gBAChB,MAAM;gBACN,MAAM;gBACN,UAAU;gBACV,MAAM;aACP,CAAC,CAAC;QACL,CAAC;QAED,6BAA6B;QAC7B,YAAY,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC;QAEjD,4BAA4B;QAC5B,OAAO,YAAY,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;IAClC,CAAC;IAED;;OAEG;IACK,2BAA2B,CACjC,KAAgB,EAChB,MAAmB,EACnB,WAA6B;QAE7B,MAAM,QAAQ,GAAG,IAAI,CAAC,WAAW,CAAC,KAAK,CAAC,CAAC;QACzC,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC;QAE5C,MAAM,gBAAgB,GAA6B,EAAE,CAAC;QAEtD,KAAK,MAAM,GAAG,IAAI,WAAW,EAAE,CAAC;YAC9B,MAAM,WAAW,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;YAChD,MAAM,YAAY,GAAG,IAAI,CAAC,YAAY,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;YAEnD,uBAAuB;YACvB,MAAM,UAAU,GAAG,IAAI,CAAC,wBAAwB,CAAC,KAAK,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC;YAEnE,2CAA2C;YAC3C,IAAI,YAAY,KAAK,SAAS,IAAI,UAAU,GAAG,GAAG,EAAE,CAAC;gBACnD,gBAAgB,CAAC,IAAI,CAAC;oBACpB,YAAY,EAAE,GAAG,CAAC,MAAM;oBACxB,QAAQ,EAAE,GAAG,CAAC,QAAQ;oBACtB,MAAM,EAAE,GAAG,CAAC,MAAM;oBAClB,UAAU;oBACV,SAAS,EAAE,GAAG,CAAC,SAAS;iBACzB,CAAC,CAAC;YACL,CAAC;QACH,CAAC;QAED,iCAAiC;QACjC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE;YAC7B,MAAM,cAAc,GAAG,CAAC,CAAC,UAAU,GAAG,CAAC,CAAC,UAAU,CAAC;YACnD,IAAI,IAAI,CAAC,GAAG,CAAC,cAAc,CAAC,GAAG,GAAG,EAAE,CAAC;gBACnC,OAAO,cAAc,CAAC;YACxB,CAAC;YACD,OAAO,CAAC,CAAC,SAAS,CAAC,OAAO,EAAE,GAAG,CAAC,CAAC,SAAS,CAAC,OAAO,EAAE,CAAC;QACvD,CAAC,CAAC,CAAC;QAEH,eAAe;QACf,OAAO,gBAAgB,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;IACtC,CAAC;IAED;;OAEG;IACK,wBAAwB,CAAC,MAAiB,EAAE,MAAiB;QACnE,IAAI,UAAU,GAAG,CAAC,CAAC;QACnB,IAAI,OAAO,GAAG,CAAC,CAAC;QAEhB,6BAA6B;QAC7B,UAAU,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,cAAc,GAAG,MAAM,CAAC,cAAc,CAAC,CAAC;QAC1E,OAAO,EAAE,CAAC;QAEV,iCAAiC;QACjC,UAAU,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,kBAAkB,GAAG,MAAM,CAAC,kBAAkB,CAAC,CAAC;QAClF,OAAO,EAAE,CAAC;QAEV,+BAA+B;QAC/B,MAAM,WAAW,GAAG,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,gBAAgB,GAAG,MAAM,CAAC,gBAAgB,CAAC,CAAC;QAChF,UAAU,IAAI,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAAG,WAAW,GAAG,CAAC,CAAC,CAAC;QAC/C,OAAO,EAAE,CAAC;QAEV,gCAAgC;QAChC,MAAM,aAAa,GAAG,IAAI,GA
AG,CAAC,MAAM,CAAC,oBAAoB,CAAC,CAAC;QAC3D,MAAM,aAAa,GAAG,IAAI,GAAG,CAAC,MAAM,CAAC,oBAAoB,CAAC,CAAC;QAC3D,MAAM,YAAY,GAAG,IAAI,GAAG,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;QACnF,MAAM,KAAK,GAAG,IAAI,GAAG,CAAC,CAAC,GAAG,aAAa,EAAE,GAAG,aAAa,CAAC,CAAC,CAAC;QAE5D,IAAI,KAAK,CAAC,IAAI,GAAG,CAAC,EAAE,CAAC;YACnB,UAAU,IAAI,YAAY,CAAC,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC;YAC7C,OAAO,EAAE,CAAC;QACZ,CAAC;QAED,OAAO,OAAO,GAAG,CAAC,CAAC,CAAC,CAAC,UAAU,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;IAChD,CAAC;IAED;;OAEG;IACK,sBAAsB,CAC5B,KAAgB,EAChB,MAAmB,EACnB,MAAc,EACd,eAAuB,EACvB,UAAkB,EAClB,cAAuB;QAEvB,MAAM,OAAO,GAAqB,EAAE,CAAC;QAErC,iBAAiB;QACjB,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,SAAS;YACf,KAAK,EAAE,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC;YACxB,MAAM,EAAE,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG;YAClC,WAAW,EAAE,uDAAuD;SACrE,CAAC,CAAC;QAEH,0BAA0B;QAC1B,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,kBAAkB;YACxB,KAAK,EAAE,GAAG,CAAC,eAAe,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;YAC/C,MAAM,EAAE,cAAc,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG;YAClC,WAAW,EAAE,wDAAwD;SACtE,CAAC,CAAC;QAEH,oBAAoB;QACpB,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,YAAY;YAClB,KAAK,EAAE,GAAG,CAAC,UAAU,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;YAC1C,MAAM,EAAE,GAAG;YACX,WAAW,EAAE,yDAAyD;SACvE,CAAC,CAAC;QAEH,yBAAyB;QACzB,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,iBAAiB;YACvB,KAAK,EAAE,KAAK,CAAC,cAAc,CAAC,OAAO,CAAC,CAAC,CAAC;YACtC,MAAM,EAAE,GAAG;YACX,WAAW,EAAE,kDAAkD;SAChE,CAAC,CAAC;QAEH,+BAA+B;QAC/B,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,qBAAqB;YAC3B,KAAK,EAAE,GAAG,CAAC,KAAK,CAAC,kBAAkB,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;YACxD,MAAM,EAAE,GAAG;YACX,WAAW,EAAE,wCAAwC;SACtD,CAAC,CAAC;QAEH,kBAAkB;QAClB,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,UAAU;YAChB,KAAK,EAAE,MAAM,CAAC,QAAQ;YACtB,MAAM,EAAE,GAAG;YACX,WAAW,EAAE,6BAA6B;SAC3C,CAAC,CAAC;QAEH,yBAAyB;QACzB,OAAO,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,iBAAiB;YACvB,KAAK,EAAE,GAAG,CAAC,MAAM,CAAC,eAAe,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG;YACtD,MAAM,EAAE,GAAG;YACX,WAAW,EAAE,8BAA8B;SAC5C,CAAC,CAAC;QAEH,iBAAiB;QACjB,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,MAAM,CAAC,CAAC;QAE5C,OAAO,OAAO,CAAC;IACjB,CAAC;IAED;;OAEG;IACH,gBAAgB,CACd,WAA8B,EAC9B,KAAgB,EAChB,gBAA+B,EAC/B,eAAuB;QAEvB,OAAO;YACL,WAAW;YACX,KAAK;YACL,gBAAgB;YAChB,eAAe;SAChB,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,qBAAqB,CAAC,WAA8B;QAClD,MAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC;QAClD,MAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC;QAClD,MAAM,OAAO,GAAG,IAAI,CAAC,eAAe,CAAC,WAAW,CAAC,CAAC;QAClD,MAAM,eAAe,GAAG,IAAI,CAAC,uBAAuB,CAAC,WAAW,CAAC,CAAC;QAElE,OAAO;YACL,OAAO;YACP,OAAO;YACP,OAAO;YACP,eAAe;SAChB,CAAC;IACJ,CAAC;IAED;;OAEG;IACK,eAAe,CAAC,WAA8B;QACpD,MAAM,MAAM,GAAG,WAAW,CAAC,cAAc,CAAC,QAAQ,CAAC;QACnD,MAAM,IAAI,GAAG,WAAW,CAAC,YAAY,CAAC;QACtC,MAAM,UAAU,GAAG,CAAC,WAAW,CAAC,UAAU,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;QAE7D,IAAI,IAAI,KAAK,aAAa,EAAE,CAAC;YAC3B,OAAO,aAAa,MAAM,mBAAmB,UAAU,yCAAyC,CAAC;QACnG,CAAC;aAAM,CAAC;YACN,OAAO,aAAa,MAAM,mBAAmB,UAAU,oCAAoC,WAAW,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC;QAC9H,CAAC;IACH,CAAC;IAED;;OAEG;IACK,eAAe,CAAC,WAA8B;QACpD,MAAM,OAAO,GAAa,EAAE,CAAC;QAE7B,iBAAiB;QACjB,OAAO,CAAC,IAAI,CAAC,WAAW,CAAC,SAAS,CAAC,CAAC;QAEpC,2BAA2B;QAC3B,IAAI,WAAW,CAAC,uBAAuB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACnD,MAAM,GAAG,GAAG,WAAW,CAAC,uBAAuB,CAAC,CAAC,CAAC,CAAC;YACnD,MAAM,WAAW,GAAG,WAAW,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,MAAM;gBACtF,WAAW,CAAC,uBAAuB,CAAC,MAAM,CAAC;YAE7C,OAAO,CAAC,IAAI,CACV,6BAA6B,WAAW,CAAC,uBAAuB,CAAC,MAAM,4BAA4
B;gBACnG,UAAU,CAAC,WAAW,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,uDAAuD;gBAC/F,IAAI,GAAG,CAAC,QAAQ,2BAA2B,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CACpE,CAAC;QACJ,CAAC;aAAM,CAAC;YACN,OAAO,CAAC,IAAI,CACV,yFAAyF;gBACzF,gCAAgC,CACjC,CAAC;QACJ,CAAC;QAED,uBAAuB;QACvB,MAAM,UAAU,GAAG,WAAW,CAAC,eAAe,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QAC3D,IAAI,UAAU,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YAC1B,MAAM,UAAU,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,KAAK,GAAG,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC5E,OAAO,CAAC,IAAI,CAAC,yBAAyB,UAAU,GAAG,CAAC,CAAC;QACvD,CAAC;QAED,eAAe;QACf,IAAI,WAAW,CAAC,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACxC,MAAM,GAAG,GAAG,WAAW,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;YACxC,OAAO,CAAC,IAAI,CACV,kCAAkC,GAAG,CAAC,MAAM,CAAC,QAAQ,kBAAkB,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI;gBAChG,+BAA+B,GAAG,CAAC,MAAM,GAAG,CAC7C,CAAC;QACJ,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAED;;OAEG;IACK,eAAe,CAAC,WAA8B;QACpD,MAAM,OAAO,GAA2B,EAAE,CAAC;QAE3C,OAAO,CAAC,eAAe,CAAC,GAAG,WAAW,CAAC,YAAY,KAAK,aAAa,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,cAAc,CAAC;QACvG,OAAO,CAAC,SAAS,CAAC,GAAG,WAAW,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;QACnD,OAAO,CAAC,YAAY,CAAC,GAAG,GAAG,CAAC,WAAW,CAAC,UAAU,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC;QACxE,OAAO,CAAC,UAAU,CAAC,GAAG,WAAW,CAAC,cAAc,CAAC,QAAQ,CAAC;QAC1D,OAAO,CAAC,qBAAqB,CAAC,GAAG,WAAW,CAAC,uBAAuB,CAAC,MAAM,CAAC,QAAQ,EAAE,CAAC;QAEvF,IAAI,WAAW,CAAC,uBAAuB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACnD,MAAM,YAAY,GAAG,WAAW,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,MAAM,CAAC;YAC1F,OAAO,CAAC,cAAc,CAAC,GAAG,GAAG,CAAC,CAAC,YAAY,GAAG,WAAW,CAAC,uBAAuB,CAAC,MAAM,CAAC,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC;QACjH,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;IAED;;OAEG;IACK,uBAAuB,CAAC,WAA8B;QAC5D,MAAM,eAAe,GAAa,EAAE,CAAC;QAErC,gCAAgC;QAChC,IAAI,WAAW,CAAC,UAAU,GAAG,GAAG,EAAE,CAAC;YACjC,eAAe,CAAC,IAAI,CAClB,sFAAsF,CACvF,CAAC;QACJ,CAAC;QAED,oCAAoC;QACpC,IAAI,WAAW,CAAC,uBAAuB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACnD,eAAe,CAAC,IAAI,CAClB,sFAAsF,CACvF,CAAC;QACJ,CAAC;QAED,6BAA6B;QAC7B,IAAI,WAAW,CAAC,YAAY,KAAK,aAAa,EAAE,CAAC;YAC/C,eAAe,CAAC,IAAI,CAClB,8FAA8F,CAC/F,CAAC;QACJ,CAAC;QAED,iCAAiC;QACjC,IAAI,WAAW,CAAC,UAAU,GAAG,GAAG,EAAE,CAAC;YACjC,eAAe,CAAC,IAAI,CAClB,sEAAsE,CACvE,CAAC;QACJ,CAAC;QAED,yBAAyB;QACzB,IAAI,WAAW,CAAC,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACxC,MAAM,GAAG,GAAG,WAAW,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;YACxC,IAAI,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,MAAM,GAAG,WAAW,CAAC,MAAM,CAAC,GAAG,GAAG,EAAE,CAAC;gBACpD,eAAe,CAAC,IAAI,CAClB,yBAAyB,GAAG,CAAC,MAAM,CAAC,QAAQ,8DAA8D,CAC3G,CAAC;YACJ,CAAC;QACH,CAAC;QAED,OAAO,eAAe,CAAC;IACzB,CAAC;IAED;;OAEG;IACK,WAAW,CAAC,KAAgB;QAClC,MAAM,QAAQ,GAAG;YACf,KAAK,CAAC,cAAc;YACpB,KAAK,CAAC,oBAAoB,CAAC,MAAM,GAAG,EAAE;YACtC,KAAK,CAAC,gBAAgB,GAAG,CAAC;YAC1B,KAAK,CAAC,kBAAkB;YACxB,KAAK,CAAC,cAAc,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,cAAc,GAAG,MAAM,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;SACtE,CAAC;QACF,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,GAAG,EAAE,CAAC,GAAG,EAAE,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;IAC9D,CAAC;IAED;;OAEG;IACK,YAAY,CAAC,MAAmB;QACtC,OAAO,GAAG,MAAM,CAAC,QAAQ,IAAI,MAAM,CAAC,eAAe,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,MAAM,CAAC,WAAW,EAAE,CAAC;IACzF,CAAC;CACF;AA9kBD,kDA8kBC"}
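Finally, note from encodeState/encodeAction above that Q-values and experience statistics are looked up by discretized string keys, with each state feature rounded to one decimal, so nearby states intentionally collapse into the same table entry. A small sketch of that bucketing; the state shape is inferred from this diff and both states are hypothetical:

// Re-derivation of the encodeState bucketing shown above.
const encode = (s: {
    taskComplexity: number;
    requiredCapabilities: string[];
    previousAttempts: number;
    availableResources: number;
    timeConstraint?: number;
}) => [
    s.taskComplexity,
    s.requiredCapabilities.length / 10,
    s.previousAttempts / 5,
    s.availableResources,
    s.timeConstraint ? Math.min(s.timeConstraint / 300000, 1) : 1
].map(f => Math.round(f * 10) / 10).join(',');

const a = { taskComplexity: 0.58, requiredCapabilities: ['lint'], previousAttempts: 0, availableResources: 0.82 };
const b = { taskComplexity: 0.61, requiredCapabilities: ['lint'], previousAttempts: 0, availableResources: 0.79 };
console.log(encode(a), encode(a) === encode(b)); // "0.6,0.1,0,0.8,1" true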