agentdb 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +104 -0
- package/README.md +5 -5
- package/bin/agentdb.js +296 -65
- package/dist/mcp/learning/core/experience-buffer.d.ts +61 -0
- package/dist/mcp/learning/core/experience-buffer.d.ts.map +1 -0
- package/dist/mcp/learning/core/experience-buffer.js +175 -0
- package/dist/mcp/learning/core/experience-buffer.js.map +1 -0
- package/dist/mcp/learning/core/experience-buffer.mjs +170 -0
- package/dist/mcp/learning/core/experience-recorder.d.ts +40 -0
- package/dist/mcp/learning/core/experience-recorder.d.ts.map +1 -0
- package/dist/mcp/learning/core/experience-recorder.js +200 -0
- package/dist/mcp/learning/core/experience-recorder.js.map +1 -0
- package/dist/mcp/learning/core/experience-recorder.mjs +195 -0
- package/dist/mcp/learning/core/learning-manager.d.ts +66 -0
- package/dist/mcp/learning/core/learning-manager.d.ts.map +1 -0
- package/dist/mcp/learning/core/learning-manager.js +252 -0
- package/dist/mcp/learning/core/learning-manager.js.map +1 -0
- package/dist/mcp/learning/core/learning-manager.mjs +247 -0
- package/dist/mcp/learning/core/policy-optimizer.d.ts +53 -0
- package/dist/mcp/learning/core/policy-optimizer.d.ts.map +1 -0
- package/dist/mcp/learning/core/policy-optimizer.js +251 -0
- package/dist/mcp/learning/core/policy-optimizer.js.map +1 -0
- package/dist/mcp/learning/core/policy-optimizer.mjs +246 -0
- package/dist/mcp/learning/core/reward-estimator.d.ts +44 -0
- package/dist/mcp/learning/core/reward-estimator.d.ts.map +1 -0
- package/dist/mcp/learning/core/reward-estimator.js +158 -0
- package/dist/mcp/learning/core/reward-estimator.js.map +1 -0
- package/dist/mcp/learning/core/reward-estimator.mjs +153 -0
- package/dist/mcp/learning/core/session-manager.d.ts +63 -0
- package/dist/mcp/learning/core/session-manager.d.ts.map +1 -0
- package/dist/mcp/learning/core/session-manager.js +202 -0
- package/dist/mcp/learning/core/session-manager.js.map +1 -0
- package/dist/mcp/learning/core/session-manager.mjs +197 -0
- package/dist/mcp/learning/index.d.ts +19 -0
- package/dist/mcp/learning/index.d.ts.map +1 -0
- package/dist/mcp/learning/index.js +30 -0
- package/dist/mcp/learning/index.js.map +1 -0
- package/dist/mcp/learning/index.mjs +19 -0
- package/dist/mcp/learning/tools/mcp-learning-tools.d.ts +369 -0
- package/dist/mcp/learning/tools/mcp-learning-tools.d.ts.map +1 -0
- package/dist/mcp/learning/tools/mcp-learning-tools.js +361 -0
- package/dist/mcp/learning/tools/mcp-learning-tools.js.map +1 -0
- package/dist/mcp/learning/tools/mcp-learning-tools.mjs +356 -0
- package/dist/mcp/learning/types/index.d.ts +138 -0
- package/dist/mcp/learning/types/index.d.ts.map +1 -0
- package/dist/mcp/learning/types/index.js +6 -0
- package/dist/mcp/learning/types/index.js.map +1 -0
- package/dist/mcp/learning/types/index.mjs +4 -0
- package/dist/mcp-server.d.ts +2 -0
- package/dist/mcp-server.d.ts.map +1 -1
- package/dist/mcp-server.js +72 -4
- package/dist/mcp-server.js.map +1 -1
- package/dist/mcp-server.mjs +72 -4
- package/dist/wasm/sql-wasm-debug.js +6989 -0
- package/dist/wasm/sql-wasm-debug.wasm +0 -0
- package/dist/wasm/sql-wasm.js +188 -0
- package/dist/wasm/sql-wasm.wasm +0 -0
- package/dist/wasm-loader.d.ts.map +1 -1
- package/dist/wasm-loader.js +5 -2
- package/dist/wasm-loader.js.map +1 -1
- package/dist/wasm-loader.mjs +5 -2
- package/examples/mcp-learning-example.ts +220 -0
- package/package.json +26 -5
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ExperienceRecorder - Captures and stores learning experiences
|
|
3
|
+
*/
|
|
4
|
+
import { RewardEstimator } from './reward-estimator.mjs';
|
|
5
|
+
export class ExperienceRecorder {
    /**
     * @param {object} db - Vector store exposing `insert({ embedding, metadata })`
     *   and `search(embedding, k)` (presumably SQLiteVectorDB — confirm at call site).
     */
    constructor(db) {
        // Monotonic counter used to mint unique per-recorder action IDs.
        this.actionCounter = 0;
        this.db = db;
        this.rewardEstimator = new RewardEstimator();
    }
    /**
     * Record a tool execution as a learning experience.
     *
     * Captures the pre-execution state, computes the reward for the outcome,
     * captures the resulting state, persists the full (state, action, reward,
     * nextState) tuple via the vector database, and returns it.
     *
     * @param {string} toolName - Name of the executed tool.
     * @param {object} args - Arguments the tool was invoked with.
     * @param {*} result - Raw tool result (currently unused; kept for API compatibility).
     * @param {object} context - Session context (userId, sessionId, taskType, isTerminal, metadata...).
     * @param {object} outcome - Execution outcome (success, error, executionTime, tokensUsed).
     * @returns {Promise<object>} The stored experience tuple.
     */
    async recordToolExecution(toolName, args, result, context, outcome) {
        const state = await this.captureState(context);
        const action = {
            tool: toolName,
            params: args,
            timestamp: Date.now(),
        };
        const reward = await this.rewardEstimator.calculateReward(outcome, context);
        // The next state is terminal when the tool either succeeded or errored.
        const nextState = await this.captureState({
            ...context,
            isTerminal: outcome.success || !!outcome.error,
        });
        const experience = {
            state,
            action,
            reward: reward.combined,
            nextState,
            // NOTE(review): mirrors the caller-supplied flag and may be undefined
            // when the caller omits it — LearningManager always sets it; confirm
            // for any other callers.
            done: context.isTerminal,
            timestamp: Date.now(),
            metadata: {
                userId: context.userId,
                sessionId: context.sessionId,
                taskType: context.taskType,
                actionId: `action_${this.actionCounter++}`,
                rewardBreakdown: reward,
                outcome: {
                    success: outcome.success,
                    executionTime: outcome.executionTime,
                    tokensUsed: outcome.tokensUsed,
                },
            },
        };
        // Store experience in vector database
        await this.storeExperience(experience);
        return experience;
    }
    /**
     * Capture the current state representation from a session context and
     * attach a vector embedding for similarity search.
     *
     * @param {object} context - Session context; optional `metadata` supplies
     *   taskDescription / availableTools / previousActions / constraints.
     * @returns {Promise<object>} State object with an `embedding` field set.
     */
    async captureState(context) {
        const state = {
            taskDescription: context.metadata?.taskDescription || '',
            availableTools: context.metadata?.availableTools || [],
            previousActions: context.metadata?.previousActions || [],
            constraints: context.metadata?.constraints,
            context: {
                sessionId: context.sessionId,
                taskType: context.taskType,
                timestamp: context.timestamp,
            },
        };
        // Generate state embedding for similarity search
        state.embedding = await this.generateStateEmbedding(state);
        return state;
    }
    /**
     * Generate a 768-dimensional, L2-normalized vector embedding for a state.
     *
     * Uses a simple character-frequency hash over the serialized state
     * (in production, use a proper embedding model).
     *
     * @param {object} state - State to embed.
     * @returns {Promise<Float32Array>} Unit-length embedding (all zeros for empty text).
     */
    async generateStateEmbedding(state) {
        // Simple hash-based embedding (in production, use a proper embedding model)
        const text = JSON.stringify({
            task: state.taskDescription,
            tools: state.availableTools,
            type: state.context?.taskType,
        });
        // Create a simple hash-based embedding (768 dimensions)
        const embedding = new Float32Array(768);
        for (let i = 0; i < text.length; i++) {
            const index = text.charCodeAt(i) % 768;
            embedding[index] += 1;
        }
        // Normalize to unit length (skip if all-zero to avoid division by zero).
        const magnitude = Math.sqrt(embedding.reduce((sum, val) => sum + val * val, 0));
        if (magnitude > 0) {
            for (let i = 0; i < embedding.length; i++) {
                embedding[i] /= magnitude;
            }
        }
        return embedding;
    }
    /**
     * Persist an experience in the vector database, indexed by its state embedding.
     *
     * @param {object} experience - Experience tuple; `state.embedding` must be set.
     * @throws {Error} If the state has no embedding.
     */
    async storeExperience(experience) {
        if (!experience.state.embedding) {
            throw new Error('State embedding is required');
        }
        await this.db.insert({
            embedding: Array.from(experience.state.embedding),
            metadata: {
                type: 'learning_experience',
                sessionId: experience.metadata.sessionId,
                userId: experience.metadata.userId,
                taskType: experience.metadata.taskType,
                actionId: experience.metadata.actionId,
                action: experience.action,
                reward: experience.reward,
                done: experience.done,
                timestamp: experience.timestamp,
                state: {
                    taskDescription: experience.state.taskDescription,
                    availableTools: experience.state.availableTools,
                    previousActionsCount: experience.state.previousActions.length,
                },
                outcome: experience.metadata.outcome,
                rewardBreakdown: experience.metadata.rewardBreakdown,
            },
        });
    }
    /**
     * Rehydrate a db search result into an Experience tuple.
     *
     * Shared by {@link retrieveSimilarExperiences} and
     * {@link getSessionExperiences} (previously duplicated in both).
     * Note: previousActions and nextState are not persisted, so they are
     * restored as empty placeholders.
     *
     * @param {object} result - Search result with `embedding` and `metadata`.
     * @returns {object} Reconstructed experience.
     */
    mapSearchResult(result) {
        return {
            state: {
                taskDescription: result.metadata.state.taskDescription,
                availableTools: result.metadata.state.availableTools,
                previousActions: [],
                embedding: result.embedding,
            },
            action: result.metadata.action,
            reward: result.metadata.reward,
            nextState: {
                taskDescription: '',
                availableTools: [],
                previousActions: [],
            },
            done: result.metadata.done,
            timestamp: result.metadata.timestamp,
            metadata: {
                userId: result.metadata.userId,
                sessionId: result.metadata.sessionId,
                taskType: result.metadata.taskType,
                actionId: result.metadata.actionId,
            },
        };
    }
    /**
     * Retrieve up to `k` stored experiences most similar to a state.
     * Computes the state embedding on demand if it is missing.
     *
     * @param {object} state - Query state.
     * @param {number} [k=10] - Maximum number of results.
     * @returns {Promise<object[]>} Reconstructed experiences.
     */
    async retrieveSimilarExperiences(state, k = 10) {
        if (!state.embedding) {
            state.embedding = await this.generateStateEmbedding(state);
        }
        const results = await this.db.search(Array.from(state.embedding), k);
        return results.map((result) => this.mapSearchResult(result));
    }
    /**
     * Get all stored experiences for a session.
     *
     * The db exposes no metadata-filtered query, so this fetches a large batch
     * with a zero vector and filters client-side (capped at 1000 results).
     *
     * @param {string} sessionId - Session to filter by.
     * @returns {Promise<object[]>} Experiences belonging to the session.
     */
    async getSessionExperiences(sessionId) {
        // Query by metadata filter
        const allResults = await this.db.search(Array(768).fill(0), 1000);
        // Filter by session ID
        const sessionResults = allResults.filter((result) => result.metadata.sessionId === sessionId);
        return sessionResults.map((result) => this.mapSearchResult(result));
    }
    /**
     * Update a stored experience with feedback-adjusted reward.
     * Currently a stub: logs the feedback instead of mutating stored data.
     *
     * @param {string} actionId - ID of the action to update.
     * @param {number} feedbackReward - Normalized feedback reward.
     */
    async updateExperienceReward(actionId, feedbackReward) {
        // In a real implementation, this would update the stored experience
        // For now, we log the feedback
        console.log(`Updated reward for ${actionId}: ${feedbackReward}`);
    }
}
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LearningManager - Main orchestration layer for MCP learning integration
|
|
3
|
+
*/
|
|
4
|
+
import type { SQLiteVectorDB } from '../../../core/vector-db.js';
|
|
5
|
+
import type { Experience, Outcome, ActionPrediction, TrainingOptions, TrainingMetrics, LearningSession, FeedbackInput, LearningMetrics, TransferMetrics, State } from '../types/index.js';
|
|
6
|
+
export declare class LearningManager {
    /** Vector database handle shared by all learning components. */
    private db;
    /** Records tool executions as (state, action, reward, nextState) tuples. */
    private experienceRecorder;
    /** Computes reward signals from tool outcomes. */
    private rewardEstimator;
    /** Creates, tracks, and persists learning sessions. */
    private sessionManager;
    /** One policy optimizer per active session, keyed by session ID. */
    private policyOptimizers;
    constructor(db: SQLiteVectorDB);
    /**
     * Start a new learning session and attach a policy optimizer to it.
     * `plugin` defaults to 'q-learning'; `config` may carry optimizer
     * settings (learningRate, discountFactor, bufferSize).
     */
    startSession(userId: string, sessionType: 'coding' | 'research' | 'debugging' | 'general', plugin?: string, config?: Record<string, any>): Promise<LearningSession>;
    /**
     * End a learning session: exports and saves the session's policy first,
     * then removes its optimizer and closes the session.
     */
    endSession(sessionId: string): Promise<LearningSession>;
    /**
     * Record a tool execution as a learning experience for an active session,
     * update its policy, and increment the session's experience count.
     * Throws if the session is unknown.
     */
    recordExperience(sessionId: string, toolName: string, args: any, result: any, outcome: Outcome): Promise<Experience>;
    /**
     * Predict the next best action from the session's policy optimizer.
     * Throws if the session has no optimizer.
     */
    predictAction(sessionId: string, currentState: State, availableTools: string[]): Promise<ActionPrediction>;
    /**
     * Apply user feedback to an action: the rating (0-5 scale) is normalized
     * and folded into the stored experience's reward.
     */
    provideFeedback(sessionId: string, actionId: string, feedback: FeedbackInput): Promise<void>;
    /**
     * Train the session's policy on collected experiences.
     * Throws if the session has no optimizer.
     */
    train(sessionId: string, options?: TrainingOptions): Promise<TrainingMetrics>;
    /**
     * Get aggregate learning metrics for a session: average reward, success
     * rate, first-vs-last-batch improvement, and top actions by reward.
     */
    getMetrics(sessionId: string, period?: 'session' | 'day' | 'week' | 'month' | 'all'): Promise<LearningMetrics>;
    /**
     * Transfer learning between sessions by similarity-weighted merging of
     * the source policy's Q-table into the target's.
     */
    transferLearning(sourceSessionId: string, targetSessionId: string, similarity?: number): Promise<TransferMetrics>;
    /**
     * Explain a prediction using similar past experiences, returning a
     * human-readable reasoning string and confidence factors.
     */
    explainPrediction(sessionId: string, state: State): Promise<{
        reasoning: string;
        similarExperiences: Experience[];
        confidenceFactors: Record<string, number>;
    }>;
    /**
     * Calculate consistency of actions in similar experiences
     * (share of the most frequent tool; 0 for an empty list).
     */
    private calculateConsistency;
    /** Get session info, or undefined if the session is unknown. */
    getSessionInfo(sessionId: string): LearningSession | undefined;
    /** Restore sessions from the database, optionally filtered by user. */
    restoreSessions(userId?: string): Promise<LearningSession[]>;
}
|
|
66
|
+
//# sourceMappingURL=learning-manager.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"learning-manager.d.ts","sourceRoot":"","sources":["../../../../src/mcp/learning/core/learning-manager.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,4BAA4B,CAAC;AACjE,OAAO,KAAK,EACV,UAAU,EAEV,OAAO,EACP,gBAAgB,EAChB,eAAe,EACf,eAAe,EACf,eAAe,EACf,aAAa,EACb,eAAe,EACf,eAAe,EACf,KAAK,EACN,MAAM,mBAAmB,CAAC;AAM3B,qBAAa,eAAe;IAC1B,OAAO,CAAC,EAAE,CAAiB;IAC3B,OAAO,CAAC,kBAAkB,CAAqB;IAC/C,OAAO,CAAC,eAAe,CAAkB;IACzC,OAAO,CAAC,cAAc,CAAiB;IACvC,OAAO,CAAC,gBAAgB,CAA2C;gBAEvD,EAAE,EAAE,cAAc;IAO9B;;OAEG;IACG,YAAY,CAChB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,QAAQ,GAAG,UAAU,GAAG,WAAW,GAAG,SAAS,EAC5D,MAAM,GAAE,MAAqB,EAC7B,MAAM,GAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAM,GAC/B,OAAO,CAAC,eAAe,CAAC;IAmB3B;;OAEG;IACG,UAAU,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,CAAC;IAc7D;;OAEG;IACG,gBAAgB,CACpB,SAAS,EAAE,MAAM,EACjB,QAAQ,EAAE,MAAM,EAChB,IAAI,EAAE,GAAG,EACT,MAAM,EAAE,GAAG,EACX,OAAO,EAAE,OAAO,GACf,OAAO,CAAC,UAAU,CAAC;IAkCtB;;OAEG;IACG,aAAa,CACjB,SAAS,EAAE,MAAM,EACjB,YAAY,EAAE,KAAK,EACnB,cAAc,EAAE,MAAM,EAAE,GACvB,OAAO,CAAC,gBAAgB,CAAC;IAS5B;;OAEG;IACG,eAAe,CACnB,SAAS,EAAE,MAAM,EACjB,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,aAAa,GACtB,OAAO,CAAC,IAAI,CAAC;IAUhB;;OAEG;IACG,KAAK,CACT,SAAS,EAAE,MAAM,EACjB,OAAO,GAAE,eAAoB,GAC5B,OAAO,CAAC,eAAe,CAAC;IAS3B;;OAEG;IACG,UAAU,CACd,SAAS,EAAE,MAAM,EACjB,MAAM,GAAE,SAAS,GAAG,KAAK,GAAG,MAAM,GAAG,OAAO,GAAG,KAAiB,GAC/D,OAAO,CAAC,eAAe,CAAC;IAkF3B;;OAEG;IACG,gBAAgB,CACpB,eAAe,EAAE,MAAM,EACvB,eAAe,EAAE,MAAM,EACvB,UAAU,GAAE,MAAY,GACvB,OAAO,CAAC,eAAe,CAAC;IA6C3B;;OAEG;IACG,iBAAiB,CACrB,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,KAAK,GACX,OAAO,CAAC;QACT,SAAS,EAAE,MAAM,CAAC;QAClB,kBAAkB,EAAE,UAAU,EAAE,CAAC;QACjC,iBAAiB,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;KAC3C,CAAC;IAuBF;;OAEG;IACH,OAAO,CAAC,oBAAoB;IAa5B;;OAEG;IACH,cAAc,CAAC,SAAS,EAAE,MAAM,GAAG,eAAe,GAAG,SAAS;IAI9D;;OAEG;IACG,eAAe,CAAC,MAAM,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,EAAE,CAAC;CAGnE"}
|
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* LearningManager - Main orchestration layer for MCP learning integration
|
|
4
|
+
*/
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.LearningManager = void 0;
|
|
7
|
+
const experience_recorder_js_1 = require("./experience-recorder.js");
|
|
8
|
+
const reward_estimator_js_1 = require("./reward-estimator.js");
|
|
9
|
+
const session_manager_js_1 = require("./session-manager.js");
|
|
10
|
+
const policy_optimizer_js_1 = require("./policy-optimizer.js");
|
|
11
|
+
class LearningManager {
    /**
     * @param {object} db - Vector database shared by the recorder and
     *   session manager (presumably SQLiteVectorDB — see the .d.ts).
     */
    constructor(db) {
        // One policy optimizer per active session, keyed by session ID.
        this.policyOptimizers = new Map();
        this.db = db;
        this.experienceRecorder = new experience_recorder_js_1.ExperienceRecorder(db);
        this.rewardEstimator = new reward_estimator_js_1.RewardEstimator();
        this.sessionManager = new session_manager_js_1.SessionManager(db);
    }
    /**
     * Start a new learning session and attach a policy optimizer to it.
     *
     * @param {string} userId - Owner of the session.
     * @param {string} sessionType - 'coding' | 'research' | 'debugging' | 'general'.
     * @param {string} [plugin='q-learning'] - Learning plugin identifier.
     * @param {object} [config={}] - Optimizer settings (learningRate, discountFactor, bufferSize).
     * @returns {Promise<object>} The created session.
     */
    async startSession(userId, sessionType, plugin = 'q-learning', config = {}) {
        const session = await this.sessionManager.createSession(userId, sessionType, plugin, config);
        // Initialize policy optimizer for this session.
        // Use ?? (not ||) so an explicit 0 — e.g. a myopic discountFactor of
        // 0 — is respected rather than silently replaced by the default.
        const optimizer = new policy_optimizer_js_1.PolicyOptimizer(config.learningRate ?? 0.1, config.discountFactor ?? 0.95, config.bufferSize ?? 10000);
        this.policyOptimizers.set(session.sessionId, optimizer);
        return session;
    }
    /**
     * End a learning session: persist its policy first, then close it.
     *
     * @param {string} sessionId - Session to end.
     * @returns {Promise<object>} The ended session.
     */
    async endSession(sessionId) {
        // Export and save policy BEFORE ending session
        const optimizer = this.policyOptimizers.get(sessionId);
        if (optimizer) {
            const policy = optimizer.exportPolicy();
            await this.sessionManager.updateSessionPolicy(sessionId, policy);
            this.policyOptimizers.delete(sessionId);
        }
        // Now end the session
        const session = await this.sessionManager.endSession(sessionId);
        return session;
    }
    /**
     * Record a tool execution as a learning experience, update the session's
     * policy with it, and bump the session's experience count.
     *
     * @param {string} sessionId - Active session ID.
     * @param {string} toolName - Name of the executed tool.
     * @param {*} args - Tool arguments.
     * @param {*} result - Raw tool result (forwarded to the recorder).
     * @param {object} outcome - Execution outcome (success, error, ...).
     * @returns {Promise<object>} The recorded experience.
     * @throws {Error} If the session is unknown.
     */
    async recordExperience(sessionId, toolName, args, result, outcome) {
        const session = this.sessionManager.getSession(sessionId);
        if (!session) {
            throw new Error(`Session ${sessionId} not found`);
        }
        const context = {
            userId: session.userId,
            sessionId: session.sessionId,
            taskType: session.sessionType,
            timestamp: Date.now(),
            // Terminal when the tool either succeeded or errored.
            isTerminal: outcome.success || !!outcome.error,
        };
        const experience = await this.experienceRecorder.recordToolExecution(toolName, args, result, context, outcome);
        // Update policy with new experience
        const optimizer = this.policyOptimizers.get(sessionId);
        if (optimizer) {
            await optimizer.updatePolicy(experience);
        }
        // Increment session experience count
        this.sessionManager.incrementExperienceCount(sessionId);
        return experience;
    }
    /**
     * Predict the next best action from the session's policy.
     *
     * @throws {Error} If the session has no policy optimizer.
     */
    async predictAction(sessionId, currentState, availableTools) {
        const optimizer = this.policyOptimizers.get(sessionId);
        if (!optimizer) {
            throw new Error(`No policy optimizer for session ${sessionId}`);
        }
        return await optimizer.predictAction(currentState, availableTools);
    }
    /**
     * Apply user feedback to an action by normalizing the rating and folding
     * it into the stored experience's reward.
     *
     * @param {string} sessionId - Session the action belongs to (currently unused).
     * @param {string} actionId - Action to update.
     * @param {object} feedback - Feedback with a `rating` on a 0-5 scale.
     */
    async provideFeedback(sessionId, actionId, feedback) {
        // Calculate feedback-adjusted reward
        const normalizedRating = feedback.rating / 5.0; // Assume 0-5 scale
        await this.experienceRecorder.updateExperienceReward(actionId, normalizedRating);
    }
    /**
     * Train the session's policy on collected experiences.
     *
     * @throws {Error} If the session has no policy optimizer.
     */
    async train(sessionId, options = {}) {
        const optimizer = this.policyOptimizers.get(sessionId);
        if (!optimizer) {
            throw new Error(`No policy optimizer for session ${sessionId}`);
        }
        return await optimizer.train(options);
    }
    /**
     * Get aggregate learning metrics for a session.
     *
     * Returns zeroed metrics when the session has no experiences. Otherwise
     * computes average reward, success rate (reward > 0.5), first-10 vs
     * last-10 improvement, and the top 5 actions by average reward.
     *
     * @param {string} sessionId - Session to report on.
     * @param {string} [period='session'] - Reporting period label (echoed back).
     * @returns {Promise<object>} Learning metrics.
     */
    async getMetrics(sessionId, period = 'session') {
        const experiences = await this.experienceRecorder.getSessionExperiences(sessionId);
        if (experiences.length === 0) {
            return {
                period,
                totalExperiences: 0,
                averageReward: 0,
                successRate: 0,
                learningProgress: {
                    initial: 0,
                    current: 0,
                    improvement: '0%',
                },
                topActions: [],
            };
        }
        // Calculate metrics
        const rewards = experiences.map((exp) => exp.reward);
        const avgReward = rewards.reduce((sum, r) => sum + r, 0) / rewards.length;
        const successCount = experiences.filter((exp) => exp.reward > 0.5).length;
        const successRate = successCount / experiences.length;
        // Calculate learning progress (first 10 vs last 10)
        const firstBatch = experiences.slice(0, 10);
        const lastBatch = experiences.slice(-10);
        const initialReward = firstBatch.reduce((sum, exp) => sum + exp.reward, 0) / firstBatch.length;
        const currentReward = lastBatch.reduce((sum, exp) => sum + exp.reward, 0) / lastBatch.length;
        const improvement = initialReward > 0
            ? (((currentReward - initialReward) / initialReward) * 100).toFixed(1)
            : '0';
        // Calculate top actions
        const actionStats = new Map();
        for (const exp of experiences) {
            const tool = exp.action.tool;
            const stats = actionStats.get(tool) || {
                count: 0,
                totalReward: 0,
                successCount: 0,
            };
            stats.count++;
            stats.totalReward += exp.reward;
            if (exp.reward > 0.5)
                stats.successCount++;
            actionStats.set(tool, stats);
        }
        const topActions = Array.from(actionStats.entries())
            .map(([tool, stats]) => ({
            tool,
            successRate: stats.successCount / stats.count,
            avgReward: stats.totalReward / stats.count,
            count: stats.count,
        }))
            .sort((a, b) => b.avgReward - a.avgReward)
            .slice(0, 5);
        return {
            period,
            totalExperiences: experiences.length,
            averageReward: avgReward,
            successRate,
            learningProgress: {
                initial: initialReward,
                current: currentReward,
                improvement: `${improvement}%`,
            },
            topActions,
        };
    }
    /**
     * Transfer learning between sessions by merging the source policy's
     * Q-table into the target's with similarity-based weighting.
     *
     * @param {string} sourceSessionId - Session to transfer from.
     * @param {string} targetSessionId - Session to transfer into.
     * @param {number} [similarity=0.7] - Weight (0-1) given to source values.
     * @returns {Promise<object>} Transfer metrics.
     * @throws {Error} If either session has no policy optimizer.
     */
    async transferLearning(sourceSessionId, targetSessionId, similarity = 0.7) {
        const sourceOptimizer = this.policyOptimizers.get(sourceSessionId);
        const targetOptimizer = this.policyOptimizers.get(targetSessionId);
        if (!sourceOptimizer || !targetOptimizer) {
            throw new Error('Source or target session not found');
        }
        // Export source policy
        const sourcePolicy = sourceOptimizer.exportPolicy();
        // Import into target (with similarity-based weighting)
        const targetPolicy = targetOptimizer.exportPolicy();
        // Merge policies (simplified - in production would use more sophisticated transfer)
        const mergedQTable = { ...targetPolicy.qTable };
        for (const [stateKey, actions] of Object.entries(sourcePolicy.qTable)) {
            if (!mergedQTable[stateKey]) {
                mergedQTable[stateKey] = {};
            }
            for (const [action, value] of Object.entries(actions)) {
                const currentValue = mergedQTable[stateKey][action] || 0;
                // Weighted average based on similarity
                mergedQTable[stateKey][action] =
                    currentValue * (1 - similarity) + value * similarity;
            }
        }
        targetOptimizer.importPolicy({ ...targetPolicy, qTable: mergedQTable });
        const sourceSession = this.sessionManager.getSession(sourceSessionId);
        const targetSession = this.sessionManager.getSession(targetSessionId);
        return {
            sourceTask: sourceSession?.sessionType || 'unknown',
            targetTask: targetSession?.sessionType || 'unknown',
            similarity,
            transferSuccess: true,
            performanceGain: similarity * 0.3, // Estimated gain
            experiencesTransferred: Object.keys(sourcePolicy.qTable).length,
        };
    }
    /**
     * Explain a prediction using up to 5 similar past experiences.
     *
     * @returns {Promise<{reasoning: string, similarExperiences: object[], confidenceFactors: object}>}
     */
    async explainPrediction(sessionId, state) {
        // Get similar experiences
        const similarExperiences = await this.experienceRecorder.retrieveSimilarExperiences(state, 5);
        // Calculate confidence factors
        const confidenceFactors = {
            experienceCount: Math.min(1.0, similarExperiences.length / 10),
            avgReward: similarExperiences.reduce((sum, exp) => sum + exp.reward, 0) /
                (similarExperiences.length || 1),
            consistency: this.calculateConsistency(similarExperiences),
        };
        const reasoning = `Based on ${similarExperiences.length} similar past experiences with average reward ${confidenceFactors.avgReward.toFixed(2)}. Action consistency: ${(confidenceFactors.consistency * 100).toFixed(0)}%.`;
        return {
            reasoning,
            similarExperiences,
            confidenceFactors,
        };
    }
    /**
     * Calculate consistency of actions in similar experiences: the share of
     * the single most frequent tool. Returns 0 for an empty list.
     *
     * @param {object[]} experiences - Experiences to analyze.
     * @returns {number} Consistency in [0, 1].
     */
    calculateConsistency(experiences) {
        if (experiences.length === 0)
            return 0;
        const actionCounts = new Map();
        for (const exp of experiences) {
            const tool = exp.action.tool;
            actionCounts.set(tool, (actionCounts.get(tool) || 0) + 1);
        }
        const maxCount = Math.max(...Array.from(actionCounts.values()));
        return maxCount / experiences.length;
    }
    /**
     * Get session info, or undefined if the session is unknown.
     */
    getSessionInfo(sessionId) {
        return this.sessionManager.getSession(sessionId);
    }
    /**
     * Restore sessions from the database, optionally filtered by user.
     */
    async restoreSessions(userId) {
        return await this.sessionManager.restoreSessions(userId);
    }
}
|
|
251
|
+
exports.LearningManager = LearningManager;
|
|
252
|
+
//# sourceMappingURL=learning-manager.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"learning-manager.js","sourceRoot":"","sources":["../../../../src/mcp/learning/core/learning-manager.ts"],"names":[],"mappings":";AAAA;;GAEG;;;AAgBH,qEAA8D;AAC9D,+DAAwD;AACxD,6DAAsD;AACtD,+DAAwD;AAExD,MAAa,eAAe;IAO1B,YAAY,EAAkB;QAFtB,qBAAgB,GAAiC,IAAI,GAAG,EAAE,CAAC;QAGjE,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;QACb,IAAI,CAAC,kBAAkB,GAAG,IAAI,2CAAkB,CAAC,EAAE,CAAC,CAAC;QACrD,IAAI,CAAC,eAAe,GAAG,IAAI,qCAAe,EAAE,CAAC;QAC7C,IAAI,CAAC,cAAc,GAAG,IAAI,mCAAc,CAAC,EAAE,CAAC,CAAC;IAC/C,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,YAAY,CAChB,MAAc,EACd,WAA4D,EAC5D,SAAiB,YAAY,EAC7B,SAA8B,EAAE;QAEhC,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,cAAc,CAAC,aAAa,CACrD,MAAM,EACN,WAAW,EACX,MAAM,EACN,MAAM,CACP,CAAC;QAEF,+CAA+C;QAC/C,MAAM,SAAS,GAAG,IAAI,qCAAe,CACnC,MAAM,CAAC,YAAY,IAAI,GAAG,EAC1B,MAAM,CAAC,cAAc,IAAI,IAAI,EAC7B,MAAM,CAAC,UAAU,IAAI,KAAK,CAC3B,CAAC;QACF,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,OAAO,CAAC,SAAS,EAAE,SAAS,CAAC,CAAC;QAExD,OAAO,OAAO,CAAC;IACjB,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,UAAU,CAAC,SAAiB;QAChC,+CAA+C;QAC/C,MAAM,SAAS,GAAG,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC;QACvD,IAAI,SAAS,EAAE,CAAC;YACd,MAAM,MAAM,GAAG,SAAS,CAAC,YAAY,EAAE,CAAC;YACxC,MAAM,IAAI,CAAC,cAAc,CAAC,mBAAmB,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;YACjE,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;QAC1C,CAAC;QAED,sBAAsB;QACtB,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;QAChE,OAAO,OAAO,CAAC;IACjB,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,gBAAgB,CACpB,SAAiB,EACjB,QAAgB,EAChB,IAAS,EACT,MAAW,EACX,OAAgB;QAEhB,MAAM,OAAO,GAAG,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;QAC1D,IAAI,CAAC,OAAO,EAAE,CAAC;YACb,MAAM,IAAI,KAAK,CAAC,WAAW,SAAS,YAAY,CAAC,CAAC;QACpD,CAAC;QAED,MAAM,OAAO,GAAqB;YAChC,MAAM,EAAE,OAAO,CAAC,MAAM;YACtB,SAAS,EAAE,OAAO,CAAC,SAAS;YAC5B,QAAQ,EAAE,OAAO,CAAC,WAAW;YAC7B,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;YACrB,UAAU,EAAE,OAAO,CAAC,OAAO,IAAI,CAAC,CAAC,OAAO,CAAC,KAAK;SAC/C,CAAC;QAEF,MAAM,UAAU,GAAG,MAAM,IAAI,CAAC,kBAAkB,CAAC,mBAAmB,CAClE,QAAQ,EACR,IAAI,EACJ,MAAM,EACN,OAAO,EACP,OAAO,CACR,CAAC;QAEF,oCAAoC;QACpC,MAAM,SAAS,GAAG,IAAI
,CAAC,gBAAgB,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC;QACvD,IAAI,SAAS,EAAE,CAAC;YACd,MAAM,SAAS,CAAC,YAAY,CAAC,UAAU,CAAC,CAAC;QAC3C,CAAC;QAED,qCAAqC;QACrC,IAAI,CAAC,cAAc,CAAC,wBAAwB,CAAC,SAAS,CAAC,CAAC;QAExD,OAAO,UAAU,CAAC;IACpB,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,aAAa,CACjB,SAAiB,EACjB,YAAmB,EACnB,cAAwB;QAExB,MAAM,SAAS,GAAG,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC;QACvD,IAAI,CAAC,SAAS,EAAE,CAAC;YACf,MAAM,IAAI,KAAK,CAAC,mCAAmC,SAAS,EAAE,CAAC,CAAC;QAClE,CAAC;QAED,OAAO,MAAM,SAAS,CAAC,aAAa,CAAC,YAAY,EAAE,cAAc,CAAC,CAAC;IACrE,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,eAAe,CACnB,SAAiB,EACjB,QAAgB,EAChB,QAAuB;QAEvB,qCAAqC;QACrC,MAAM,gBAAgB,GAAG,QAAQ,CAAC,MAAM,GAAG,GAAG,CAAC,CAAC,mBAAmB;QAEnE,MAAM,IAAI,CAAC,kBAAkB,CAAC,sBAAsB,CAClD,QAAQ,EACR,gBAAgB,CACjB,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK,CACT,SAAiB,EACjB,UAA2B,EAAE;QAE7B,MAAM,SAAS,GAAG,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,SAAS,CAAC,CAAC;QACvD,IAAI,CAAC,SAAS,EAAE,CAAC;YACf,MAAM,IAAI,KAAK,CAAC,mCAAmC,SAAS,EAAE,CAAC,CAAC;QAClE,CAAC;QAED,OAAO,MAAM,SAAS,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;IACxC,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,UAAU,CACd,SAAiB,EACjB,SAAuD,SAAS;QAEhE,MAAM,WAAW,GAAG,MAAM,IAAI,CAAC,kBAAkB,CAAC,qBAAqB,CACrE,SAAS,CACV,CAAC;QAEF,IAAI,WAAW,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC7B,OAAO;gBACL,MAAM;gBACN,gBAAgB,EAAE,CAAC;gBACnB,aAAa,EAAE,CAAC;gBAChB,WAAW,EAAE,CAAC;gBACd,gBAAgB,EAAE;oBAChB,OAAO,EAAE,CAAC;oBACV,OAAO,EAAE,CAAC;oBACV,WAAW,EAAE,IAAI;iBAClB;gBACD,UAAU,EAAE,EAAE;aACf,CAAC;QACJ,CAAC;QAED,oBAAoB;QACpB,MAAM,OAAO,GAAG,WAAW,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;QACrD,MAAM,SAAS,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,CAAC,EAAE,EAAE,CAAC,GAAG,GAAG,CAAC,EAAE,CAAC,CAAC,GAAG,OAAO,CAAC,MAAM,CAAC;QAE1E,MAAM,YAAY,GAAG,WAAW,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,GAAG,CAAC,MAAM,GAAG,GAAG,CAAC,CAAC,MAAM,CAAC;QAC1E,MAAM,WAAW,GAAG,YAAY,GAAG,WAAW,CAAC,MAAM,CAAC;QAEtD,oDAAoD;QACpD,MAAM,UAAU,GAAG,WAAW,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;QAC5C,MAAM,SAAS,GAAG,WAAW,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QACzC,MAAM,aAAa,GACjB,UAAU,CAAC,
MAAM,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,UAAU,CAAC,MAAM,CAAC;QAC3E,MAAM,aAAa,GACjB,SAAS,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC,GAAG,SAAS,CAAC,MAAM,CAAC;QACzE,MAAM,WAAW,GACf,aAAa,GAAG,CAAC;YACf,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,GAAG,aAAa,CAAC,GAAG,aAAa,CAAC,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC;YACtE,CAAC,CAAC,GAAG,CAAC;QAEV,wBAAwB;QACxB,MAAM,WAAW,GAAG,IAAI,GAAG,EAGxB,CAAC;QAEJ,KAAK,MAAM,GAAG,IAAI,WAAW,EAAE,CAAC;YAC9B,MAAM,IAAI,GAAG,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC;YAC7B,MAAM,KAAK,GAAG,WAAW,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI;gBACrC,KAAK,EAAE,CAAC;gBACR,WAAW,EAAE,CAAC;gBACd,YAAY,EAAE,CAAC;aAChB,CAAC;YACF,KAAK,CAAC,KAAK,EAAE,CAAC;YACd,KAAK,CAAC,WAAW,IAAI,GAAG,CAAC,MAAM,CAAC;YAChC,IAAI,GAAG,CAAC,MAAM,GAAG,GAAG;gBAAE,KAAK,CAAC,YAAY,EAAE,CAAC;YAC3C,WAAW,CAAC,GAAG,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC;QAC/B,CAAC;QAED,MAAM,UAAU,GAAG,KAAK,CAAC,IAAI,CAAC,WAAW,CAAC,OAAO,EAAE,CAAC;aACjD,GAAG,CAAC,CAAC,CAAC,IAAI,EAAE,KAAK,CAAC,EAAE,EAAE,CAAC,CAAC;YACvB,IAAI;YACJ,WAAW,EAAE,KAAK,CAAC,YAAY,GAAG,KAAK,CAAC,KAAK;YAC7C,SAAS,EAAE,KAAK,CAAC,WAAW,GAAG,KAAK,CAAC,KAAK;YAC1C,KAAK,EAAE,KAAK,CAAC,KAAK;SACnB,CAAC,CAAC;aACF,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,SAAS,GAAG,CAAC,CAAC,SAAS,CAAC;aACzC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QAEf,OAAO;YACL,MAAM;YACN,gBAAgB,EAAE,WAAW,CAAC,MAAM;YACpC,aAAa,EAAE,SAAS;YACxB,WAAW;YACX,gBAAgB,EAAE;gBAChB,OAAO,EAAE,aAAa;gBACtB,OAAO,EAAE,aAAa;gBACtB,WAAW,EAAE,GAAG,WAAW,GAAG;aAC/B;YACD,UAAU;SACX,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,gBAAgB,CACpB,eAAuB,EACvB,eAAuB,EACvB,aAAqB,GAAG;QAExB,MAAM,eAAe,GAAG,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,eAAe,CAAC,CAAC;QACnE,MAAM,eAAe,GAAG,IAAI,CAAC,gBAAgB,CAAC,GAAG,CAAC,eAAe,CAAC,CAAC;QAEnE,IAAI,CAAC,eAAe,IAAI,CAAC,eAAe,EAAE,CAAC;YACzC,MAAM,IAAI,KAAK,CAAC,oCAAoC,CAAC,CAAC;QACxD,CAAC;QAED,uBAAuB;QACvB,MAAM,YAAY,GAAG,eAAe,CAAC,YAAY,EAAE,CAAC;QAEpD,uDAAuD;QACvD,MAAM,YAAY,GAAG,eAAe,CAAC,YAAY,EAAE,CAAC;QAEpD,oFAAoF;QACpF,MAAM,YAAY,GAAQ,EAAE,G
AAG,YAAY,CAAC,MAAM,EAAE,CAAC;QAErD,KAAK,MAAM,CAAC,QAAQ,EAAE,OAAO,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,YAAY,CAAC,MAAM,CAAC,EAAE,CAAC;YACtE,IAAI,CAAC,YAAY,CAAC,QAAQ,CAAC,EAAE,CAAC;gBAC5B,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC;YAC9B,CAAC;YAED,KAAK,MAAM,CAAC,MAAM,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,OAAc,CAAC,EAAE,CAAC;gBAC7D,MAAM,YAAY,GAAG,YAAY,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC;gBACzD,uCAAuC;gBACvC,YAAY,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC;oBAC5B,YAAY,GAAG,CAAC,CAAC,GAAG,UAAU,CAAC,GAAI,KAAgB,GAAG,UAAU,CAAC;YACrE,CAAC;QACH,CAAC;QAED,eAAe,CAAC,YAAY,CAAC,EAAE,GAAG,YAAY,EAAE,MAAM,EAAE,YAAY,EAAE,CAAC,CAAC;QAExE,MAAM,aAAa,GAAG,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,eAAe,CAAC,CAAC;QACtE,MAAM,aAAa,GAAG,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,eAAe,CAAC,CAAC;QAEtE,OAAO;YACL,UAAU,EAAE,aAAa,EAAE,WAAW,IAAI,SAAS;YACnD,UAAU,EAAE,aAAa,EAAE,WAAW,IAAI,SAAS;YACnD,UAAU;YACV,eAAe,EAAE,IAAI;YACrB,eAAe,EAAE,UAAU,GAAG,GAAG,EAAE,iBAAiB;YACpD,sBAAsB,EAAE,MAAM,CAAC,IAAI,CAAC,YAAY,CAAC,MAAM,CAAC,CAAC,MAAM;SAChE,CAAC;IACJ,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,iBAAiB,CACrB,SAAiB,EACjB,KAAY;QAMZ,0BAA0B;QAC1B,MAAM,kBAAkB,GACtB,MAAM,IAAI,CAAC,kBAAkB,CAAC,0BAA0B,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC;QAErE,+BAA+B;QAC/B,MAAM,iBAAiB,GAA2B;YAChD,eAAe,EAAE,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,kBAAkB,CAAC,MAAM,GAAG,EAAE,CAAC;YAC9D,SAAS,EACP,kBAAkB,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE,CAAC,GAAG,GAAG,GAAG,CAAC,MAAM,EAAE,CAAC,CAAC;gBAC5D,CAAC,kBAAkB,CAAC,MAAM,IAAI,CAAC,CAAC;YAClC,WAAW,EAAE,IAAI,CAAC,oBAAoB,CAAC,kBAAkB,CAAC;SAC3D,CAAC;QAEF,MAAM,SAAS,GAAG,YAAY,kBAAkB,CAAC,MAAM,iDAAiD,iBAAiB,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,CAAC,yBAAyB,CAAC,iBAAiB,CAAC,WAAW,GAAG,GAAG,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC;QAE5N,OAAO;YACL,SAAS;YACT,kBAAkB;YAClB,iBAAiB;SAClB,CAAC;IACJ,CAAC;IAED;;OAEG;IACK,oBAAoB,CAAC,WAAyB;QACpD,IAAI,WAAW,CAAC,MAAM,KAAK,CAAC;YAAE,OAAO,CAAC,CAAC;QAEvC,MAAM,YAAY,GAAG,IAAI,GAAG,EAAkB,CAAC;QAC/C,KAAK,MAAM,GAAG,IAAI,WAAW,EAAE,CAAC;YAC9B,MAAM,IAAI,GAAG,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC;YAC7B,YAAY,CAAC,GAAG,CAAC,IAAI,EAAE,CAAC,YAAY,CAAC,GAAG,CAAC,I
AAI,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;QAC5D,CAAC;QAED,MAAM,QAAQ,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC,IAAI,CAAC,YAAY,CAAC,MAAM,EAAE,CAAC,CAAC,CAAC;QAChE,OAAO,QAAQ,GAAG,WAAW,CAAC,MAAM,CAAC;IACvC,CAAC;IAED;;OAEG;IACH,cAAc,CAAC,SAAiB;QAC9B,OAAO,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,SAAS,CAAC,CAAC;IACnD,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,eAAe,CAAC,MAAe;QACnC,OAAO,MAAM,IAAI,CAAC,cAAc,CAAC,eAAe,CAAC,MAAM,CAAC,CAAC;IAC3D,CAAC;CACF;AA/VD,0CA+VC"}
|