claude-flow 2.0.0-alpha.66 → 2.0.0-alpha.68
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/cache/agent-pool.json +33 -0
- package/.claude/cache/memory-optimization.json +19 -0
- package/.claude/cache/neural-optimization.json +25 -0
- package/.claude/cache/optimized-hooks.json +19 -0
- package/.claude/cache/parallel-processing.json +26 -0
- package/.claude/optimized-settings.json +270 -0
- package/.claude/settings-backup.json +186 -0
- package/.claude/settings-enhanced.json +278 -0
- package/.claude/settings-fixed.json +186 -0
- package/.claude/settings.json +105 -8
- package/CHANGELOG.md +38 -0
- package/bin/claude-flow +1 -1
- package/dist/cli/simple-commands/hive-mind.js +1 -1
- package/dist/cli/simple-commands/hive-mind.js.map +1 -1
- package/dist/cli/simple-commands/hooks.js +6 -4
- package/dist/cli/simple-commands/hooks.js.map +1 -1
- package/dist/providers/anthropic-provider.d.ts +27 -0
- package/dist/providers/anthropic-provider.d.ts.map +1 -0
- package/dist/providers/anthropic-provider.js +247 -0
- package/dist/providers/anthropic-provider.js.map +1 -0
- package/dist/providers/base-provider.d.ts +134 -0
- package/dist/providers/base-provider.d.ts.map +1 -0
- package/dist/providers/base-provider.js +407 -0
- package/dist/providers/base-provider.js.map +1 -0
- package/dist/providers/cohere-provider.d.ts +28 -0
- package/dist/providers/cohere-provider.d.ts.map +1 -0
- package/dist/providers/cohere-provider.js +407 -0
- package/dist/providers/cohere-provider.js.map +1 -0
- package/dist/providers/google-provider.d.ts +23 -0
- package/dist/providers/google-provider.d.ts.map +1 -0
- package/dist/providers/google-provider.js +362 -0
- package/dist/providers/google-provider.js.map +1 -0
- package/dist/providers/index.d.ts +14 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +18 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/ollama-provider.d.ts +23 -0
- package/dist/providers/ollama-provider.d.ts.map +1 -0
- package/dist/providers/ollama-provider.js +374 -0
- package/dist/providers/ollama-provider.js.map +1 -0
- package/dist/providers/openai-provider.d.ts +23 -0
- package/dist/providers/openai-provider.d.ts.map +1 -0
- package/dist/providers/openai-provider.js +349 -0
- package/dist/providers/openai-provider.js.map +1 -0
- package/dist/providers/provider-manager.d.ts +139 -0
- package/dist/providers/provider-manager.d.ts.map +1 -0
- package/dist/providers/provider-manager.js +513 -0
- package/dist/providers/provider-manager.js.map +1 -0
- package/dist/providers/types.d.ts +356 -0
- package/dist/providers/types.d.ts.map +1 -0
- package/dist/providers/types.js +61 -0
- package/dist/providers/types.js.map +1 -0
- package/dist/providers/utils.d.ts +37 -0
- package/dist/providers/utils.d.ts.map +1 -0
- package/dist/providers/utils.js +322 -0
- package/dist/providers/utils.js.map +1 -0
- package/dist/services/agentic-flow-hooks/hook-manager.d.ts +70 -0
- package/dist/services/agentic-flow-hooks/hook-manager.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/hook-manager.js +512 -0
- package/dist/services/agentic-flow-hooks/hook-manager.js.map +1 -0
- package/dist/services/agentic-flow-hooks/index.d.ts +36 -0
- package/dist/services/agentic-flow-hooks/index.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/index.js +325 -0
- package/dist/services/agentic-flow-hooks/index.js.map +1 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.d.ts +33 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.js +415 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.d.ts +45 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.js +532 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.d.ts +39 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.js +561 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.d.ts +33 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.js +621 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/types.d.ts +379 -0
- package/dist/services/agentic-flow-hooks/types.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/types.js +8 -0
- package/dist/services/agentic-flow-hooks/types.js.map +1 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts +39 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.js +742 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.js.map +1 -0
- package/package.json +1 -1
- package/scripts/optimize-performance.js +400 -0
- package/scripts/performance-monitor.js +263 -0
- package/src/cli/help-text.js +1 -1
- package/src/cli/simple-cli.js +1 -1
- package/src/cli/simple-commands/hive-mind.js +1 -1
- package/src/providers/anthropic-provider.ts +282 -0
- package/src/providers/base-provider.ts +560 -0
- package/src/providers/cohere-provider.ts +521 -0
- package/src/providers/google-provider.ts +477 -0
- package/src/providers/index.ts +21 -0
- package/src/providers/ollama-provider.ts +489 -0
- package/src/providers/openai-provider.ts +476 -0
- package/src/providers/provider-manager.ts +654 -0
- package/src/providers/types.ts +531 -0
- package/src/providers/utils.ts +376 -0
- package/src/services/agentic-flow-hooks/hook-manager.ts +701 -0
- package/src/services/agentic-flow-hooks/index.ts +386 -0
- package/src/services/agentic-flow-hooks/llm-hooks.ts +557 -0
- package/src/services/agentic-flow-hooks/memory-hooks.ts +710 -0
- package/src/services/agentic-flow-hooks/neural-hooks.ts +758 -0
- package/src/services/agentic-flow-hooks/performance-hooks.ts +827 -0
- package/src/services/agentic-flow-hooks/types.ts +503 -0
- package/src/services/agentic-flow-hooks/workflow-hooks.ts +1026 -0
package/src/services/agentic-flow-hooks/neural-hooks.ts
@@ -0,0 +1,758 @@
/**
 * Neural training hooks for agentic-flow
 *
 * Enables learning from multi-model responses with
 * pattern detection and adaptive optimization.
 */

import { agenticHookManager } from './hook-manager.js';
import type {
  AgenticHookContext,
  HookHandlerResult,
  NeuralHookPayload,
  Pattern,
  TrainingData,
  Prediction,
  Adaptation,
  SideEffect,
} from './types.js';

// ===== Pre-Neural Train Hook =====

export const preNeuralTrainHook = {
  id: 'agentic-pre-neural-train',
  type: 'pre-neural-train' as const,
  priority: 100,
  handler: async (
    payload: NeuralHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { operation, modelId, trainingData } = payload;

    if (operation !== 'train' || !trainingData) {
      return { continue: true };
    }

    const sideEffects: SideEffect[] = [];

    // Validate training data
    const validation = validateTrainingData(trainingData);
    if (!validation.valid) {
      return {
        continue: false,
        sideEffects: [
          {
            type: 'log',
            action: 'write',
            data: {
              level: 'error',
              message: 'Invalid training data',
              data: validation,
            },
          },
        ],
      };
    }

    // Augment training data with historical patterns
    const augmentedData = await augmentTrainingData(
      trainingData,
      modelId,
      context
    );

    // Balance dataset if needed
    const balancedData = balanceTrainingData(augmentedData);

    // Apply data preprocessing
    const preprocessedData = preprocessTrainingData(balancedData);

    // Store training session metadata
    sideEffects.push({
      type: 'memory',
      action: 'store',
      data: {
        key: `neural:training:${modelId}:${Date.now()}`,
        value: {
          originalSize: trainingData.inputs.length,
          augmentedSize: augmentedData.inputs.length,
          balancedSize: balancedData.inputs.length,
          epochs: balancedData.epochs,
          timestamp: Date.now(),
        },
        ttl: 86400, // 24 hours
      },
    });

    return {
      continue: true,
      modified: true,
      payload: {
        ...payload,
        trainingData: preprocessedData,
      },
      sideEffects,
    };
  },
};

// ===== Post-Neural Train Hook =====

export const postNeuralTrainHook = {
  id: 'agentic-post-neural-train',
  type: 'post-neural-train' as const,
  priority: 100,
  handler: async (
    payload: NeuralHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { modelId, accuracy, trainingData } = payload;

    const sideEffects: SideEffect[] = [];

    // Store training results
    const trainingResult = {
      modelId,
      accuracy,
      timestamp: Date.now(),
      sessionId: context.sessionId,
      dataSize: trainingData?.inputs.length || 0,
      epochs: trainingData?.epochs || 0,
    };

    sideEffects.push({
      type: 'memory',
      action: 'store',
      data: {
        key: `neural:results:${modelId}:${Date.now()}`,
        value: trainingResult,
        ttl: 604800, // 7 days
      },
    });

    // Update model performance history
    await updateModelPerformance(modelId, accuracy, context);

    // Check if model should be promoted
    const shouldPromote = await evaluateModelPromotion(modelId, accuracy, context);
    if (shouldPromote) {
      sideEffects.push({
        type: 'notification',
        action: 'emit',
        data: {
          event: 'neural:model:promoted',
          data: { modelId, accuracy },
        },
      });
    }

    // Extract learned patterns
    const patterns = await extractLearnedPatterns(modelId, context);
    if (patterns.length > 0) {
      sideEffects.push({
        type: 'neural',
        action: 'store-patterns',
        data: { patterns },
      });
    }

    return {
      continue: true,
      sideEffects,
    };
  },
};

// ===== Neural Pattern Detected Hook =====

export const neuralPatternDetectedHook = {
  id: 'agentic-neural-pattern-detected',
  type: 'neural-pattern-detected' as const,
  priority: 90,
  handler: async (
    payload: NeuralHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { patterns } = payload;

    if (!patterns || patterns.length === 0) {
      return { continue: true };
    }

    const sideEffects: SideEffect[] = [];

    // Analyze pattern significance
    for (const pattern of patterns) {
      const significance = calculatePatternSignificance(pattern);

      if (significance > 0.7) {
        // High significance pattern
        sideEffects.push({
          type: 'memory',
          action: 'store',
          data: {
            key: `pattern:significant:${pattern.id}`,
            value: {
              pattern,
              significance,
              detectedAt: Date.now(),
              context: context.metadata,
            },
            ttl: 0, // Permanent
          },
        });

        // Trigger adaptation if needed
        const adaptation = await generateAdaptation(pattern, context);
        if (adaptation) {
          sideEffects.push({
            type: 'neural',
            action: 'adapt',
            data: { adaptation },
          });
        }
      }

      // Update pattern store
      context.neural.patterns.add(pattern);
    }

    // Check for pattern combinations
    const combinations = findPatternCombinations(patterns, context);
    if (combinations.length > 0) {
      sideEffects.push({
        type: 'log',
        action: 'write',
        data: {
          level: 'info',
          message: 'Pattern combinations detected',
          data: { combinations },
        },
      });
    }

    return {
      continue: true,
      sideEffects,
    };
  },
};

// ===== Neural Prediction Hook =====

export const neuralPredictionHook = {
  id: 'agentic-neural-prediction',
  type: 'neural-prediction' as const,
  priority: 100,
  handler: async (
    payload: NeuralHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { prediction, modelId } = payload;

    if (!prediction) {
      return { continue: true };
    }

    const sideEffects: SideEffect[] = [];

    // Validate prediction confidence
    if (prediction.confidence < 0.5) {
      // Low confidence - consider alternatives
      const alternatives = await generateAlternatives(
        prediction.input,
        modelId,
        context
      );

      if (alternatives.length > 0) {
        return {
          continue: true,
          modified: true,
          payload: {
            ...payload,
            prediction: {
              ...prediction,
              alternatives: [...prediction.alternatives, ...alternatives],
            },
          },
          sideEffects: [
            {
              type: 'metric',
              action: 'increment',
              data: { name: 'neural.predictions.low_confidence' },
            },
          ],
        };
      }
    }

    // Store prediction for future training
    sideEffects.push({
      type: 'memory',
      action: 'store',
      data: {
        key: `prediction:${modelId}:${Date.now()}`,
        value: {
          input: prediction.input,
          output: prediction.output,
          confidence: prediction.confidence,
          timestamp: Date.now(),
        },
        ttl: 86400, // 24 hours
      },
    });

    // Track prediction metrics
    sideEffects.push({
      type: 'metric',
      action: 'update',
      data: {
        name: `neural.predictions.confidence.${modelId}`,
        value: prediction.confidence,
      },
    });

    return {
      continue: true,
      sideEffects,
    };
  },
};

// ===== Neural Adaptation Hook =====

export const neuralAdaptationHook = {
  id: 'agentic-neural-adaptation',
  type: 'neural-adaptation' as const,
  priority: 90,
  handler: async (
    payload: NeuralHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { adaptations, modelId } = payload;

    if (!adaptations || adaptations.length === 0) {
      return { continue: true };
    }

    const sideEffects: SideEffect[] = [];

    // Validate adaptations
    const validAdaptations = adaptations.filter(a =>
      validateAdaptation(a, modelId, context)
    );

    if (validAdaptations.length === 0) {
      return { continue: true };
    }

    // Apply adaptations in order of impact
    const sortedAdaptations = validAdaptations.sort((a, b) =>
      Math.abs(b.impact) - Math.abs(a.impact)
    );

    for (const adaptation of sortedAdaptations) {
      // Store adaptation history
      sideEffects.push({
        type: 'memory',
        action: 'store',
        data: {
          key: `adaptation:${modelId}:${adaptation.target}:${Date.now()}`,
          value: adaptation,
          ttl: 604800, // 7 days
        },
      });

      // Apply adaptation based on type
      switch (adaptation.type) {
        case 'parameter':
          await applyParameterAdaptation(adaptation, modelId, context);
          break;

        case 'architecture':
          await applyArchitectureAdaptation(adaptation, modelId, context);
          break;

        case 'strategy':
          await applyStrategyAdaptation(adaptation, modelId, context);
          break;
      }

      // Track adaptation metrics
      sideEffects.push({
        type: 'metric',
        action: 'increment',
        data: { name: `neural.adaptations.${adaptation.type}` },
      });
    }

    // Trigger retraining if significant adaptations
    const totalImpact = sortedAdaptations.reduce((sum, a) =>
      sum + Math.abs(a.impact), 0
    );

    if (totalImpact > 0.5) {
      sideEffects.push({
        type: 'neural',
        action: 'retrain',
        data: {
          modelId,
          reason: 'significant_adaptations',
          adaptations: sortedAdaptations.length,
        },
      });
    }

    return {
      continue: true,
      sideEffects,
    };
  },
};

// ===== Helper Functions =====

function validateTrainingData(data: TrainingData): { valid: boolean; errors?: string[] } {
  const errors: string[] = [];

  if (!data.inputs || data.inputs.length === 0) {
    errors.push('No input data provided');
  }

  if (!data.outputs || data.outputs.length === 0) {
    errors.push('No output data provided');
  }

  if (data.inputs.length !== data.outputs.length) {
    errors.push('Input and output lengths do not match');
  }

  if (data.batchSize <= 0) {
    errors.push('Invalid batch size');
  }

  if (data.epochs <= 0) {
    errors.push('Invalid number of epochs');
  }

  return {
    valid: errors.length === 0,
    errors: errors.length > 0 ? errors : undefined,
  };
}

async function augmentTrainingData(
  data: TrainingData,
  modelId: string,
  context: AgenticHookContext
): Promise<TrainingData> {
  // Augment with historical successful patterns
  const historicalPatterns = await loadHistoricalPatterns(modelId, context);

  const augmented: TrainingData = {
    ...data,
    inputs: [...data.inputs],
    outputs: [...data.outputs],
    labels: data.labels ? [...data.labels] : undefined,
    weights: data.weights ? [...data.weights] : undefined,
  };

  // Add successful patterns
  for (const pattern of historicalPatterns) {
    if (pattern.type === 'success' && pattern.confidence > 0.8) {
      augmented.inputs.push(pattern.context.input);
      augmented.outputs.push(pattern.context.output);

      if (augmented.weights) {
        // Give higher weight to successful patterns
        augmented.weights.push(pattern.confidence);
      }
    }
  }

  return augmented;
}

function balanceTrainingData(data: TrainingData): TrainingData {
  // Balance dataset to prevent bias
  if (!data.labels) {
    return data;
  }

  // Count occurrences of each label
  const labelCounts = new Map<string, number>();
  for (const label of data.labels) {
    labelCounts.set(label, (labelCounts.get(label) || 0) + 1);
  }

  // Find minimum count
  const minCount = Math.min(...labelCounts.values());

  // Balance by undersampling
  const balanced: TrainingData = {
    ...data,
    inputs: [],
    outputs: [],
    labels: [],
    weights: data.weights ? [] : undefined,
  };

  const labelIndices = new Map<string, number[]>();
  data.labels.forEach((label, i) => {
    if (!labelIndices.has(label)) {
      labelIndices.set(label, []);
    }
    labelIndices.get(label)!.push(i);
  });

  // Sample equally from each label
  for (const [label, indices] of labelIndices.entries()) {
    const sampled = indices
      .sort(() => Math.random() - 0.5)
      .slice(0, minCount);

    for (const idx of sampled) {
      balanced.inputs.push(data.inputs[idx]);
      balanced.outputs.push(data.outputs[idx]);
      balanced.labels!.push(label);

      if (data.weights && balanced.weights) {
        balanced.weights.push(data.weights[idx]);
      }
    }
  }

  return balanced;
}

function preprocessTrainingData(data: TrainingData): TrainingData {
  // Apply preprocessing transformations
  const processed: TrainingData = {
    ...data,
    inputs: data.inputs.map(input => normalizeInput(input)),
    outputs: data.outputs.map(output => normalizeOutput(output)),
  };

  return processed;
}

function normalizeInput(input: any): any {
  // Normalize input data
  // Placeholder - actual implementation would depend on data type
  return input;
}

function normalizeOutput(output: any): any {
  // Normalize output data
  // Placeholder - actual implementation would depend on data type
  return output;
}

async function updateModelPerformance(
  modelId: string,
  accuracy: number,
  context: AgenticHookContext
): Promise<void> {
  const perfKey = `model:performance:${modelId}`;
  const history = await context.memory.cache.get(perfKey) || [];

  history.push({
    accuracy,
    timestamp: Date.now(),
    sessionId: context.sessionId,
  });

  // Keep last 100 performance records
  if (history.length > 100) {
    history.shift();
  }

  await context.memory.cache.set(perfKey, history);
}

async function evaluateModelPromotion(
  modelId: string,
  accuracy: number,
  context: AgenticHookContext
): Promise<boolean> {
  // Check if model should be promoted to production
  const perfKey = `model:performance:${modelId}`;
  const history = await context.memory.cache.get(perfKey) || [];

  if (history.length < 10) {
    return false; // Not enough history
  }

  // Calculate average accuracy over last 10 runs
  const recent = history.slice(-10);
  const avgAccuracy = recent.reduce((sum: number, h: any) =>
    sum + h.accuracy, 0
  ) / recent.length;

  // Promote if consistently above threshold
  return avgAccuracy > 0.85 && accuracy > 0.85;
}

async function extractLearnedPatterns(
  modelId: string,
  context: AgenticHookContext
): Promise<Pattern[]> {
  // Extract patterns learned during training
  // Placeholder implementation
  return [];
}

function calculatePatternSignificance(pattern: Pattern): number {
  // Calculate pattern significance score
  const baseScore = pattern.confidence;
  const occurrenceBonus = Math.min(pattern.occurrences / 100, 0.2);

  return Math.min(baseScore + occurrenceBonus, 1.0);
}

async function generateAdaptation(
  pattern: Pattern,
  context: AgenticHookContext
): Promise<Adaptation | null> {
  // Generate adaptation based on pattern
  if (pattern.type === 'failure' && pattern.confidence > 0.8) {
    return {
      type: 'parameter',
      target: 'learning_rate',
      oldValue: context.neural.training.learningRate,
      newValue: context.neural.training.learningRate * 0.9,
      reason: `High confidence failure pattern detected: ${pattern.id}`,
      impact: -0.1,
    };
  }

  if (pattern.type === 'optimization' && pattern.confidence > 0.9) {
    return {
      type: 'strategy',
      target: 'batch_size',
      oldValue: 32,
      newValue: 64,
      reason: `Optimization opportunity detected: ${pattern.id}`,
      impact: 0.2,
    };
  }

  return null;
}

function findPatternCombinations(
  patterns: Pattern[],
  context: AgenticHookContext
): Array<{ patterns: Pattern[]; significance: number }> {
  const combinations: Array<{ patterns: Pattern[]; significance: number }> = [];

  // Find co-occurring patterns
  for (let i = 0; i < patterns.length; i++) {
    for (let j = i + 1; j < patterns.length; j++) {
      const pattern1 = patterns[i];
      const pattern2 = patterns[j];

      // Check if patterns are related
      if (areRelatedPatterns(pattern1, pattern2)) {
        const significance =
          (pattern1.confidence + pattern2.confidence) / 2 * 1.2;

        combinations.push({
          patterns: [pattern1, pattern2],
          significance: Math.min(significance, 1.0),
        });
      }
    }
  }

  return combinations;
}

function areRelatedPatterns(p1: Pattern, p2: Pattern): boolean {
  // Check if patterns are related
  // Simplified implementation
  return p1.type === p2.type ||
    Object.keys(p1.context).some(key => key in p2.context);
}

async function generateAlternatives(
  input: any,
  modelId: string,
  context: AgenticHookContext
): Promise<Array<{ output: any; confidence: number }>> {
  // Generate alternative predictions
  // Placeholder implementation
  return [];
}

function validateAdaptation(
  adaptation: Adaptation,
  modelId: string,
  context: AgenticHookContext
): boolean {
  // Validate adaptation is safe to apply
  if (Math.abs(adaptation.impact) > 0.5) {
    // Large impact adaptations need more validation
    return context.neural.training.epoch > 10;
  }

  return true;
}

async function applyParameterAdaptation(
  adaptation: Adaptation,
  modelId: string,
  context: AgenticHookContext
): Promise<void> {
  // Apply parameter adaptation
  // Placeholder implementation
}

async function applyArchitectureAdaptation(
  adaptation: Adaptation,
  modelId: string,
  context: AgenticHookContext
): Promise<void> {
  // Apply architecture adaptation
  // Placeholder implementation
}

async function applyStrategyAdaptation(
  adaptation: Adaptation,
  modelId: string,
  context: AgenticHookContext
): Promise<void> {
  // Apply strategy adaptation
  // Placeholder implementation
}

async function loadHistoricalPatterns(
  modelId: string,
  context: AgenticHookContext
): Promise<Pattern[]> {
  // Load historical patterns
  const patterns: Pattern[] = [];

  // Get recent patterns from memory
  const patternKeys = await context.memory.cache.get(`patterns:${modelId}`) || [];

  for (const key of patternKeys.slice(-100)) {
    const pattern = await context.memory.cache.get(key);
    if (pattern) {
      patterns.push(pattern);
    }
  }

  return patterns;
}

// ===== Register Hooks =====

export function registerNeuralHooks(): void {
  agenticHookManager.register(preNeuralTrainHook);
  agenticHookManager.register(postNeuralTrainHook);
  agenticHookManager.register(neuralPatternDetectedHook);
  agenticHookManager.register(neuralPredictionHook);
  agenticHookManager.register(neuralAdaptationHook);
}
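For orientation, a minimal sketch of how the new neural hooks could be exercised from a sibling module. It assumes only what the hunk above shows (registerNeuralHooks, preNeuralTrainHook, and the types module); the payload and context stubs below are hypothetical placeholders, not the package's actual wiring.

import { registerNeuralHooks, preNeuralTrainHook } from './neural-hooks.js';
import type { AgenticHookContext, NeuralHookPayload } from './types.js';

// Register all five neural hooks with the shared agenticHookManager.
registerNeuralHooks();

// Hypothetical payload/context stubs; the real shapes are defined in types.ts.
const payload = {
  operation: 'train',
  modelId: 'demo-model',
  trainingData: { inputs: [[0, 1]], outputs: [[1]], batchSize: 1, epochs: 1 },
} as unknown as NeuralHookPayload;

const context = {
  sessionId: 'local-test',
  metadata: {},
  memory: { cache: new Map() }, // the pre-train handler only reads from the cache
} as unknown as AgenticHookContext;

// Invoke the pre-training handler directly and inspect its result.
preNeuralTrainHook.handler(payload, context).then(result => {
  console.log(result.continue, result.modified, result.sideEffects?.length);
});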