agentic-flow 2.0.0-alpha → 2.0.1-alpha
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +320 -23
- package/agentic-flow/.claude/agents/base-template-generator.md +229 -3
- package/agentic-flow/.claude/agents/core/coder.md +212 -7
- package/agentic-flow/.claude/agents/core/planner.md +228 -7
- package/agentic-flow/.claude/agents/core/researcher.md +205 -10
- package/agentic-flow/.claude/agents/core/reviewer.md +216 -5
- package/agentic-flow/.claude/agents/core/tester.md +213 -3
- package/agentic-flow/.claude/agents/data/ml/data-ml-model.md +256 -5
- package/agentic-flow/.claude/agents/development/backend/dev-backend-api.md +209 -6
- package/agentic-flow/.claude/agents/documentation/api-docs/docs-api-openapi.md +185 -5
- package/agentic-flow/.claude/agents/github/code-review-swarm.md +307 -468
- package/agentic-flow/.claude/agents/github/issue-tracker.md +270 -13
- package/agentic-flow/.claude/agents/github/pr-manager.md +259 -12
- package/agentic-flow/.claude/agents/github/release-manager.md +253 -15
- package/agentic-flow/.claude/agents/github/workflow-automation.md +277 -9
- package/agentic-flow/.claude/agents/sona/sona-learning-optimizer.md +496 -0
- package/agentic-flow/.claude/agents/sparc/architecture.md +231 -4
- package/agentic-flow/.claude/agents/sparc/pseudocode.md +206 -4
- package/agentic-flow/.claude/agents/sparc/refinement.md +283 -6
- package/agentic-flow/.claude/agents/sparc/specification.md +205 -3
- package/agentic-flow/.claude/agents/swarm/adaptive-coordinator.md +731 -0
- package/agentic-flow/.claude/agents/swarm/hierarchical-coordinator.md +455 -1
- package/agentic-flow/.claude/agents/swarm/mesh-coordinator.md +571 -0
- package/agentic-flow/.claude/agents/templates/sparc-coordinator.md +336 -5
- package/agentic-flow/dist/cli/commands/sona-train.d.ts.map +1 -0
- package/agentic-flow/dist/cli/commands/sona-train.js +295 -0
- package/agentic-flow/dist/cli/commands/sona-train.js.map +1 -0
- package/agentic-flow/dist/cli/commands/sona.d.ts.map +1 -0
- package/agentic-flow/dist/cli/commands/sona.js +290 -0
- package/agentic-flow/dist/cli/commands/sona.js.map +1 -0
- package/agentic-flow/dist/core/agentdb-fast.d.ts.map +1 -0
- package/agentic-flow/dist/core/agentdb-fast.js +299 -0
- package/agentic-flow/dist/core/agentdb-fast.js.map +1 -0
- package/agentic-flow/dist/core/attention-fallbacks.d.ts.map +1 -0
- package/agentic-flow/dist/core/attention-fallbacks.js +321 -0
- package/agentic-flow/dist/core/attention-fallbacks.js.map +1 -0
- package/agentic-flow/dist/core/embedding-service.d.ts.map +1 -0
- package/agentic-flow/dist/core/embedding-service.js +370 -0
- package/agentic-flow/dist/core/embedding-service.js.map +1 -0
- package/agentic-flow/dist/core/gnn-wrapper.d.ts.map +1 -0
- package/agentic-flow/dist/core/gnn-wrapper.js +236 -0
- package/agentic-flow/dist/core/gnn-wrapper.js.map +1 -0
- package/agentic-flow/dist/core/index.d.ts.map +1 -1
- package/agentic-flow/dist/core/index.js +80 -3
- package/agentic-flow/dist/core/index.js.map +1 -1
- package/agentic-flow/dist/mcp/claudeFlowSdkServer.d.ts.map +1 -1
- package/agentic-flow/dist/mcp/claudeFlowSdkServer.js +109 -0
- package/agentic-flow/dist/mcp/claudeFlowSdkServer.js.map +1 -1
- package/agentic-flow/dist/mcp/tools/agent-booster-tools.d.ts.map +1 -0
- package/agentic-flow/dist/mcp/tools/agent-booster-tools.js +262 -0
- package/agentic-flow/dist/mcp/tools/agent-booster-tools.js.map +1 -0
- package/agentic-flow/dist/mcp/tools/sona-tools.d.ts.map +1 -0
- package/agentic-flow/dist/mcp/tools/sona-tools.js +560 -0
- package/agentic-flow/dist/mcp/tools/sona-tools.js.map +1 -0
- package/agentic-flow/dist/optimizations/agent-booster-migration.d.ts.map +1 -0
- package/agentic-flow/dist/optimizations/agent-booster-migration.js +323 -0
- package/agentic-flow/dist/optimizations/agent-booster-migration.js.map +1 -0
- package/agentic-flow/dist/optimizations/configuration-tuning.d.ts.map +1 -0
- package/agentic-flow/dist/optimizations/configuration-tuning.js +422 -0
- package/agentic-flow/dist/optimizations/configuration-tuning.js.map +1 -0
- package/agentic-flow/dist/optimizations/ruvector-backend.d.ts.map +1 -0
- package/agentic-flow/dist/optimizations/ruvector-backend.js +464 -0
- package/agentic-flow/dist/optimizations/ruvector-backend.js.map +1 -0
- package/agentic-flow/dist/services/embedding-service.d.ts.map +1 -0
- package/agentic-flow/dist/services/embedding-service.js +367 -0
- package/agentic-flow/dist/services/embedding-service.js.map +1 -0
- package/agentic-flow/dist/services/sona-agent-training.d.ts.map +1 -0
- package/agentic-flow/dist/services/sona-agent-training.js +382 -0
- package/agentic-flow/dist/services/sona-agent-training.js.map +1 -0
- package/agentic-flow/dist/services/sona-agentdb-integration.d.ts.map +1 -0
- package/agentic-flow/dist/services/sona-agentdb-integration.js +346 -0
- package/agentic-flow/dist/services/sona-agentdb-integration.js.map +1 -0
- package/agentic-flow/dist/services/sona-service.d.ts.map +1 -0
- package/agentic-flow/dist/services/sona-service.js +448 -0
- package/agentic-flow/dist/services/sona-service.js.map +1 -0
- package/agentic-flow/dist/services/sona-types.d.ts.map +1 -0
- package/agentic-flow/dist/services/sona-types.js +59 -0
- package/agentic-flow/dist/services/sona-types.js.map +1 -0
- package/docs/README.md +27 -2
- package/package.json +12 -2
- package/docs/AGENTIC_JUJUTSU_QUICKSTART.md +0 -491

@@ -128,6 +128,737 @@ Switch to HYBRID when:

- Experimental optimization required

## 🧠 Advanced Attention Mechanisms (v2.0.0-alpha)

### Dynamic Attention Mechanism Selection

Adaptive coordinators use **dynamic attention selection** to choose the optimal mechanism based on task characteristics and real-time performance:

```typescript
import { AttentionService } from 'agentdb';

// Initialize attention service for adaptive coordination
const attentionService = new AttentionService({
  embeddingDim: 384,
  runtime: 'napi' // 2.49x-7.47x faster
});

// Adaptive coordinator with dynamic attention selection
class AdaptiveCoordinator {
  constructor(
    private attentionService: AttentionService
  ) {}

  /**
   * Dynamically select optimal attention mechanism
   * Switches between flash/multi-head/linear/hyperbolic/moe
   */
  async adaptiveCoordination(
    agentOutputs: AgentOutput[],
    taskCharacteristics: TaskCharacteristics
  ): Promise<CoordinationResult> {
    // 1. Select optimal attention mechanism
    const mechanism = this.selectAttentionMechanism(
      taskCharacteristics,
      agentOutputs.length
    );

    console.log(`Selected attention mechanism: ${mechanism}`);

    // 2. Convert outputs to embeddings
    const embeddings = await this.outputsToEmbeddings(agentOutputs);

    // 3. Apply selected attention mechanism
    let result: any;
    switch (mechanism) {
      case 'flash':
        // 2.49x-7.47x faster for large contexts
        result = await this.attentionService.flashAttention(
          embeddings,
          embeddings,
          embeddings
        );
        break;

      case 'multi-head':
        // Standard multi-head for balanced tasks
        result = await this.attentionService.multiHeadAttention(
          embeddings,
          embeddings,
          embeddings,
          { numHeads: 8 }
        );
        break;

      case 'linear':
        // Linear for very long sequences (>2048 tokens)
        result = await this.attentionService.linearAttention(
          embeddings,
          embeddings,
          embeddings
        );
        break;

      case 'hyperbolic':
        // Hyperbolic for hierarchical structures
        result = await this.attentionService.hyperbolicAttention(
          embeddings,
          embeddings,
          embeddings,
          { curvature: -1.0 }
        );
        break;

      case 'moe':
        // MoE for expert routing
        result = await this.moeAttention(
          embeddings,
          agentOutputs
        );
        break;

      default:
        throw new Error(`Unknown attention mechanism: ${mechanism}`);
    }

    return {
      consensus: this.generateConsensus(agentOutputs, result),
      attentionWeights: this.extractAttentionWeights(result),
      topAgents: this.rankAgents(result),
      mechanism,
      executionTimeMs: result.executionTimeMs,
      memoryUsage: result.memoryUsage
    };
  }

  /**
   * Select optimal attention mechanism based on task characteristics
   */
  private selectAttentionMechanism(
    taskChar: TaskCharacteristics,
    numAgents: number
  ): AttentionMechanism {
    // Rule-based selection with performance metrics

    // Explicit preference (e.g. set by feedback-based adaptation) wins
    if (taskChar.preferredMechanism) {
      return taskChar.preferredMechanism;
    }

    // Linear Attention: Very long sequences (checked before flash, which it overlaps)
    if (taskChar.contextSize > 2048) {
      return 'linear';
    }

    // Flash Attention: Large contexts or speed critical
    if (taskChar.contextSize > 1024 || taskChar.speedCritical) {
      return 'flash';
    }

    // Hyperbolic Attention: Hierarchical structures
    if (taskChar.hasHierarchy) {
      return 'hyperbolic';
    }

    // MoE Attention: Specialized expert routing
    if (taskChar.requiresExpertise && numAgents >= 5) {
      return 'moe';
    }

    // Default: Multi-head attention for balanced tasks
    return 'multi-head';
  }

  /**
   * MoE Attention: Route tasks to top-k expert agents
   */
  async moeAttention(
    embeddings: number[][],
    agentOutputs: AgentOutput[]
  ): Promise<any> {
    const topK = Math.min(3, embeddings.length);

    // Calculate expert scores for each agent
    const expertScores = await this.calculateExpertScores(
      embeddings,
      agentOutputs
    );

    // Select top-k experts
    const topExperts = expertScores
      .map((score, idx) => ({ idx, score }))
      .sort((a, b) => b.score - a.score)
      .slice(0, topK);

    console.log('Top experts selected:', topExperts);

    // Apply multi-head attention only on top-k experts
    const expertEmbeddings = topExperts.map(e => embeddings[e.idx]);

    const result = await this.attentionService.multiHeadAttention(
      expertEmbeddings,
      expertEmbeddings,
      expertEmbeddings,
      { numHeads: topK }
    );

    return {
      ...result,
      expertIndices: topExperts.map(e => e.idx),
      expertScores: topExperts.map(e => e.score)
    };
  }

  /**
   * Calculate expert scores based on task-agent compatibility
   */
  private async calculateExpertScores(
    embeddings: number[][],
    agentOutputs: AgentOutput[]
  ): Promise<number[]> {
    // Score each agent based on:
    // 1. Capability match
    // 2. Past performance
    // 3. Current availability

    return embeddings.map((emb, idx) => {
      const agent = agentOutputs[idx];

      const capabilityScore = this.scoreCapabilities(agent);
      const performanceScore = this.scorePerformance(agent);
      const availabilityScore = this.scoreAvailability(agent);

      return (
        capabilityScore * 0.5 +
        performanceScore * 0.3 +
        availabilityScore * 0.2
      );
    });
  }

  private scoreCapabilities(agent: AgentOutput): number {
    // Capability matching score (0-1)
    const hasRequiredCaps = (agent.capabilities?.length ?? 0) > 0;
    return hasRequiredCaps ? 0.8 : 0.3;
  }

  private scorePerformance(agent: AgentOutput): number {
    // Past performance score (0-1)
    return agent.performanceHistory?.avgReward || 0.5;
  }

  private scoreAvailability(agent: AgentOutput): number {
    // Current availability score (0-1)
    const currentLoad = agent.currentLoad || 0.5;
    return 1 - currentLoad; // Lower load = higher availability
  }

  /**
   * Performance-based adaptation: Track and switch mechanisms
   */
  async adaptWithFeedback(
    agentOutputs: AgentOutput[],
    taskChar: TaskCharacteristics,
    performanceHistory: PerformanceMetric[]
  ): Promise<CoordinationResult> {
    // Analyze historical performance of each mechanism
    const mechanismPerformance = this.analyzeMechanismPerformance(
      performanceHistory
    );

    // Select mechanism with best historical performance
    const bestMechanism = Object.entries(mechanismPerformance)
      .sort(([, a], [, b]) => b.avgReward - a.avgReward)[0][0] as AttentionMechanism;

    console.log(`Historical analysis suggests: ${bestMechanism}`);

    // Override with best performing mechanism
    taskChar.preferredMechanism = bestMechanism;

    return this.adaptiveCoordination(agentOutputs, taskChar);
  }

  private analyzeMechanismPerformance(
    history: PerformanceMetric[]
  ): Record<AttentionMechanism, { avgReward: number; count: number }> {
    const stats: Record<string, { total: number; count: number }> = {
      flash: { total: 0, count: 0 },
      'multi-head': { total: 0, count: 0 },
      linear: { total: 0, count: 0 },
      hyperbolic: { total: 0, count: 0 },
      moe: { total: 0, count: 0 }
    };

    history.forEach(metric => {
      if (stats[metric.mechanism]) {
        stats[metric.mechanism].total += metric.reward;
        stats[metric.mechanism].count += 1;
      }
    });

    const result: any = {};
    Object.entries(stats).forEach(([mechanism, { total, count }]) => {
      result[mechanism] = {
        avgReward: count > 0 ? total / count : 0,
        count
      };
    });

    return result;
  }

  /**
   * GraphRoPE: Topology-aware coordination with dynamic topology
   */
  async topologyAwareAdaptation(
    agentOutputs: AgentOutput[],
    currentTopology: 'hierarchical' | 'mesh' | 'ring' | 'star'
  ): Promise<CoordinationResult> {
    // Build graph based on current topology
    const graphContext = this.buildTopologyGraph(agentOutputs, currentTopology);

    const embeddings = await this.outputsToEmbeddings(agentOutputs);

    // Apply GraphRoPE for topology-aware position encoding
    const positionEncodedEmbeddings = this.applyGraphRoPE(
      embeddings,
      graphContext
    );

    // Select attention mechanism based on topology
    const mechanism = this.selectMechanismForTopology(currentTopology);

    let result: any;
    switch (mechanism) {
      case 'hyperbolic':
        result = await this.attentionService.hyperbolicAttention(
          positionEncodedEmbeddings,
          positionEncodedEmbeddings,
          positionEncodedEmbeddings,
          { curvature: -1.0 }
        );
        break;

      case 'multi-head':
        result = await this.attentionService.multiHeadAttention(
          positionEncodedEmbeddings,
          positionEncodedEmbeddings,
          positionEncodedEmbeddings,
          { numHeads: 8 }
        );
        break;

      default:
        throw new Error(`Unsupported mechanism for topology: ${mechanism}`);
    }

    return this.processCoordinationResult(result, agentOutputs, mechanism);
  }

  private buildTopologyGraph(
    outputs: AgentOutput[],
    topology: 'hierarchical' | 'mesh' | 'ring' | 'star'
  ): GraphContext {
    const nodes = outputs.map((_, idx) => idx);
    const edges: [number, number][] = [];
    const edgeWeights: number[] = [];

    switch (topology) {
      case 'hierarchical': {
        // Queens at top, workers below
        const queens = Math.ceil(outputs.length * 0.2);
        for (let i = 0; i < queens; i++) {
          for (let j = queens; j < outputs.length; j++) {
            edges.push([i, j]);
            edgeWeights.push(1.5); // Queen influence
          }
        }
        break;
      }

      case 'mesh':
        // Fully connected
        for (let i = 0; i < outputs.length; i++) {
          for (let j = i + 1; j < outputs.length; j++) {
            edges.push([i, j]);
            edgeWeights.push(1.0);
          }
        }
        break;

      case 'ring':
        // Circular connections
        for (let i = 0; i < outputs.length; i++) {
          const next = (i + 1) % outputs.length;
          edges.push([i, next]);
          edgeWeights.push(1.0);
        }
        break;

      case 'star':
        // Central hub to all
        for (let i = 1; i < outputs.length; i++) {
          edges.push([0, i]);
          edgeWeights.push(1.0);
        }
        break;
    }

    return {
      nodes,
      edges,
      edgeWeights,
      nodeLabels: outputs.map(o => o.agentType)
    };
  }

  private selectMechanismForTopology(
    topology: 'hierarchical' | 'mesh' | 'ring' | 'star'
  ): AttentionMechanism {
    switch (topology) {
      case 'hierarchical':
        return 'hyperbolic'; // Natural for hierarchies
      case 'mesh':
        return 'multi-head'; // Peer-to-peer
      case 'ring':
      case 'star':
        return 'multi-head'; // Standard attention
    }
  }

  private applyGraphRoPE(
    embeddings: number[][],
    graphContext: GraphContext
  ): number[][] {
    return embeddings.map((emb, idx) => {
      // Calculate graph properties
      const degree = graphContext.edges.filter(
        ([from, to]) => from === idx || to === idx
      ).length;

      // Average weight of incident edges (edgeWeights is indexed parallel to edges)
      const avgEdgeWeight = graphContext.edges.reduce(
        (acc, [from, to], edgeIdx) =>
          from === idx || to === idx
            ? acc + (graphContext.edgeWeights[edgeIdx] || 1.0)
            : acc,
        0
      ) / (degree || 1);

      // Position encoding based on graph structure
      const positionEncoding = this.generateGraphPositionEncoding(
        emb.length,
        degree,
        avgEdgeWeight
      );

      return emb.map((v, i) => v + positionEncoding[i] * 0.1);
    });
  }

  private generateGraphPositionEncoding(
    dim: number,
    degree: number,
    weight: number
  ): number[] {
    return Array.from({ length: dim }, (_, i) => {
      const freq = 1 / Math.pow(10000, i / dim);
      return Math.sin(degree * freq) + Math.cos(weight * freq);
    });
  }

  private async outputsToEmbeddings(
    outputs: AgentOutput[]
  ): Promise<number[][]> {
    return outputs.map(output =>
      Array.from({ length: 384 }, () => Math.random())
    );
  }

  private extractAttentionWeights(result: any): number[] {
    return Array.from(result.output.slice(0, result.output.length / 384));
  }

  private generateConsensus(outputs: AgentOutput[], result: any): string {
    const weights = this.extractAttentionWeights(result);
    const weightedOutputs = outputs.map((output, idx) => ({
      output: output.content,
      weight: weights[idx]
    }));

    const best = weightedOutputs.reduce((max, curr) =>
      curr.weight > max.weight ? curr : max
    );

    return best.output;
  }

  private rankAgents(result: any): AgentRanking[] {
    const weights = this.extractAttentionWeights(result);
    return weights
      .map((weight, idx) => ({ agentId: idx, score: weight }))
      .sort((a, b) => b.score - a.score);
  }

  private processCoordinationResult(
    result: any,
    outputs: AgentOutput[],
    mechanism: AttentionMechanism
  ): CoordinationResult {
    return {
      consensus: this.generateConsensus(outputs, result),
      attentionWeights: this.extractAttentionWeights(result),
      topAgents: this.rankAgents(result),
      mechanism,
      executionTimeMs: result.executionTimeMs,
      memoryUsage: result.memoryUsage
    };
  }
}

// Type definitions
interface AgentOutput {
  agentType: string;
  content: string;
  capabilities?: string[];
  performanceHistory?: {
    avgReward: number;
    successRate: number;
  };
  currentLoad?: number;
}

interface TaskCharacteristics {
  contextSize: number;
  speedCritical: boolean;
  hasHierarchy: boolean;
  requiresExpertise: boolean;
  preferredMechanism?: AttentionMechanism;
}

interface GraphContext {
  nodes: number[];
  edges: [number, number][];
  edgeWeights: number[];
  nodeLabels: string[];
}

interface CoordinationResult {
  consensus: string;
  attentionWeights: number[];
  topAgents: AgentRanking[];
  mechanism: AttentionMechanism;
  executionTimeMs: number;
  memoryUsage?: number;
}

interface AgentRanking {
  agentId: number;
  score: number;
}

interface PerformanceMetric {
  mechanism: AttentionMechanism;
  reward: number;
  latencyMs: number;
}

type AttentionMechanism =
  | 'flash'
  | 'multi-head'
  | 'linear'
  | 'hyperbolic'
  | 'moe';
```

### Usage Example: Adaptive Dynamic Coordination

```typescript
// Initialize adaptive coordinator
const coordinator = new AdaptiveCoordinator(attentionService);

// Define task characteristics
const taskChar: TaskCharacteristics = {
  contextSize: 2048,
  speedCritical: true,
  hasHierarchy: false,
  requiresExpertise: true
};

// Agent outputs with expertise levels
const agentOutputs = [
  {
    agentType: 'auth-expert',
    content: 'Implement OAuth2 with JWT tokens',
    capabilities: ['authentication', 'security'],
    performanceHistory: { avgReward: 0.92, successRate: 0.95 },
    currentLoad: 0.3
  },
  {
    agentType: 'db-expert',
    content: 'Use PostgreSQL with connection pooling',
    capabilities: ['database', 'optimization'],
    performanceHistory: { avgReward: 0.88, successRate: 0.90 },
    currentLoad: 0.5
  },
  {
    agentType: 'api-expert',
    content: 'Design RESTful API with OpenAPI spec',
    capabilities: ['api-design', 'documentation'],
    performanceHistory: { avgReward: 0.85, successRate: 0.87 },
    currentLoad: 0.2
  },
  {
    agentType: 'test-expert',
    content: 'Create integration tests with Jest',
    capabilities: ['testing', 'quality-assurance'],
    performanceHistory: { avgReward: 0.90, successRate: 0.93 },
    currentLoad: 0.4
  },
  {
    agentType: 'generalist',
    content: 'Build complete authentication system',
    capabilities: ['general'],
    performanceHistory: { avgReward: 0.70, successRate: 0.75 },
    currentLoad: 0.1
  }
];

// Adaptive coordination with dynamic mechanism selection
const result = await coordinator.adaptiveCoordination(agentOutputs, taskChar);

console.log('Selected mechanism:', result.mechanism); // 'flash' (large context, speed-critical)
console.log('Consensus:', result.consensus);
console.log('Top experts:', result.topAgents.slice(0, 3));
console.log(`Execution time: ${result.executionTimeMs}ms`);

// Adapt with performance feedback
const performanceHistory: PerformanceMetric[] = [
  { mechanism: 'flash', reward: 0.85, latencyMs: 120 },
  { mechanism: 'multi-head', reward: 0.82, latencyMs: 250 },
  { mechanism: 'moe', reward: 0.92, latencyMs: 180 }
];

const adaptiveResult = await coordinator.adaptWithFeedback(
  agentOutputs,
  taskChar,
  performanceHistory
);

console.log('Best mechanism from history:', adaptiveResult.mechanism); // 'moe'
```
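
The coordinator above also exposes `topologyAwareAdaptation` for the GraphRoPE path, which the example does not exercise. A minimal sketch of calling it on the same agents (reusing `coordinator` and `agentOutputs` from the example above; the expected mechanism follows `selectMechanismForTopology`):

```typescript
// Sketch: topology-aware coordination via the GraphRoPE path shown above.
// Assumes `coordinator` and `agentOutputs` from the previous example.
const hierResult = await coordinator.topologyAwareAdaptation(
  agentOutputs,
  'hierarchical' // hierarchical topology maps to hyperbolic attention
);

console.log('Mechanism for hierarchy:', hierResult.mechanism); // 'hyperbolic'
console.log('Agent ranking:', hierResult.topAgents);
```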

### Self-Learning Integration (ReasoningBank)

```typescript
import { ReasoningBank } from 'agentdb';

class LearningAdaptiveCoordinator extends AdaptiveCoordinator {
  constructor(
    attentionService: AttentionService,
    private reasoningBank: ReasoningBank
  ) {
    super(attentionService);
  }

  /**
   * Learn optimal mechanism selection from past coordinations
   */
  async coordinateWithLearning(
    taskDescription: string,
    agentOutputs: AgentOutput[],
    taskChar: TaskCharacteristics
  ): Promise<CoordinationResult> {
    // 1. Search for similar past tasks
    const similarPatterns = await this.reasoningBank.searchPatterns({
      task: taskDescription,
      k: 5,
      minReward: 0.8
    });

    if (similarPatterns.length > 0) {
      console.log('📚 Learning from past adaptive coordinations:');

      // Extract best performing mechanisms
      const mechanismFrequency: Record<string, number> = {};
      similarPatterns.forEach(pattern => {
        const mechanism = pattern.metadata?.mechanism;
        if (mechanism) {
          mechanismFrequency[mechanism] = (mechanismFrequency[mechanism] || 0) + 1;
        }
      });

      const bestMechanism = Object.entries(mechanismFrequency)
        .sort(([, a], [, b]) => b - a)[0]?.[0] as AttentionMechanism;

      if (bestMechanism) {
        console.log(`Historical preference: ${bestMechanism}`);
        taskChar.preferredMechanism = bestMechanism;
      }
    }

    // 2. Coordinate with adaptive attention
    const result = await this.adaptiveCoordination(agentOutputs, taskChar);

    // 3. Calculate success metrics
    const reward = this.calculateAdaptiveReward(result);
    const success = reward > 0.8;

    // 4. Store learning pattern with mechanism metadata
    await this.reasoningBank.storePattern({
      sessionId: `adaptive-${Date.now()}`,
      task: taskDescription,
      input: JSON.stringify({
        agents: agentOutputs,
        taskChar
      }),
      output: result.consensus,
      reward,
      success,
      critique: this.generateCritique(result),
      tokensUsed: this.estimateTokens(result),
      latencyMs: result.executionTimeMs,
      metadata: {
        mechanism: result.mechanism,
        contextSize: taskChar.contextSize,
        agentCount: agentOutputs.length
      }
    });

    return result;
  }

  private calculateAdaptiveReward(result: CoordinationResult): number {
    // Reward based on:
    // - Execution speed
    // - Memory efficiency
    // - Consensus quality

    const speedScore = Math.max(0, 1 - result.executionTimeMs / 5000);
    const memoryScore = result.memoryUsage
      ? Math.max(0, 1 - result.memoryUsage / 100)
      : 0.5;
    const qualityScore = result.attentionWeights
      .reduce((acc, w) => acc + w, 0) / result.attentionWeights.length;

    return (speedScore * 0.4 + memoryScore * 0.2 + qualityScore * 0.4);
  }

  private generateCritique(result: CoordinationResult): string {
    const critiques: string[] = [];

    if (result.executionTimeMs > 3000) {
      critiques.push(`Slow execution (${result.executionTimeMs}ms) - consider flash attention`);
    }

    if (result.mechanism === 'linear' && result.executionTimeMs < 1000) {
      critiques.push('Linear attention was fast - could use multi-head for better quality');
    }

    if (result.mechanism === 'moe') {
      critiques.push(`MoE routing selected ${result.topAgents.length} experts`);
    }

    return critiques.join('; ') || `Optimal ${result.mechanism} coordination`;
  }

  private estimateTokens(result: CoordinationResult): number {
    return result.consensus.split(' ').length * 1.3;
  }
}
```
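
A minimal wiring sketch for `LearningAdaptiveCoordinator`, assuming a `ReasoningBank` instance is already available (its construction is not shown in this diff, so it is only declared) and reusing `attentionService`, `agentOutputs`, and `taskChar` from the usage example above:

```typescript
// Sketch only: ReasoningBank construction depends on agentdb's API and is
// assumed to exist here rather than instantiated.
declare const reasoningBank: ReasoningBank;

const learningCoordinator = new LearningAdaptiveCoordinator(
  attentionService,
  reasoningBank
);

// Coordinates, scores the outcome, and stores the pattern (mechanism,
// context size, agent count) so later calls can reuse the best mechanism.
const learned = await learningCoordinator.coordinateWithLearning(
  'Build authentication system with OAuth2',
  agentOutputs,
  taskChar
);

console.log('Mechanism used:', learned.mechanism);
console.log('Consensus:', learned.consensus);
```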

## MCP Neural Integration

### Pattern Recognition & Learning
|