claude-flow 2.7.33 → 2.7.34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +9 -2
- package/.claude/skills/agentic-jujutsu/SKILL.md +1 -1
- package/CHANGELOG.md +75 -0
- package/bin/claude-flow +1 -1
- package/dist/src/cli/commands/mcp.js +61 -7
- package/dist/src/cli/commands/mcp.js.map +1 -1
- package/dist/src/cli/help-formatter.js +5 -0
- package/dist/src/cli/simple-commands/init/agent-copier.js +9 -3
- package/dist/src/cli/simple-commands/init/agent-copier.js.map +1 -1
- package/dist/src/core/version.js +1 -1
- package/dist/src/mcp/async/job-manager-mcp25.js +240 -0
- package/dist/src/mcp/async/job-manager-mcp25.js.map +1 -0
- package/dist/src/mcp/index.js +8 -0
- package/dist/src/mcp/index.js.map +1 -1
- package/dist/src/mcp/protocol/version-negotiation.js +182 -0
- package/dist/src/mcp/protocol/version-negotiation.js.map +1 -0
- package/dist/src/mcp/registry/mcp-registry-client-2025.js +210 -0
- package/dist/src/mcp/registry/mcp-registry-client-2025.js.map +1 -0
- package/dist/src/mcp/server-factory.js +189 -0
- package/dist/src/mcp/server-factory.js.map +1 -0
- package/dist/src/mcp/server-mcp-2025.js +283 -0
- package/dist/src/mcp/server-mcp-2025.js.map +1 -0
- package/dist/src/mcp/tool-registry-progressive.js +319 -0
- package/dist/src/mcp/tool-registry-progressive.js.map +1 -0
- package/dist/src/mcp/tools/_template.js +62 -0
- package/dist/src/mcp/tools/_template.js.map +1 -0
- package/dist/src/mcp/tools/loader.js +228 -0
- package/dist/src/mcp/tools/loader.js.map +1 -0
- package/dist/src/mcp/tools/system/search.js +224 -0
- package/dist/src/mcp/tools/system/search.js.map +1 -0
- package/dist/src/mcp/tools/system/status.js +168 -0
- package/dist/src/mcp/tools/system/status.js.map +1 -0
- package/dist/src/mcp/validation/schema-validator-2025.js +198 -0
- package/dist/src/mcp/validation/schema-validator-2025.js.map +1 -0
- package/docs/.claude-flow/metrics/performance.json +3 -3
- package/docs/.claude-flow/metrics/task-metrics.json +3 -3
- package/docs/.github-release-issue-v2.7.33.md +488 -0
- package/docs/AGENTDB_BRANCH_MERGE_VERIFICATION.md +436 -0
- package/docs/BRANCH_REVIEW_SUMMARY.md +439 -0
- package/docs/DEEP_CODE_REVIEW_v2.7.33.md +1159 -0
- package/docs/MCP_2025_FEATURE_CONFIRMATION.md +698 -0
- package/docs/NPM_PUBLISH_GUIDE_v2.7.33.md +628 -0
- package/docs/REGRESSION_TEST_REPORT_v2.7.33.md +397 -0
- package/docs/RELEASE_NOTES_v2.7.33.md +618 -0
- package/docs/RELEASE_READINESS_SUMMARY.md +377 -0
- package/docs/RELEASE_SUMMARY_v2.7.33.md +456 -0
- package/docs/agentic-flow-agentdb-mcp-integration.md +1198 -0
- package/docs/mcp-2025-implementation-summary.md +459 -0
- package/docs/mcp-spec-2025-implementation-plan.md +1330 -0
- package/docs/phase-1-2-implementation-summary.md +676 -0
- package/docs/regression-analysis-phase-1-2.md +555 -0
- package/package.json +5 -2
- package/src/cli/commands/mcp.ts +86 -9
- package/src/cli/simple-commands/init/agent-copier.js +10 -5
- package/src/mcp/async/job-manager-mcp25.ts +456 -0
- package/src/mcp/index.ts +60 -0
- package/src/mcp/protocol/version-negotiation.ts +329 -0
- package/src/mcp/registry/mcp-registry-client-2025.ts +334 -0
- package/src/mcp/server-factory.ts +426 -0
- package/src/mcp/server-mcp-2025.ts +507 -0
- package/src/mcp/tool-registry-progressive.ts +539 -0
- package/src/mcp/tools/_template.ts +174 -0
- package/src/mcp/tools/loader.ts +362 -0
- package/src/mcp/tools/system/search.ts +276 -0
- package/src/mcp/tools/system/status.ts +206 -0
- package/src/mcp/validation/schema-validator-2025.ts +294 -0
- package/docs/AGENTDB_V1.6.1_DEEP_REVIEW.md +0 -386
- package/docs/AGENT_FOLDER_STRUCTURE_FIX.md +0 -192
- package/docs/RECENT_RELEASES_SUMMARY.md +0 -375
- package/docs/V2.7.31_RELEASE_NOTES.md +0 -375
- /package/.claude/agents/analysis/{analyze-code-quality.md → code-review/analyze-code-quality.md} +0 -0
- /package/.claude/agents/architecture/{arch-system-design.md → system-design/arch-system-design.md} +0 -0
- /package/.claude/agents/data/{data-ml-model.md → ml/data-ml-model.md} +0 -0
- /package/.claude/agents/development/{dev-backend-api.md → backend/dev-backend-api.md} +0 -0
- /package/.claude/agents/devops/{ops-cicd-github.md → ci-cd/ops-cicd-github.md} +0 -0
- /package/.claude/agents/documentation/{docs-api-openapi.md → api-docs/docs-api-openapi.md} +0 -0
- /package/.claude/agents/specialized/{spec-mobile-react-native.md → mobile/spec-mobile-react-native.md} +0 -0
- /package/.claude/agents/testing/{tdd-london-swarm.md → unit/tdd-london-swarm.md} +0 -0
- /package/.claude/agents/testing/{production-validator.md → validation/production-validator.md} +0 -0

Content of the new file `package/docs/agentic-flow-agentdb-mcp-integration.md` (+1198 lines):

# Agentic Flow & AgentDB MCP 2025 Integration

**Purpose**: Map MCP 2025 spec changes onto Agentic Flow and AgentDB with concrete interface updates.

---

## Overview

The MCP 2025 spec introduces async operations, registry discovery, and code execution patterns. Here's how these map to our existing systems (a minimal sketch of the job-handle shape assumed throughout follows the table):

| MCP 2025 Feature | Agentic Flow Integration | AgentDB Integration |
|------------------|--------------------------|---------------------|
| **Async Operations** | Swarm task orchestration with job handles | Neural training as async jobs |
| **Registry Discovery** | Auto-publish swarm capabilities | Publish AgentDB as data backend |
| **Code Execution** | Execute agent logic in sandbox | Process vectors/embeddings locally |
| **Progressive Disclosure** | Lazy-load agent types | Lazy-load collection schemas |
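
Every `*Async` method in the interfaces below returns a job handle instead of the finished result. The handle shape is defined in `src/mcp/async/job-manager-mcp25.ts`; the following is only a minimal sketch of the fields this document relies on (names are assumptions, not the shipped API):

```typescript
// Hypothetical sketch of the job handle and job status shapes assumed in this
// document. See src/mcp/async/job-manager-mcp25.ts for the canonical types.
export interface JobHandle {
  jobId: string;   // unique id, later passed to the 'jobs/status' tool
  pollUrl: string; // e.g. `/jobs/${jobId}` on HTTP transports
  status: 'queued' | 'running' | 'completed' | 'failed';
}

export interface JobStatus<TResult = unknown> {
  jobId: string;
  status: 'queued' | 'running' | 'completed' | 'failed';
  progress?: number; // 0-100, driven by the onProgress callbacks below
  result?: TResult;  // present once status === 'completed'
  error?: string;    // present once status === 'failed'
}
```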

---

## Part 1: Agentic Flow Interface Updates

### Current Architecture

```typescript
// Current: Agentic Flow Orchestrator
class AgenticFlowOrchestrator {
  async spawnAgent(profile: AgentProfile): Promise<string>;
  async assignTask(agentId: string, task: Task): Promise<void>;
  async getAgentStatus(agentId: string): Promise<AgentStatus>;
}

// Current: Swarm Coordinator
class SwarmCoordinator {
  async initSwarm(topology: Topology): Promise<string>;
  async scaleSwarm(swarmId: string, targetSize: number): Promise<void>;
  async coordinateTask(swarmId: string, task: Task): Promise<Result>;
}
```

### New: Async-First Interfaces

```typescript
/**
 * Updated Orchestrator with async operations
 */
export class AgenticFlowOrchestrator {
  private jobManager: JobManager;

  /**
   * Spawn agent asynchronously
   * Returns job handle immediately, agent initialization happens in background
   */
  async spawnAgentAsync(profile: AgentProfile): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'agent:spawn',
      profile,
      async (input, onProgress) => {
        onProgress(0);

        // Initialize agent
        const agent = await this.initializeAgent(input);
        onProgress(30);

        // Load capabilities
        await agent.loadCapabilities();
        onProgress(60);

        // Connect to memory store
        await agent.connectMemory();
        onProgress(80);

        // Register with swarm
        await this.registerAgent(agent);
        onProgress(100);

        return {
          agentId: agent.id,
          sessionId: agent.sessionId,
          status: 'ready',
        };
      }
    );
  }

  /**
   * Spawn multiple agents in parallel
   * Returns single job handle for batch operation
   */
  async spawnAgentSwarmAsync(
    profiles: AgentProfile[]
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'agent:spawn-swarm',
      { profiles },
      async (input, onProgress) => {
        const total = input.profiles.length;
        let completed = 0;

        const agents = await Promise.all(
          input.profiles.map(async (profile) => {
            const agent = await this.initializeAgent(profile);
            completed++;
            onProgress((completed / total) * 100);
            return agent;
          })
        );

        return {
          swarmId: generateId(),
          agents: agents.map(a => ({ id: a.id, status: 'ready' })),
          topology: this.detectTopology(agents),
        };
      }
    );
  }

  /**
   * Execute swarm task asynchronously
   * Supports long-running coordination workflows
   */
  async executeSwarmTaskAsync(
    swarmId: string,
    task: Task
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'swarm:execute-task',
      { swarmId, task },
      async (input, onProgress) => {
        const swarm = await this.getSwarm(input.swarmId);

        // Decompose task
        const subtasks = await this.decomposeTask(input.task);
        onProgress(10);

        // Assign to agents
        const assignments = await this.assignSubtasks(swarm, subtasks);
        onProgress(20);

        // Coordinate execution
        let completed = 0;
        const results = await Promise.all(
          assignments.map(async (assignment) => {
            const result = await this.executeAssignment(assignment);
            completed++;
            onProgress(20 + (completed / assignments.length) * 70);
            return result;
          })
        );

        // Synthesize results
        const finalResult = await this.synthesizeResults(results);
        onProgress(95);

        // Update memory
        await this.updateSwarmMemory(swarmId, finalResult);
        onProgress(100);

        return finalResult;
      }
    );
  }
}
```
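
All of these methods delegate to a `JobManager`. The real implementation ships in `src/mcp/async/job-manager-mcp25.ts`; the sketch below only illustrates the `submitJob` contract the code above assumes (queue the executor, report progress out of band, return a handle immediately). Method and field names are assumptions, not the published API.

```typescript
import { randomUUID } from 'node:crypto';

type ProgressCallback = (percent: number) => void;

// Minimal sketch of the assumed JobManager contract.
export class JobManager {
  async submitJob<TInput, TResult>(
    jobType: string, // used for routing/metrics in the real implementation
    input: TInput,
    executor: (input: TInput, onProgress: ProgressCallback) => Promise<TResult>
  ): Promise<JobHandle> {
    const jobId = randomUUID();

    // Run the executor in the background; the caller only gets the handle.
    void executor(input, (percent) => this.recordProgress(jobId, percent))
      .then((result) => this.recordResult(jobId, result))
      .catch((error) => this.recordFailure(jobId, error));

    return { jobId, pollUrl: `/jobs/${jobId}`, status: 'queued' };
  }

  // Persisted progress/results back the 'jobs/status' tool (sketched only).
  private recordProgress(jobId: string, percent: number): void { /* ... */ }
  private recordResult(jobId: string, result: unknown): void { /* ... */ }
  private recordFailure(jobId: string, error: unknown): void { /* ... */ }
}
```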

### New: Swarm Coordinator with Job Management

```typescript
/**
 * Updated Swarm Coordinator with async support
 */
export class SwarmCoordinator {
  private jobManager: JobManager;
  private registryClient: RegistryClient;

  /**
   * Initialize swarm with async topology optimization
   */
  async initSwarmAsync(config: SwarmConfig): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'swarm:init',
      config,
      async (input, onProgress) => {
        onProgress(0);

        // Analyze task complexity
        const complexity = await this.analyzeComplexity(input);
        onProgress(10);

        // Select optimal topology
        const topology = await this.selectTopology(complexity);
        onProgress(30);

        // Spawn agents
        const agents = await this.spawnAgents(topology, input);
        onProgress(60);

        // Establish coordination channels
        await this.setupCoordination(agents, topology);
        onProgress(80);

        // Initialize memory synchronization
        await this.initMemorySync(agents);
        onProgress(100);

        return {
          swarmId: generateId(),
          topology: topology.type,
          agentCount: agents.length,
          coordinationChannels: topology.channels,
        };
      }
    );
  }

  /**
   * Auto-scale swarm based on workload
   * Returns job handle for scaling operation
   */
  async autoScaleAsync(
    swarmId: string,
    targetLoad: number
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'swarm:autoscale',
      { swarmId, targetLoad },
      async (input, onProgress) => {
        const swarm = await this.getSwarm(input.swarmId);
        onProgress(10);

        // Calculate optimal size
        const optimalSize = await this.calculateOptimalSize(
          swarm,
          input.targetLoad
        );
        onProgress(30);

        if (optimalSize > swarm.agents.length) {
          // Scale up
          const newAgents = await this.addAgents(
            swarm,
            optimalSize - swarm.agents.length
          );
          onProgress(70);

          await this.integrateAgents(swarm, newAgents);
        } else if (optimalSize < swarm.agents.length) {
          // Scale down
          const toRemove = swarm.agents.slice(optimalSize);
          await this.removeAgents(swarm, toRemove);
          onProgress(70);
        }

        onProgress(100);

        return {
          swarmId: input.swarmId,
          previousSize: swarm.agents.length,
          newSize: optimalSize,
          scalingAction: optimalSize > swarm.agents.length ? 'up' : 'down',
        };
      }
    );
  }

  /**
   * Register swarm capabilities in MCP Registry
   */
  async registerInRegistry(): Promise<void> {
    const metadata: ServerMetadata = {
      name: 'agentic-flow-swarm',
      version: this.version,
      description: 'Multi-agent swarm coordination with adaptive topologies',
      author: 'ruvnet',
      capabilities: {
        tools: true,
        resources: true,
        prompts: false,
        async: true,
        streaming: true,
      },
      categories: ['orchestration', 'swarm', 'coordination'],
      tags: [
        'swarm',
        'multi-agent',
        'coordination',
        'adaptive-topology',
        'load-balancing',
      ],
      transport: ['stdio', 'http'],
      security: {
        authRequired: true,
        authMethods: ['token', 'oauth'],
        piiHandling: 'none',
      },
    };

    await this.registryClient.publishServer(metadata);
  }
}
```
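
Both `registerInRegistry()` implementations in this document build the same `ServerMetadata` object. The shape below is inferred from those literals and is a sketch only; the canonical type would ship with the registry client (`mcp-registry-client-2025`), and the field names here are assumptions.

```typescript
// Assumed ServerMetadata / RegistryClient shapes, inferred from the object
// literals used in registerInRegistry() above and in Part 2.
export interface ServerMetadata {
  name: string;
  version: string;
  description: string;
  author: string;
  capabilities: {
    tools: boolean;
    resources: boolean;
    prompts: boolean;
    async: boolean;
    streaming: boolean;
  };
  categories: string[];
  tags: string[];
  transport: Array<'stdio' | 'http'>;
  security: {
    authRequired: boolean;
    authMethods: Array<'token' | 'oauth'>;
    piiHandling: 'none' | 'encrypted';
  };
}

export interface RegistryClient {
  publishServer(metadata: ServerMetadata): Promise<void>;
}
```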

---

## Part 2: AgentDB Interface Updates

### Current Architecture

```typescript
// Current: AgentDB Client
class AgentDB {
  async createCollection(name: string, schema: Schema): Promise<void>;
  async insert(collection: string, vectors: Vector[]): Promise<void>;
  async search(collection: string, query: Vector, k: number): Promise<Result[]>;
}
```

### New: Async-First AgentDB

```typescript
/**
 * Updated AgentDB with async operations and code execution pattern
 */
export class AgentDB {
  private jobManager: JobManager;
  private registryClient: RegistryClient;

  /**
   * Index large document corpus asynchronously
   * Processing happens in execution environment, only metadata returned
   */
  async indexCorpusAsync(
    collection: string,
    documents: AsyncIterable<Document>
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'agentdb:index-corpus',
      { collection, documents },
      async (input, onProgress) => {
        let processed = 0;
        let totalVectors = 0;

        onProgress(0);

        // Process documents in batches (execution environment)
        for await (const batch of this.batchDocuments(input.documents, 100)) {
          // Generate embeddings locally (not in model context!)
          const vectors = await this.generateEmbeddings(batch);

          // Insert to AgentDB
          await this.insert(input.collection, vectors);

          processed += batch.length;
          totalVectors += vectors.length;
          onProgress(Math.min(95, (processed / 10000) * 100)); // Estimate progress
        }

        // Build HNSW index
        await this.buildIndex(input.collection);
        onProgress(100);

        return {
          collection: input.collection,
          documentsProcessed: processed,
          vectorsIndexed: totalVectors,
          indexType: 'hnsw',
          indexingTime: Date.now(),
        };
      }
    );
  }

  /**
   * Train neural model asynchronously
   * Training happens in execution environment with progress updates
   */
  async trainModelAsync(
    modelName: string,
    trainingData: TrainingDataset,
    config: TrainingConfig
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'agentdb:train-model',
      { modelName, trainingData, config },
      async (input, onProgress) => {
        const { epochs, batchSize } = input.config;
        let currentEpoch = 0;

        // Initialize model
        const model = await this.initModel(input.modelName, input.config);
        onProgress(5);

        // Training loop (in execution environment)
        for (let epoch = 0; epoch < epochs; epoch++) {
          currentEpoch = epoch + 1;

          // Train one epoch
          const metrics = await model.trainEpoch(
            input.trainingData,
            batchSize
          );

          // Store checkpoint
          await this.saveCheckpoint(model, epoch, metrics);

          // Report progress
          onProgress(5 + ((currentEpoch / epochs) * 90));

          // Log metrics (not sent to model context)
          await this.logMetrics(input.modelName, epoch, metrics);
        }

        // Final evaluation
        const finalMetrics = await model.evaluate();
        onProgress(98);

        // Save final model
        await this.saveModel(model, input.modelName);
        onProgress(100);

        return {
          modelName: input.modelName,
          epochs: currentEpoch,
          finalLoss: finalMetrics.loss,
          finalAccuracy: finalMetrics.accuracy,
          modelPath: `models/${input.modelName}`,
          checkpoints: epochs,
        };
      }
    );
  }

  /**
   * Semantic search with data processing in execution environment
   * Returns only top-k results, not full vector space
   */
  async semanticSearchAsync(
    collection: string,
    query: string,
    options: SearchOptions
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'agentdb:semantic-search',
      { collection, query, options },
      async (input, onProgress) => {
        onProgress(10);

        // Generate query embedding (execution environment)
        const queryVector = await this.generateEmbedding(input.query);
        onProgress(30);

        // Search AgentDB (local processing)
        const results = await this.vectorSearch(
          input.collection,
          queryVector,
          input.options.k || 10
        );
        onProgress(70);

        // Apply filters in execution environment
        const filtered = await this.applyFilters(results, input.options.filters);
        onProgress(85);

        // Rerank if requested
        const reranked = input.options.rerank
          ? await this.rerank(filtered, input.query)
          : filtered;
        onProgress(95);

        // Return only top results (not full dataset)
        const topResults = reranked.slice(0, input.options.k || 10);
        onProgress(100);

        return {
          query: input.query,
          collection: input.collection,
          results: topResults.map(r => ({
            id: r.id,
            score: r.score,
            metadata: r.metadata,
            // Content returned only if explicitly requested
            content: input.options.includeContent ? r.content : undefined,
          })),
          totalMatches: results.length,
          processingTime: Date.now(),
        };
      }
    );
  }

  /**
   * Register AgentDB in MCP Registry
   */
  async registerInRegistry(): Promise<void> {
    const metadata: ServerMetadata = {
      name: 'agentdb',
      version: this.version,
      description: 'High-performance vector database with 150x faster search and 9 RL algorithms',
      author: 'ruvnet',
      capabilities: {
        tools: true,
        resources: true,
        prompts: false,
        async: true,
        streaming: false,
      },
      categories: ['database', 'vector-search', 'machine-learning'],
      tags: [
        'vector-database',
        'semantic-search',
        'embeddings',
        'hnsw',
        'neural-training',
        'reinforcement-learning',
      ],
      transport: ['stdio', 'http'],
      security: {
        authRequired: true,
        authMethods: ['token'],
        piiHandling: 'encrypted',
      },
    };

    await this.registryClient.publishServer(metadata);
  }
}
```
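
The indexing path above leans on a `batchDocuments()` helper that is not shown in this document. A minimal generic version could look like the following; this is a sketch under that assumption, not the shipped helper.

```typescript
// Sketch: group an AsyncIterable into fixed-size arrays so embeddings can be
// generated batch-by-batch instead of materializing the whole corpus.
async function* batchDocuments<T>(
  source: AsyncIterable<T>,
  batchSize: number
): AsyncGenerator<T[]> {
  let batch: T[] = [];
  for await (const item of source) {
    batch.push(item);
    if (batch.length >= batchSize) {
      yield batch;
      batch = [];
    }
  }
  if (batch.length > 0) {
    yield batch; // flush the final partial batch
  }
}
```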

### New: ReasoningBank with Async Operations

```typescript
/**
 * ReasoningBank with async trajectory processing
 */
export class ReasoningBank {
  private agentDB: AgentDB;
  private jobManager: JobManager;

  /**
   * Process and learn from trajectory asynchronously
   * Heavy distillation happens in execution environment
   */
  async processTrajectoryAsync(
    trajectory: Trajectory
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'reasoningbank:process-trajectory',
      trajectory,
      async (input, onProgress) => {
        onProgress(0);

        // Extract action-outcome pairs
        const pairs = await this.extractPairs(input);
        onProgress(20);

        // Generate embeddings (execution environment)
        const embeddings = await this.generateTrajectoryEmbeddings(pairs);
        onProgress(40);

        // Store in AgentDB
        await this.agentDB.insert('trajectories', embeddings);
        onProgress(60);

        // Identify successful patterns
        const patterns = await this.identifyPatterns(pairs);
        onProgress(80);

        // Distill to memory
        const distilled = await this.distillMemory(patterns);
        await this.agentDB.insert('distilled-memory', distilled);
        onProgress(100);

        return {
          trajectoryId: input.id,
          pairsExtracted: pairs.length,
          patternsIdentified: patterns.length,
          memoryDistilled: distilled.length,
          verdict: await this.judgeVerdict(input),
        };
      }
    );
  }

  /**
   * Query reasoning patterns asynchronously
   * Returns only relevant patterns, not full memory
   */
  async queryPatternsAsync(
    query: string,
    context: ExecutionContext
  ): Promise<JobHandle> {
    return this.jobManager.submitJob(
      'reasoningbank:query-patterns',
      { query, context },
      async (input, onProgress) => {
        onProgress(10);

        // Generate query embedding
        const queryVector = await this.generateEmbedding(input.query);
        onProgress(30);

        // Search distilled memory
        const patterns = await this.agentDB.search(
          'distilled-memory',
          queryVector,
          10
        );
        onProgress(60);

        // Filter by context relevance
        const relevant = await this.filterByContext(patterns, input.context);
        onProgress(80);

        // Rank by historical success
        const ranked = await this.rankBySuccess(relevant);
        onProgress(100);

        return {
          query: input.query,
          patterns: ranked.map(p => ({
            id: p.id,
            description: p.metadata.description,
            successRate: p.metadata.successRate,
            usageCount: p.metadata.usageCount,
            // Full pattern details only if explicitly requested
          })),
          totalMatches: patterns.length,
        };
      }
    );
  }
}
```
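
`Trajectory` and `ExecutionContext` are referenced above but not defined in this document. The shapes below are inferred from how `processTrajectoryAsync` and `queryPatternsAsync` use them (`input.id`, action-outcome pairs, domain-based context filtering) and are assumptions, not the canonical AgentDB types.

```typescript
// Assumed minimal shapes, inferred from usage above.
export interface TrajectoryStep {
  action: string;
  outcome: string;
  success: boolean;
}

export interface Trajectory {
  id: string;
  steps: TrajectoryStep[];
  metadata?: Record<string, unknown>;
}

export interface ExecutionContext {
  domain?: string; // e.g. 'backend-development', as used in the test plan
  tags?: string[];
  [key: string]: unknown;
}
```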

---

## Part 3: MCP Tool Definitions

### New Tools for Agentic Flow

```typescript
// agentic-flow-tools.ts

export const agenticFlowTools: MCPTool[] = [
  // Async agent spawning
  {
    name: 'agentic-flow/spawn-agent:async',
    description: 'Spawn agent asynchronously with progress tracking',
    inputSchema: {
      type: 'object',
      properties: {
        profile: { type: 'object' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.orchestrator.spawnAgentAsync(input.profile);
    },
  },

  // Async swarm initialization
  {
    name: 'agentic-flow/init-swarm:async',
    description: 'Initialize swarm with adaptive topology selection',
    inputSchema: {
      type: 'object',
      properties: {
        config: { type: 'object' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.swarmCoordinator.initSwarmAsync(input.config);
    },
  },

  // Async task execution
  {
    name: 'agentic-flow/execute-task:async',
    description: 'Execute swarm task with coordination and progress tracking',
    inputSchema: {
      type: 'object',
      properties: {
        swarmId: { type: 'string' },
        task: { type: 'object' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.orchestrator.executeSwarmTaskAsync(
        input.swarmId,
        input.task
      );
    },
  },
];
```
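
The tool arrays in this part are typed as `MCPTool[]`, but the interface itself is not shown here. A minimal shape consistent with these definitions is sketched below; it is illustrative only, and the `ToolContext` wiring in particular is an assumption about how handlers receive the orchestrator, coordinator, AgentDB, and ReasoningBank instances.

```typescript
// Assumed minimal tool shape, matching the fields used in the tool arrays.
export interface MCPTool<TInput = Record<string, unknown>, TResult = unknown> {
  name: string; // e.g. 'agentic-flow/spawn-agent:async'
  description: string;
  inputSchema: {
    type: 'object';
    properties: Record<string, unknown>; // JSON Schema property definitions
    required?: string[];
  };
  handler: (input: TInput, context: ToolContext) => Promise<TResult>;
}

// Assumed context object carrying the services the handlers call.
export interface ToolContext {
  orchestrator: AgenticFlowOrchestrator;
  swarmCoordinator: SwarmCoordinator;
  agentDB: AgentDB;
  reasoningBank: ReasoningBank;
}
```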

### New Tools for AgentDB

```typescript
// agentdb-tools.ts

export const agentDBTools: MCPTool[] = [
  // Async corpus indexing
  {
    name: 'agentdb/index-corpus:async',
    description: 'Index large document corpus with progress tracking',
    inputSchema: {
      type: 'object',
      properties: {
        collection: { type: 'string' },
        documents: { type: 'array' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      const docs = async function* () {
        for (const doc of input.documents) {
          yield doc;
        }
      };
      return await context.agentDB.indexCorpusAsync(input.collection, docs());
    },
  },

  // Async model training
  {
    name: 'agentdb/train-model:async',
    description: 'Train neural model with reinforcement learning algorithms',
    inputSchema: {
      type: 'object',
      properties: {
        modelName: { type: 'string' },
        algorithm: {
          type: 'string',
          enum: [
            'decision-transformer',
            'q-learning',
            'sarsa',
            'actor-critic',
            'ppo',
            'dqn',
            'a2c',
            'ddpg',
            'sac',
          ],
        },
        trainingData: { type: 'object' },
        config: { type: 'object' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.agentDB.trainModelAsync(
        input.modelName,
        input.trainingData,
        input.config
      );
    },
  },

  // Async semantic search
  {
    name: 'agentdb/semantic-search:async',
    description: 'Semantic search with HNSW index (150x faster)',
    inputSchema: {
      type: 'object',
      properties: {
        collection: { type: 'string' },
        query: { type: 'string' },
        options: {
          type: 'object',
          properties: {
            k: { type: 'number', default: 10 },
            filters: { type: 'object' },
            rerank: { type: 'boolean', default: false },
            includeContent: { type: 'boolean', default: false },
          },
        },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.agentDB.semanticSearchAsync(
        input.collection,
        input.query,
        input.options
      );
    },
  },
];
```

### New Tools for ReasoningBank

```typescript
// reasoningbank-tools.ts

export const reasoningBankTools: MCPTool[] = [
  // Async trajectory processing
  {
    name: 'reasoningbank/process-trajectory:async',
    description: 'Process trajectory and extract learning patterns',
    inputSchema: {
      type: 'object',
      properties: {
        trajectory: { type: 'object' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.reasoningBank.processTrajectoryAsync(
        input.trajectory
      );
    },
  },

  // Async pattern query
  {
    name: 'reasoningbank/query-patterns:async',
    description: 'Query reasoning patterns from distilled memory',
    inputSchema: {
      type: 'object',
      properties: {
        query: { type: 'string' },
        context: { type: 'object' },
        _async: { type: 'object' },
      },
    },
    handler: async (input, context) => {
      return await context.reasoningBank.queryPatternsAsync(
        input.query,
        input.context
      );
    },
  },
];
```

---

## Part 4: End-to-End Test Plan

### Test Suite Structure

```typescript
// tests/e2e/async-workflow.test.ts

describe('Async Workflow - End-to-End', () => {
  describe('Scenario 1: Large-Scale Document Processing', () => {
    it('should index 10,000 documents with 98% token reduction', async () => {
      // 1. Submit indexing job
      const jobHandle = await mcpClient.callTool('agentdb/index-corpus:async', {
        collection: 'research-papers',
        documents: largePaperDataset, // 10,000 papers
        _async: { mode: 'poll' },
      });

      expect(jobHandle.jobHandle).toHaveProperty('jobId');

      // 2. Poll for progress
      let status = await mcpClient.callTool('jobs/status', {
        jobId: jobHandle.jobHandle.jobId,
      });

      while (status.job.status === 'running') {
        await sleep(1000);
        status = await mcpClient.callTool('jobs/status', {
          jobId: jobHandle.jobHandle.jobId,
        });

        console.log(`Progress: ${status.job.progress}%`);
      }

      // 3. Verify completion
      expect(status.job.status).toBe('completed');
      expect(status.job.result.documentsProcessed).toBe(10000);

      // 4. Verify token reduction
      const contextSize = JSON.stringify(jobHandle).length;
      expect(contextSize).toBeLessThan(2000); // < 2KB in context

      // Original dataset would be ~10MB in context
      // Achievement: 99.98% reduction
    });
  });

  describe('Scenario 2: Multi-Agent Swarm Coordination', () => {
    it('should spawn 20 agents and coordinate complex task', async () => {
      // 1. Initialize swarm asynchronously
      const swarmJob = await mcpClient.callTool('agentic-flow/init-swarm:async', {
        config: {
          agentCount: 20,
          topology: 'adaptive',
          taskComplexity: 'high',
        },
        _async: { mode: 'poll' },
      });

      // 2. Poll for swarm initialization
      let swarmStatus = await mcpClient.callTool('jobs/status', {
        jobId: swarmJob.jobHandle.jobId,
      });

      while (swarmStatus.job.status === 'running') {
        await sleep(2000);
        swarmStatus = await mcpClient.callTool('jobs/status', {
          jobId: swarmJob.jobHandle.jobId,
        });
      }

      expect(swarmStatus.job.status).toBe('completed');
      const swarmId = swarmStatus.job.result.swarmId;

      // 3. Execute task on swarm
      const taskJob = await mcpClient.callTool('agentic-flow/execute-task:async', {
        swarmId,
        task: {
          type: 'research-and-implement',
          description: 'Research and implement new feature',
          requirements: [...complexRequirements],
        },
        _async: { mode: 'poll' },
      });

      // 4. Monitor task execution
      let taskStatus = await mcpClient.callTool('jobs/status', {
        jobId: taskJob.jobHandle.jobId,
      });

      const startTime = Date.now();
      while (
        taskStatus.job.status === 'running' &&
        Date.now() - startTime < 600000 // 10 minute timeout
      ) {
        await sleep(5000);
        taskStatus = await mcpClient.callTool('jobs/status', {
          jobId: taskJob.jobHandle.jobId,
        });

        console.log(`Task progress: ${taskStatus.job.progress}%`);
      }

      // 5. Verify completion
      expect(taskStatus.job.status).toBe('completed');
      expect(taskStatus.job.result.subtasksCompleted).toBeGreaterThan(0);
    });
  });

  describe('Scenario 3: Neural Model Training with ReasoningBank', () => {
    it('should train model on trajectories and improve over time', async () => {
      // 1. Process initial trajectories
      const trajectories = generateTestTrajectories(1000);

      const processingJobs = await Promise.all(
        trajectories.map(t =>
          mcpClient.callTool('reasoningbank/process-trajectory:async', {
            trajectory: t,
            _async: { mode: 'fire-and-forget' },
          })
        )
      );

      // 2. Start model training
      const trainingJob = await mcpClient.callTool('agentdb/train-model:async', {
        modelName: 'reasoning-agent-v1',
        algorithm: 'decision-transformer',
        trainingData: {
          source: 'reasoningbank',
          collection: 'trajectories',
        },
        config: {
          epochs: 50,
          batchSize: 32,
          learningRate: 0.001,
        },
        _async: { mode: 'poll' },
      });

      // 3. Monitor training progress
      let trainingStatus = await mcpClient.callTool('jobs/status', {
        jobId: trainingJob.jobHandle.jobId,
      });

      while (trainingStatus.job.status === 'running') {
        await sleep(10000); // Poll every 10 seconds
        trainingStatus = await mcpClient.callTool('jobs/status', {
          jobId: trainingJob.jobHandle.jobId,
        });

        if (trainingStatus.job.progress) {
          console.log(`Training progress: ${trainingStatus.job.progress}%`);
        }
      }

      // 4. Verify training completed
      expect(trainingStatus.job.status).toBe('completed');
      expect(trainingStatus.job.result.epochs).toBe(50);
      expect(trainingStatus.job.result.finalAccuracy).toBeGreaterThan(0.8);

      // 5. Query learned patterns
      const patternJob = await mcpClient.callTool('reasoningbank/query-patterns:async', {
        query: 'How to handle API rate limiting?',
        context: { domain: 'backend-development' },
        _async: { mode: 'wait', timeout: 30000 },
      });

      expect(patternJob.completed).toBe(true);
      expect(patternJob.result.patterns.length).toBeGreaterThan(0);
    });
  });

  describe('Scenario 4: Token Reduction Validation', () => {
    it('should achieve 98%+ token reduction across all operations', async () => {
      // Measure token usage for old pattern (sync, full data in context)
      const oldPattern = {
        documents: largePaperDataset, // 10MB
        agents: [...agentProfiles], // 20 profiles, ~500KB
        trainingData: trajectories, // 2MB
      };

      const oldTokens = estimateTokens(JSON.stringify(oldPattern));
      expect(oldTokens).toBeGreaterThan(150000);

      // Measure token usage for new pattern (async, job handles only)
      const newPattern = {
        indexJob: { jobHandle: { jobId: 'uuid', pollUrl: '/jobs/uuid' } },
        swarmJob: { jobHandle: { jobId: 'uuid2', pollUrl: '/jobs/uuid2' } },
        trainingJob: { jobHandle: { jobId: 'uuid3', pollUrl: '/jobs/uuid3' } },
      };

      const newTokens = estimateTokens(JSON.stringify(newPattern));
      expect(newTokens).toBeLessThan(2000);

      // Verify reduction
      const reduction = ((oldTokens - newTokens) / oldTokens) * 100;
      expect(reduction).toBeGreaterThan(98);

      console.log(`Token reduction: ${reduction.toFixed(2)}%`);
      console.log(`Old: ${oldTokens} tokens, New: ${newTokens} tokens`);
    });
  });
});
```
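
The three polling scenarios above repeat the same status loop. A small shared helper keeps the tests shorter; the sketch below assumes the `jobs/status` tool returns `{ job: JobStatus }` as used in the scenarios and is illustrative only.

```typescript
// Hypothetical polling helper for the scenarios above.
async function pollUntilComplete(
  mcpClient: { callTool(name: string, args: object): Promise<any> },
  jobId: string,
  intervalMs = 1000,
  timeoutMs = 600_000
): Promise<any> {
  const start = Date.now();
  for (;;) {
    const status = await mcpClient.callTool('jobs/status', { jobId });
    if (status.job.status !== 'running' && status.job.status !== 'queued') {
      return status.job; // completed or failed
    }
    if (Date.now() - start > timeoutMs) {
      throw new Error(`Job ${jobId} timed out after ${timeoutMs}ms`);
    }
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
}

// Usage (Scenario 1, condensed):
// const job = await pollUntilComplete(mcpClient, jobHandle.jobHandle.jobId);
// expect(job.result.documentsProcessed).toBe(10000);
```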

---

## Part 5: Performance Benchmarks

### Expected Performance Improvements

| Metric | Before (Sync) | After (Async) | Improvement |
|--------|---------------|---------------|-------------|
| **Token Usage** | 150,000 | 2,000 | 98.7% ↓ |
| **Memory Footprint** | 500 MB | 50 MB | 90% ↓ |
| **Concurrent Tasks** | 1-2 | 20+ | 10x ↑ |
| **Response Latency** | 30-60s | 0.5-2s | 15-30x ↓ |
| **Throughput** | 2 tasks/min | 50+ tasks/min | 25x ↑ |

### Benchmark Suite

```typescript
// tests/benchmarks/async-performance.bench.ts

import { bench, describe } from 'vitest';

describe('Async Performance Benchmarks', () => {
  bench('spawn-agent:async vs spawn-agent:sync', async () => {
    // Async version
    const asyncStart = Date.now();
    const asyncJob = await orchestrator.spawnAgentAsync(profile);
    const asyncLatency = Date.now() - asyncStart;

    // Sync version (for comparison)
    const syncStart = Date.now();
    const syncResult = await orchestrator.spawnAgent(profile);
    const syncLatency = Date.now() - syncStart;

    console.log(`Async latency: ${asyncLatency}ms`);
    console.log(`Sync latency: ${syncLatency}ms`);
    console.log(`Speedup: ${(syncLatency / asyncLatency).toFixed(2)}x`);
  });

  bench('agentdb:index-corpus token reduction', async () => {
    // Measure token usage
    const documents = generateDocuments(10000);

    // Old pattern: Full documents in context
    const oldContextSize = new Blob([JSON.stringify(documents)]).size;

    // New pattern: Job handle only
    const jobHandle = await agentDB.indexCorpusAsync('test', documents);
    const newContextSize = new Blob([JSON.stringify(jobHandle)]).size;

    const reduction = ((oldContextSize - newContextSize) / oldContextSize) * 100;

    console.log(`Old context: ${(oldContextSize / 1024).toFixed(2)} KB`);
    console.log(`New context: ${(newContextSize / 1024).toFixed(2)} KB`);
    console.log(`Reduction: ${reduction.toFixed(2)}%`);
  });
});
```

---

## Part 6: Migration Guide

### Step-by-Step Migration

#### Step 1: Update Dependencies

```bash
npm install
npm install uuid @types/uuid
npm install ioredis @types/ioredis  # For distributed job queue (optional)
```

#### Step 2: Enable Async Operations

```jsonc
// config.json
{
  "async": {
    "enabled": true,
    "maxJobs": 1000,
    "jobTTL": 86400000
  }
}
```

#### Step 3: Migrate Tool Calls

```typescript
// Before (Sync)
const agent = await mcpClient.callTool('agents/spawn', {
  type: 'researcher',
  name: 'Research Agent',
});

// After (Async)
const jobHandle = await mcpClient.callTool('agents/spawn:async', {
  type: 'researcher',
  name: 'Research Agent',
  _async: { mode: 'poll' },
});

// Poll for completion
const status = await mcpClient.callTool('jobs/status', {
  jobId: jobHandle.jobHandle.jobId,
});
```
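
For calls that normally finish within a request timeout, the `_async` envelope also supports the blocking `wait` mode used in Scenario 3 of the test plan. The snippet below is a sketch of that usage under the same assumptions (the server resolves inline and returns `completed`/`result` instead of a job handle):

```typescript
// Blocking variant: wait up to `timeout` ms and get the result inline.
const spawned = await mcpClient.callTool('agents/spawn:async', {
  type: 'researcher',
  name: 'Research Agent',
  _async: { mode: 'wait', timeout: 30000 },
});

if (spawned.completed) {
  console.log('Agent ready:', spawned.result.agentId);
}
```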

#### Step 4: Update Agentic Flow Integration

```typescript
// Update orchestrator initialization
const orchestrator = new AgenticFlowOrchestrator({
  jobManager: new JobManager(),
  registryClient: new RegistryClient(process.env.MCP_REGISTRY_API_KEY),
});

// Register in MCP Registry
await orchestrator.registerInRegistry();
```

#### Step 5: Update AgentDB Integration

```typescript
// Update AgentDB initialization
const agentDB = new AgentDB({
  jobManager: new JobManager(),
  registryClient: new RegistryClient(process.env.MCP_REGISTRY_API_KEY),
});

// Register in MCP Registry
await agentDB.registerInRegistry();
```

---

## Summary

This integration plan provides:

1. **Async Operations**: Job-based execution for long-running tasks
2. **Token Reduction**: 98.7% reduction (150k → 2k tokens)
3. **Registry Integration**: Auto-discovery via MCP Registry
4. **Code Execution**: Data processing in execution environment
5. **Performance**: 10-30x latency reduction, 25x throughput increase
6. **Testing**: Comprehensive E2E test suite
7. **Migration**: Step-by-step upgrade guide

**Timeline**:
- **Phase 0** (This week): Implement async operations
- **Phase 1** (Nov 14-25): RC validation and testing
- **Phase 2** (After Nov 25): Production rollout

Implementation can begin with Phase 0A (Async Operations) for Agentic Flow.