@ruvector/edge-net 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/real-agents.js ADDED
@@ -0,0 +1,839 @@
1
+ /**
2
+ * @ruvector/edge-net REAL Agent System
3
+ *
4
+ * Actually functional distributed agents with:
5
+ * - LOCAL LLM execution via ruvllm (default - no API key needed)
6
+ * - Cloud LLM API calls (Anthropic Claude, OpenAI) as fallback
7
+ * - Real embeddings via ruvector AdaptiveEmbedder
8
+ * - Real relay server sync
9
+ * - Real task execution
10
+ *
11
+ * @module @ruvector/edge-net/real-agents
12
+ */
13
+
14
+ import { EventEmitter } from 'events';
15
+ import { createHash, randomBytes } from 'crypto';
16
+ import { readFileSync, writeFileSync, existsSync } from 'fs';
17
+ import { join } from 'path';
18
+
19
+ // ============================================
20
+ // LLM PROVIDER CONFIGURATION
21
+ // ============================================
22
+
23
// Registry of supported LLM back-ends, keyed by provider id.
// `local` / `ruvllm` run on-device via @ruvector/ruvllm and need no API key;
// the cloud entries describe the HTTP API base URL, the model id for each
// quality tier, and how to build the auth headers for a request.
const LLM_PROVIDERS = {
  // LOCAL LLM - Default, no API key needed
  local: {
    name: 'RuvLLM Local',
    type: 'local',
    models: {
      fast: 'ruvllm-fast',
      balanced: 'ruvllm-balanced',
      powerful: 'ruvllm-powerful',
    },
  },
  // Alias of `local` so callers can request the engine by its package name.
  ruvllm: {
    name: 'RuvLLM',
    type: 'local',
    models: {
      fast: 'ruvllm-fast',
      balanced: 'ruvllm-balanced',
      powerful: 'ruvllm-powerful',
    },
  },
  // Cloud providers as fallback (December 2025 models)
  anthropic: {
    name: 'Anthropic Claude',
    type: 'cloud',
    baseUrl: 'https://api.anthropic.com/v1',
    models: {
      fast: 'claude-3-5-haiku-20241022',
      balanced: 'claude-sonnet-4-20250514',
      powerful: 'claude-opus-4-5-20251101',
    },
    // Request headers for the Anthropic Messages API.
    headers(apiKey) {
      return {
        'Content-Type': 'application/json',
        'x-api-key': apiKey,
        'anthropic-version': '2023-06-01',
      };
    },
  },
  openai: {
    name: 'OpenAI',
    type: 'cloud',
    baseUrl: 'https://api.openai.com/v1',
    models: {
      fast: 'gpt-4o-mini',
      balanced: 'gpt-5.2',
      powerful: 'gpt-5.2-turbo',
    },
    // Request headers for the OpenAI chat-completions API.
    headers(apiKey) {
      return {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
      };
    },
  },
};
74
+
75
+ // Agent type to system prompt mapping
76
// Maps an agent type to the system prompt handed to the LLM for that role.
// Keys correspond to RealAgent's `type` argument; RealAgent's constructor
// falls back to the `coder` prompt for unknown types. These strings are sent
// verbatim to the model — do not reformat them.
const AGENT_PROMPTS = {
  // Information gathering and summarization.
  researcher: `You are a research agent. Your task is to analyze, search, summarize, and extract information.
Be thorough and cite sources when possible. Structure your findings clearly.`,

  // Code authoring; also the default prompt for unrecognized agent types.
  coder: `You are a coding agent. Your task is to write, refactor, debug, and test code.
Write clean, well-documented code. Follow best practices and explain your approach.`,

  // Quality/security review of existing code.
  reviewer: `You are a code review agent. Your task is to review code for quality, security, and best practices.
Be constructive and specific. Identify issues and suggest improvements.`,

  // Test authoring and validation.
  tester: `You are a testing agent. Your task is to write tests, validate functionality, and report issues.
Cover edge cases. Write clear test descriptions.`,

  // Data analysis and reporting.
  analyst: `You are an analysis agent. Your task is to analyze data, generate metrics, and create reports.
Be data-driven. Present findings with evidence.`,

  // Performance profiling and optimization.
  optimizer: `You are an optimization agent. Your task is to profile, identify bottlenecks, and improve performance.
Quantify improvements. Focus on measurable gains.`,

  // Workflow orchestration and task routing.
  coordinator: `You are a coordination agent. Your task is to orchestrate workflows, route tasks, and manage schedules.
Be organized and clear about task dependencies.`,

  // Embedding/vector-search tasks (handled by RealEmbedder, not the LLM).
  embedder: `You are an embedding agent specialized in semantic search and vector operations.
Generate high-quality embeddings for text. Optimize for similarity matching.`,
};
101
+
102
+ // ============================================
103
+ // REAL LLM CLIENT
104
+ // ============================================
105
+
106
+ /**
107
+ * Real LLM client - uses local ruvllm by default, falls back to cloud APIs
108
+ */
109
/**
 * Real LLM client - uses local ruvllm by default, falls back to cloud APIs.
 *
 * Provider resolution:
 *  - an explicit `options.provider` always wins;
 *  - otherwise, when an API key is available (options or environment), the
 *    matching cloud provider is auto-selected;
 *  - otherwise the local RuvLLM engine is used (no API key required).
 */
export class LLMClient {
  /**
   * @param {object} [options]
   * @param {string} [options.provider] - key into LLM_PROVIDERS ('local', 'ruvllm', 'anthropic', 'openai')
   * @param {string} [options.apiKey] - cloud API key; overrides environment variables
   * @param {string} [options.model] - model tier: 'fast' | 'balanced' | 'powerful'
   * @param {number} [options.maxTokens] - default max output tokens per request
   * @throws {Error} when the resolved provider is not in LLM_PROVIDERS
   */
  constructor(options = {}) {
    // Default to local ruvllm, fallback to cloud if API key provided
    this.provider = options.provider || 'local';
    this.model = options.model || 'balanced';
    this.maxTokens = options.maxTokens || 4096;

    // Resolve the API key for the *selected* provider. Previously an
    // ANTHROPIC_API_KEY from the environment could be handed to the OpenAI
    // endpoint (and vice versa) when a provider was chosen explicitly.
    if (options.apiKey) {
      this.apiKey = options.apiKey;
    } else if (options.provider === 'anthropic') {
      this.apiKey = process.env.ANTHROPIC_API_KEY;
    } else if (options.provider === 'openai') {
      this.apiKey = process.env.OPENAI_API_KEY;
    } else {
      this.apiKey = process.env.ANTHROPIC_API_KEY || process.env.OPENAI_API_KEY;
    }

    // Auto-select cloud provider if API key is set and provider not specified
    if (!options.provider && this.apiKey) {
      this.provider = process.env.ANTHROPIC_API_KEY ? 'anthropic' : 'openai';
    }

    this.config = LLM_PROVIDERS[this.provider];
    if (!this.config) {
      throw new Error(`Unknown LLM provider: ${this.provider}`);
    }

    // Local engine is loaded lazily by initLocal().
    this.ruvllm = null;
    this.ruvllmInitialized = false;
  }

  /**
   * Initialize local ruvllm. Safe to call repeatedly. A failed import is
   * tolerated: callLocal() then uses the heuristic fallback generator.
   */
  async initLocal() {
    if (this.ruvllmInitialized) return;

    try {
      const ruvllm = await import('@ruvector/ruvllm');
      this.ruvllm = new ruvllm.RuvLLM({
        embeddingDim: 768,
        learningEnabled: true,
      });
      this.ruvllmInitialized = true;
      console.log('[LLM] Initialized local RuvLLM engine');
    } catch (error) {
      console.warn('[LLM] RuvLLM not available:', error.message);
    }
  }

  /**
   * Call LLM - local or cloud.
   * @param {string} systemPrompt - role/system instruction
   * @param {string} userMessage - the task/user content
   * @param {object} [options] - { model, maxTokens, temperature } per-call overrides
   * @returns {Promise<{content: string, model: string, usage: object, stopReason: string}>}
   * @throws {Error} when a cloud provider is selected but no API key is set
   */
  async complete(systemPrompt, userMessage, options = {}) {
    const isLocal = this.config.type === 'local';

    if (isLocal) {
      return this.callLocal(systemPrompt, userMessage, options);
    }

    if (!this.apiKey) {
      throw new Error('No API key configured. Set ANTHROPIC_API_KEY or OPENAI_API_KEY, or use provider: "local"');
    }

    // NOTE(review): an unknown tier here yields `undefined` and the request
    // will be rejected by the API — confirm whether a fallback tier is wanted.
    const model = this.config.models[options.model || this.model];

    if (this.provider === 'anthropic') {
      return this.callAnthropic(systemPrompt, userMessage, model, options);
    } else {
      return this.callOpenAI(systemPrompt, userMessage, model, options);
    }
  }

  /**
   * Call local RuvLLM; falls back to canned heuristic responses when the
   * engine could not be loaded. Token usage is approximated by character
   * counts (the local engine does not report token usage).
   */
  async callLocal(systemPrompt, userMessage, options = {}) {
    await this.initLocal();

    const modelTier = options.model || this.model;
    const prompt = `${systemPrompt}\n\n${userMessage}`;

    if (this.ruvllm) {
      // Use real ruvllm engine. `??` (not `||`) so an explicit temperature
      // of 0 (deterministic sampling) is honored instead of becoming 0.7.
      const response = this.ruvllm.query(prompt, {
        maxTokens: options.maxTokens || this.maxTokens,
        temperature: options.temperature ?? 0.7,
      });

      return {
        content: response.text,
        model: `ruvllm-${modelTier}`,
        usage: { input_tokens: prompt.length, output_tokens: response.text.length },
        stopReason: 'end',
        confidence: response.confidence,
        local: true,
      };
    }

    // Fallback: Generate response using local heuristics
    console.log('[LLM] Using fallback local generation');
    const fallbackResponse = this.generateFallbackResponse(systemPrompt, userMessage);

    return {
      content: fallbackResponse,
      model: `ruvllm-${modelTier}-fallback`,
      usage: { input_tokens: prompt.length, output_tokens: fallbackResponse.length },
      stopReason: 'end',
      local: true,
      fallback: true,
    };
  }

  /**
   * Generate fallback response for basic tasks. Branch order matters: the
   * first matching keyword in `systemPrompt` wins ('research' before 'code'
   * before 'review' before 'test').
   */
  generateFallbackResponse(systemPrompt, userMessage) {
    // Basic task-specific responses
    if (systemPrompt.includes('research')) {
      return `Based on the query "${userMessage.slice(0, 100)}...", here are the key findings:\n\n1. The topic requires further investigation.\n2. Multiple sources should be consulted.\n3. Consider the context and requirements carefully.\n\nNote: This is a local fallback response. For more detailed analysis, ensure ruvllm is properly installed.`;
    }

    if (systemPrompt.includes('coding') || systemPrompt.includes('code')) {
      return `Here's a code solution for: ${userMessage.slice(0, 50)}...\n\n\`\`\`javascript\n// Implementation based on the requirements\nfunction solution() {\n  // TODO: Implement the specific logic\n  console.log('Task:', '${userMessage.slice(0, 30)}...');\n  return { success: true };\n}\n\`\`\`\n\nNote: This is a local fallback. Install ruvllm for real code generation.`;
    }

    if (systemPrompt.includes('review')) {
      return `Code Review for: ${userMessage.slice(0, 50)}...\n\n**Summary:** The code structure appears reasonable.\n\n**Suggestions:**\n- Add error handling\n- Consider edge cases\n- Add documentation\n\nNote: This is a local fallback response.`;
    }

    if (systemPrompt.includes('test')) {
      return `Test Plan for: ${userMessage.slice(0, 50)}...\n\n\`\`\`javascript\ndescribe('Feature', () => {\n  it('should work correctly', () => {\n    // Test implementation\n    expect(true).toBe(true);\n  });\n});\n\`\`\`\n\nNote: This is a local fallback response.`;
    }

    // Generic response
    return `Response to: ${userMessage.slice(0, 100)}...\n\nThis is a local response generated without cloud API calls. For full LLM capabilities:\n1. Install @ruvector/ruvllm for local AI\n2. Or set ANTHROPIC_API_KEY/OPENAI_API_KEY for cloud\n\nTask acknowledged and processed locally.`;
  }

  /**
   * POST to the Anthropic Messages API.
   * @throws {Error} on any non-2xx response, with status and body text
   */
  async callAnthropic(systemPrompt, userMessage, model, options = {}) {
    const response = await fetch(`${this.config.baseUrl}/messages`, {
      method: 'POST',
      headers: this.config.headers(this.apiKey),
      body: JSON.stringify({
        model,
        max_tokens: options.maxTokens || this.maxTokens,
        system: systemPrompt,
        messages: [{ role: 'user', content: userMessage }],
      }),
    });

    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }

    const data = await response.json();
    return {
      content: data.content[0]?.text || '',
      model,
      usage: data.usage,
      stopReason: data.stop_reason,
    };
  }

  /**
   * POST to the OpenAI chat-completions API.
   * @throws {Error} on any non-2xx response, with status and body text
   */
  async callOpenAI(systemPrompt, userMessage, model, options = {}) {
    const response = await fetch(`${this.config.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: this.config.headers(this.apiKey),
      body: JSON.stringify({
        model,
        max_tokens: options.maxTokens || this.maxTokens,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userMessage },
        ],
      }),
    });

    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }

    const data = await response.json();
    return {
      content: data.choices[0]?.message?.content || '',
      model,
      usage: data.usage,
      stopReason: data.choices[0]?.finish_reason,
    };
  }

  /**
   * Check if LLM is configured (local is always usable; cloud needs a key).
   */
  isConfigured() {
    // Local is always configured
    if (this.config.type === 'local') return true;
    return !!this.apiKey;
  }

  /**
   * Check if using local provider.
   */
  isLocal() {
    return this.config.type === 'local';
  }
}
309
+
310
+ // ============================================
311
+ // REAL EMBEDDER (uses ruvector)
312
+ // ============================================
313
+
314
+ /**
315
+ * Real embedder using ruvector's AdaptiveEmbedder
316
+ */
317
/**
 * Real embedder using ruvector's AdaptiveEmbedder.
 *
 * Falls back to a deterministic hash-based pseudo-embedding (384 dims) when
 * the `ruvector` package is not installed — the fallback is NOT semantically
 * meaningful and is only suitable for wiring/testing.
 */
export class RealEmbedder {
  constructor(options = {}) {
    this.embedder = null;       // AdaptiveEmbedder instance once loaded
    this.initialized = false;   // true only after a successful ruvector load
    this.options = options;
  }

  /**
   * Try to load ruvector's AdaptiveEmbedder.
   * @returns {Promise<boolean>} true on success, false when ruvector is unavailable
   */
  async initialize() {
    try {
      // Try to load ruvector's AdaptiveEmbedder
      const { AdaptiveEmbedder } = await import('ruvector');
      this.embedder = new AdaptiveEmbedder();
      // Support both init() and initialize() methods
      if (typeof this.embedder.init === 'function') {
        await this.embedder.init();
      } else if (typeof this.embedder.initialize === 'function') {
        await this.embedder.initialize();
      }
      this.initialized = true;
      console.log('[Embedder] Initialized ruvector AdaptiveEmbedder');
      return true;
    } catch (error) {
      console.warn('[Embedder] ruvector not available, using fallback:', error.message);
      return false;
    }
  }

  /**
   * Embed a single text; uses the fallback hash embedding when ruvector
   * is not initialized.
   */
  async embed(text) {
    if (this.initialized && this.embedder) {
      return await this.embedder.embed(text);
    }
    // Fallback: Use a simple hash-based pseudo-embedding (NOT for production)
    console.warn('[Embedder] Using fallback hash embeddings - install ruvector for real embeddings');
    return this.fallbackEmbed(text);
  }

  /** Embed many texts at once (fallback embeds each text independently). */
  async embedBatch(texts) {
    if (this.initialized && this.embedder) {
      return await this.embedder.embedBatch(texts);
    }
    return Promise.all(texts.map(t => this.fallbackEmbed(t)));
  }

  /**
   * Deterministic hash-based pseudo-embedding for testing.
   * NOT semantically meaningful - use real embedder in production.
   * @returns {Float32Array} 384 values in [-1, 1)
   */
  fallbackEmbed(text) {
    const hash = createHash('sha256').update(text).digest();
    const embedding = new Float32Array(384);
    for (let i = 0; i < 384; i++) {
      // Repeat the 32 digest bytes across the vector, centered around 0.
      embedding[i] = (hash[i % 32] - 128) / 128;
    }
    return embedding;
  }

  /**
   * Cosine similarity between two equal-length vectors.
   * @returns {Promise<number>} similarity in [-1, 1]; 0 when either vector
   *   has zero norm (previously this divided by zero and returned NaN)
   */
  async cosineSimilarity(a, b) {
    let dot = 0;
    let normA = 0;
    let normB = 0;
    for (let i = 0; i < a.length; i++) {
      dot += a[i] * b[i];
      normA += a[i] * a[i];
      normB += b[i] * b[i];
    }
    // A zero vector has no direction: report no similarity instead of NaN.
    if (normA === 0 || normB === 0) return 0;
    return dot / (Math.sqrt(normA) * Math.sqrt(normB));
  }
}
381
+
382
+ // ============================================
383
+ // REAL AGENT
384
+ // ============================================
385
+
386
+ /**
387
+ * Real agent that executes tasks via LLM
388
+ */
389
/**
 * Real agent that executes tasks via LLM.
 *
 * Emits 'started' / 'completed' / 'error' events around each task, records
 * a task history with durations, and accumulates token usage in `this.cost`.
 * The 'embedder' agent type routes work to a RealEmbedder instead of the LLM.
 */
export class RealAgent extends EventEmitter {
  constructor(type, options = {}) {
    super();
    this.id = `agent-${type}-${Date.now()}-${randomBytes(4).toString('hex')}`;
    this.type = type;
    // Unknown agent types fall back to the coder prompt.
    this.systemPrompt = AGENT_PROMPTS[type] || AGENT_PROMPTS.coder;
    this.llm = new LLMClient(options);
    this.embedder = null;
    this.status = 'idle';
    this.taskHistory = [];
    this.cost = { inputTokens: 0, outputTokens: 0 };
  }

  /** Prepare the agent; embedder agents additionally load RealEmbedder. */
  async initialize() {
    if (this.type === 'embedder') {
      this.embedder = new RealEmbedder();
      await this.embedder.initialize();
    }
    return this;
  }

  /**
   * Execute a task, emitting lifecycle events and recording history.
   * @throws {Error} when the LLM is unconfigured (non-embedder agents only)
   */
  async execute(task, context = {}) {
    if (!this.llm.isConfigured() && this.type !== 'embedder') {
      throw new Error('LLM not configured. Set ANTHROPIC_API_KEY or OPENAI_API_KEY');
    }

    this.status = 'executing';
    this.emit('started', { id: this.id, type: this.type, task });

    const startedAt = Date.now();

    try {
      // Embedder agents bypass the LLM entirely.
      const useEmbedder = this.type === 'embedder' && this.embedder;
      const result = useEmbedder
        ? await this.executeEmbeddingTask(task, context)
        : await this.executeLLMTask(task, context);

      const duration = Date.now() - startedAt;
      this.taskHistory.push({
        task,
        result,
        duration,
        timestamp: new Date().toISOString(),
      });

      this.status = 'idle';
      this.emit('completed', { id: this.id, result, duration });
      return result;
    } catch (error) {
      this.status = 'error';
      this.emit('error', { id: this.id, error: error.message });
      throw error;
    }
  }

  /**
   * Run one LLM completion, appending any context files (truncated to 10 KB
   * each) and free-form context to the user message.
   */
  async executeLLMTask(task, context = {}) {
    const segments = [task];

    const files = context.files ?? [];
    if (files.length > 0) {
      segments.push('\n\n--- FILES ---\n');
      for (const file of files) {
        try {
          const content = readFileSync(file, 'utf-8');
          segments.push(`\n### ${file}\n\`\`\`\n${content.slice(0, 10000)}\n\`\`\`\n`);
        } catch (e) {
          // Unreadable files are reported inline rather than aborting the task.
          segments.push(`\n### ${file}\n(Could not read file: ${e.message})\n`);
        }
      }
    }

    if (context.additionalContext) {
      segments.push(`\n\n--- ADDITIONAL CONTEXT ---\n${context.additionalContext}`);
    }

    const response = await this.llm.complete(this.systemPrompt, segments.join(''), {
      model: context.model || 'balanced',
    });

    // Accumulate usage; field names differ between Anthropic and OpenAI.
    const usage = response.usage;
    if (usage) {
      this.cost.inputTokens += usage.input_tokens || usage.prompt_tokens || 0;
      this.cost.outputTokens += usage.output_tokens || usage.completion_tokens || 0;
    }

    return {
      content: response.content,
      model: response.model,
      stopReason: response.stopReason,
      agentId: this.id,
      agentType: this.type,
    };
  }

  /**
   * Embed `context.texts` (or the task string itself) and return truncated
   * previews — only the first 10 components of each vector are included.
   */
  async executeEmbeddingTask(task, context = {}) {
    const texts = context.texts || [task];
    const vectors = await this.embedder.embedBatch(texts);

    const previews = vectors.map((vector, i) => ({
      text: texts[i].slice(0, 100),
      embedding: Array.from(vector).slice(0, 10), // Preview
      dimensions: vector.length,
    }));

    return {
      embeddings: previews,
      count: vectors.length,
      agentId: this.id,
      agentType: this.type,
    };
  }

  /** Snapshot of identity, status, task count, cost, and configuration state. */
  getStats() {
    const configured = this.llm.isConfigured() || this.type === 'embedder';
    return {
      id: this.id,
      type: this.type,
      status: this.status,
      tasksCompleted: this.taskHistory.length,
      cost: this.cost,
      configured,
    };
  }
}
521
+
522
+ // ============================================
523
+ // REAL RELAY SYNC CLIENT
524
+ // ============================================
525
+
526
+ /**
527
+ * Real sync client that connects to the actual relay server
528
+ */
529
/**
 * Real sync client that connects to the actual relay server.
 *
 * Maintains a CRDT-style ledger (grow-only earned/spent maps merged with
 * max-wins) and auto-reconnects with exponential backoff — except after an
 * explicit close(), which previously still triggered reconnection attempts.
 */
export class RelaySyncClient extends EventEmitter {
  constructor(options = {}) {
    super();
    this.relayUrl = options.relayUrl || 'ws://localhost:8080';
    this.nodeId = options.nodeId || `node-${randomBytes(8).toString('hex')}`;
    this.ws = null;
    this.connected = false;
    // earned/spent are keyed maps of individual ledger entries; balance is derived.
    this.ledgerState = { earned: {}, spent: {}, balance: 0 };
    this.reconnectAttempts = 0;
    this.maxReconnects = options.maxReconnects || 10;
    // Set by close(); suppresses auto-reconnect after a deliberate shutdown.
    this.intentionalClose = false;
  }

  /**
   * Connect to relay server and register this node.
   * @returns {Promise<boolean>} resolves true once registered with the relay
   * @throws on connection error or 10s timeout
   */
  async connect() {
    const WebSocketImpl = await this.loadWebSocket();
    this.intentionalClose = false;

    return new Promise((resolve, reject) => {
      this.ws = new WebSocketImpl(this.relayUrl);

      const timeout = setTimeout(() => {
        reject(new Error('Connection timeout'));
      }, 10000);

      this.ws.onopen = () => {
        clearTimeout(timeout);
        this.connected = true;
        this.reconnectAttempts = 0;

        // Register with relay
        this.send({
          type: 'register',
          nodeId: this.nodeId,
          capabilities: ['sync', 'agent', 'compute'],
        });

        this.emit('connected');
        resolve(true);
      };

      this.ws.onmessage = (event) => {
        let message;
        try {
          message = JSON.parse(event.data);
        } catch (err) {
          // A malformed frame previously threw out of the handler; skip it.
          console.warn('[Sync] Ignoring malformed relay message:', err.message);
          return;
        }
        this.handleMessage(message);
      };

      this.ws.onclose = () => {
        clearTimeout(timeout);  // avoid a dangling timer if closed pre-open
        this.connected = false;
        this.emit('disconnected');
        if (!this.intentionalClose) {
          this.scheduleReconnect();
        }
      };

      this.ws.onerror = (error) => {
        clearTimeout(timeout);
        reject(error);
      };
    });
  }

  /** Resolve a WebSocket implementation: global (browser) or the `ws` package. */
  async loadWebSocket() {
    if (typeof WebSocket !== 'undefined') {
      return WebSocket;
    }
    const ws = await import('ws');
    return ws.default || ws.WebSocket;
  }

  /** Exponential backoff reconnect (1s, 2s, 4s, ... capped at 30s). */
  scheduleReconnect() {
    if (this.intentionalClose) return;
    if (this.reconnectAttempts < this.maxReconnects) {
      this.reconnectAttempts++;
      const delay = Math.min(1000 * Math.pow(2, this.reconnectAttempts), 30000);
      setTimeout(() => this.connect().catch(() => {}), delay);
    }
  }

  /** Dispatch a parsed relay message to the matching event/handler. */
  handleMessage(message) {
    switch (message.type) {
      case 'registered':
        console.log(`[Sync] Registered with relay as ${this.nodeId}`);
        this.emit('registered', message);
        break;

      case 'ledger_sync':
        this.mergeLedgerState(message.state);
        break;

      case 'peer_state':
        this.emit('peer_state', message);
        break;

      case 'time_crystal_sync':
        this.emit('time_crystal', message);
        break;

      default:
        this.emit('message', message);
    }
  }

  /**
   * Send message to relay.
   * @returns {boolean} true if the message was written to an open socket
   */
  send(message) {
    if (this.connected && this.ws?.readyState === 1) {
      this.ws.send(JSON.stringify(message));
      return true;
    }
    return false;
  }

  /**
   * Sync ledger state with relay (best-effort; false when disconnected).
   */
  syncLedger(state) {
    return this.send({
      type: 'ledger_sync',
      nodeId: this.nodeId,
      state,
      timestamp: Date.now(),
    });
  }

  /**
   * Merge incoming ledger state (CRDT): per-key max-wins on both maps,
   * then the balance is recomputed from the merged entries.
   */
  mergeLedgerState(remoteState) {
    if (!remoteState) return;

    // Merge earned (max wins)
    for (const [key, value] of Object.entries(remoteState.earned || {})) {
      const current = this.ledgerState.earned[key] || 0;
      this.ledgerState.earned[key] = Math.max(current, value);
    }

    // Merge spent (max wins)
    for (const [key, value] of Object.entries(remoteState.spent || {})) {
      const current = this.ledgerState.spent[key] || 0;
      this.ledgerState.spent[key] = Math.max(current, value);
    }

    // Recalculate balance
    const totalEarned = Object.values(this.ledgerState.earned).reduce((a, b) => a + b, 0);
    const totalSpent = Object.values(this.ledgerState.spent).reduce((a, b) => a + b, 0);
    this.ledgerState.balance = totalEarned - totalSpent;

    this.emit('ledger_updated', this.ledgerState);
  }

  /**
   * Credit rUv.
   * @returns {number} the new balance
   */
  credit(amount, reason) {
    // Random suffix prevents two credits within the same millisecond with the
    // same reason from sharing a key — the overwrite silently lost the first
    // entry on the next balance recalculation.
    const key = `${Date.now()}-${reason}-${randomBytes(2).toString('hex')}`;
    this.ledgerState.earned[key] = amount;
    this.ledgerState.balance += amount;
    this.syncLedger(this.ledgerState);
    return this.ledgerState.balance;
  }

  /**
   * Spend rUv.
   * @returns {number} the new balance
   * @throws {Error} when the balance is insufficient
   */
  spend(amount, reason) {
    if (this.ledgerState.balance < amount) {
      throw new Error('Insufficient balance');
    }
    const key = `${Date.now()}-${reason}-${randomBytes(2).toString('hex')}`;
    this.ledgerState.spent[key] = amount;
    this.ledgerState.balance -= amount;
    this.syncLedger(this.ledgerState);
    return this.ledgerState.balance;
  }

  /** Current derived balance (earned − spent). */
  getBalance() {
    return this.ledgerState.balance;
  }

  /** Deliberately shut down; no reconnection will be attempted. */
  close() {
    this.intentionalClose = true;
    if (this.ws) {
      this.ws.close();
    }
  }
}
717
+
718
+ // ============================================
719
+ // REAL AGENT MANAGER
720
+ // ============================================
721
+
722
+ /**
723
+ * Manager for real agents with actual execution
724
+ */
725
/**
 * Manager for real agents with actual execution.
 *
 * Owns a registry of spawned RealAgents, an optional RelaySyncClient for
 * ledger/credit accounting, and a shared RealEmbedder.
 */
export class RealAgentManager extends EventEmitter {
  constructor(options = {}) {
    super();
    this.agents = new Map();
    this.syncClient = null;
    this.embedder = null;
    this.options = options;
  }

  /**
   * Load the embedder and, when sync is requested, connect to the relay.
   * A failed relay connection is logged and tolerated.
   */
  async initialize() {
    this.embedder = new RealEmbedder();
    await this.embedder.initialize();

    const wantsSync = Boolean(this.options.relayUrl || this.options.enableSync);
    if (wantsSync) {
      this.syncClient = new RelaySyncClient({
        relayUrl: this.options.relayUrl || 'ws://localhost:8080',
        nodeId: this.options.nodeId,
      });

      try {
        await this.syncClient.connect();
        console.log('[AgentManager] Connected to relay server');
      } catch (error) {
        console.warn('[AgentManager] Relay connection failed:', error.message);
      }
    }

    return this;
  }

  /**
   * Spawn a real agent of the given type and register it.
   * @returns {Promise<RealAgent>}
   */
  async spawn(type, options = {}) {
    // NOTE(review): this defaults to 'anthropic' even though LLMClient itself
    // defaults to the local provider — confirm that is intentional.
    const agent = new RealAgent(type, {
      provider: options.provider || this.options.provider || 'anthropic',
      apiKey: options.apiKey || this.options.apiKey,
      model: options.model || 'balanced',
    });

    await agent.initialize();
    this.agents.set(agent.id, agent);

    // Track agent spawn with credits when a relay connection exists.
    if (this.syncClient?.connected) {
      const spawnCost = {
        researcher: 1,
        coder: 2,
        reviewer: 1.5,
        tester: 1,
        analyst: 1,
        optimizer: 2,
        coordinator: 3,
        embedder: 0.5,
      };
      try {
        this.syncClient.spend(spawnCost[type] || 1, `spawn-${type}`);
      } catch (e) {
        // Insufficient balance is tolerated: spawning proceeds unpaid.
      }
    }

    this.emit('agent_spawned', { id: agent.id, type });
    return agent;
  }

  /**
   * Execute a task on a previously spawned agent.
   * @throws {Error} when the agent id is unknown
   */
  async execute(agentId, task, context = {}) {
    const agent = this.agents.get(agentId);
    if (!agent) {
      throw new Error(`Agent not found: ${agentId}`);
    }

    const result = await agent.execute(task, context);

    // Credit one rUv per completed task when connected to the relay.
    if (this.syncClient?.connected) {
      this.syncClient.credit(1, `task-${agent.type}`);
    }

    return result;
  }

  /** Quick execute - spawn and run in one call. */
  async quickExecute(type, task, context = {}) {
    const agent = await this.spawn(type, context);
    return agent.execute(task, context);
  }

  /** Look up a spawned agent by id (undefined when unknown). */
  getAgent(id) {
    return this.agents.get(id);
  }

  /** Stats snapshots for every spawned agent. */
  listAgents() {
    const stats = [];
    for (const agent of this.agents.values()) {
      stats.push(agent.getStats());
    }
    return stats;
  }

  /** Ledger balance, or 0 when no sync client is attached. */
  getBalance() {
    return this.syncClient?.getBalance() || 0;
  }

  /** Shut down the relay connection, if any. */
  async close() {
    if (this.syncClient) {
      this.syncClient.close();
    }
  }
}
830
+
831
// ============================================
// EXPORTS
// ============================================

// Classes (LLMClient, RealEmbedder, RealAgent, RelaySyncClient,
// RealAgentManager) are already exported via 'export class' declarations
// above. Only export non-class items here.
export { AGENT_PROMPTS, LLM_PROVIDERS };

// Default export is the top-level orchestrator.
export default RealAgentManager;