jarviscore-framework 0.2.1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. examples/cloud_deployment_example.py +162 -0
  2. examples/customagent_cognitive_discovery_example.py +343 -0
  3. examples/fastapi_integration_example.py +570 -0
  4. jarviscore/__init__.py +19 -5
  5. jarviscore/cli/smoketest.py +8 -4
  6. jarviscore/core/agent.py +227 -0
  7. jarviscore/core/mesh.py +9 -0
  8. jarviscore/data/examples/cloud_deployment_example.py +162 -0
  9. jarviscore/data/examples/custom_profile_decorator.py +134 -0
  10. jarviscore/data/examples/custom_profile_wrap.py +168 -0
  11. jarviscore/data/examples/customagent_cognitive_discovery_example.py +343 -0
  12. jarviscore/data/examples/fastapi_integration_example.py +570 -0
  13. jarviscore/docs/API_REFERENCE.md +283 -3
  14. jarviscore/docs/CHANGELOG.md +139 -0
  15. jarviscore/docs/CONFIGURATION.md +1 -1
  16. jarviscore/docs/CUSTOMAGENT_GUIDE.md +997 -85
  17. jarviscore/docs/GETTING_STARTED.md +228 -267
  18. jarviscore/docs/TROUBLESHOOTING.md +1 -1
  19. jarviscore/docs/USER_GUIDE.md +153 -8
  20. jarviscore/integrations/__init__.py +16 -0
  21. jarviscore/integrations/fastapi.py +247 -0
  22. jarviscore/p2p/broadcaster.py +10 -3
  23. jarviscore/p2p/coordinator.py +310 -14
  24. jarviscore/p2p/keepalive.py +45 -23
  25. jarviscore/p2p/peer_client.py +311 -12
  26. jarviscore/p2p/swim_manager.py +9 -4
  27. jarviscore/profiles/__init__.py +7 -1
  28. jarviscore/profiles/customagent.py +295 -74
  29. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/METADATA +66 -18
  30. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/RECORD +37 -22
  31. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/WHEEL +1 -1
  32. tests/test_13_dx_improvements.py +554 -0
  33. tests/test_14_cloud_deployment.py +403 -0
  34. tests/test_15_llm_cognitive_discovery.py +684 -0
  35. tests/test_16_unified_dx_flow.py +947 -0
  36. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/licenses/LICENSE +0 -0
  37. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/top_level.txt +0 -0
tests/test_15_llm_cognitive_discovery.py
@@ -0,0 +1,684 @@
+ """
+ Test 15: LLM Cognitive Discovery - Smart Autonomous Agent Discovery
+
+ Tests the complete LLM + Cognitive Discovery integration:
+ 1. Cognitive context generation with real peers
+ 2. LLM receives and understands peer context
+ 3. LLM decides to use peer tools
+ 4. End-to-end peer communication
+
+ This test file includes both:
+ - Unit tests (always run, use mocks)
+ - Integration tests (skip if no LLM API key)
+
+ Run with: pytest tests/test_15_llm_cognitive_discovery.py -v -s
+ """
+ import asyncio
+ import os
+ import sys
+ import pytest
+ import logging
+ from unittest.mock import AsyncMock, MagicMock, patch
+
+ sys.path.insert(0, '.')
+
+ # Setup logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # FIXTURES
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ def get_llm_client():
+     """Get configured LLM client from settings."""
+     try:
+         from jarviscore.config import settings
+         from anthropic import Anthropic
+
+         api_key = (
+             settings.claude_api_key or
+             os.environ.get("CLAUDE_API_KEY") or
+             os.environ.get("ANTHROPIC_API_KEY")
+         )
+
+         if not api_key:
+             return None, None, "No API key"
+
+         endpoint = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
+         model = settings.claude_model or os.environ.get("CLAUDE_MODEL") or "claude-sonnet-4-20250514"
+
+         if endpoint:
+             client = Anthropic(api_key=api_key, base_url=endpoint)
+         else:
+             client = Anthropic(api_key=api_key)
+
+         return client, model, None
+     except Exception as e:
+         return None, None, str(e)
+
+
+ def has_valid_llm_api_key():
+     """Check if a valid LLM API key is configured by testing it."""
+     try:
+         client, model, error = get_llm_client()
+         if error:
+             return False
+
+         # Actually validate the key with a minimal request
+         client.messages.create(
+             model=model,
+             max_tokens=10,
+             messages=[{"role": "user", "content": "Hi"}]
+         )
+         return True
+     except Exception as e:
+         print(f"LLM validation failed: {e}")
+         return False
+
+
+ # Cache the result to avoid multiple API calls
+ _llm_available = None
+
+
+ def llm_is_available():
+     """Check if LLM is available (cached)."""
+     global _llm_available
+     if _llm_available is None:
+         _llm_available = has_valid_llm_api_key()
+     return _llm_available
+
+
+ # Skip marker for tests requiring real LLM
+ requires_llm = pytest.mark.skipif(
+     not llm_is_available(),
+     reason="No valid LLM API key configured"
+ )
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TEST: COGNITIVE CONTEXT WITH REAL MESH
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestCognitiveContextWithRealMesh:
+     """Test cognitive context generation in a real mesh."""
+
+     @pytest.mark.asyncio
+     async def test_cognitive_context_reflects_actual_peers(self):
+         """Test get_cognitive_context() shows actual mesh peers."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         class AgentA(CustomAgent):
+             role = "analyst"
+             capabilities = ["data_analysis", "statistics"]
+             description = "Analyzes data and provides insights"
+
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         class AgentB(CustomAgent):
+             role = "researcher"
+             capabilities = ["web_search", "research"]
+             description = "Researches topics on the web"
+
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7970})
+         agent_a = mesh.add(AgentA())
+         agent_b = mesh.add(AgentB())
+
+         await mesh.start()
+
+         try:
+             # Get cognitive context from agent_a's perspective
+             context = agent_a.peers.get_cognitive_context(format="markdown")
+
+             # Should see agent_b but not itself
+             assert "researcher" in context
+             assert "web_search" in context or "research" in context
+             # Should not see itself
+             # (agent_a is "analyst", context should show OTHER peers)
+
+             # Get context from agent_b's perspective
+             context_b = agent_b.peers.get_cognitive_context(format="markdown")
+             assert "analyst" in context_b
+             assert "data_analysis" in context_b or "statistics" in context_b
+
+         finally:
+             await mesh.stop()
+
+     @pytest.mark.asyncio
+     async def test_cognitive_context_updates_with_peer_changes(self):
+         """Test cognitive context updates when peers join."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         class Observer(CustomAgent):
+             role = "observer"
+             capabilities = ["observation"]
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         class LateJoiner(CustomAgent):
+             role = "late_joiner"
+             capabilities = ["late_capability"]
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7971})
+         observer = mesh.add(Observer())
+
+         await mesh.start()
+
+         try:
+             # Initially, observer sees no other peers (only itself in registry)
+             peers_before = observer.peers.list_peers()
+             local_peer_count_before = len([p for p in peers_before if p.get('location') == 'local'])
+
+             # Add another agent dynamically
+             late_joiner = mesh.add(LateJoiner())
+
+             # Now observer should see the new peer
+             peers_after = observer.peers.list_peers()
+
+             # Should have one more peer
+             assert len(peers_after) > len(peers_before) or any(
+                 p['role'] == 'late_joiner' for p in peers_after
+             )
+
+             # Cognitive context should include new peer
+             context = observer.peers.get_cognitive_context()
+             assert "late_joiner" in context or "late_capability" in context
+
+         finally:
+             await mesh.stop()
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TEST: CUSTOMAGENT PEER COMMUNICATION
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestCustomAgentPeerCommunication:
+     """Test CustomAgent handles peer requests correctly."""
+
+     @pytest.mark.asyncio
+     async def test_customagent_receives_and_responds(self):
+         """Test CustomAgent receives requests and sends responses."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         request_received = False
+         request_data = None
+
+         class ResponderAgent(CustomAgent):
+             role = "responder"
+             capabilities = ["responding"]
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 nonlocal request_received, request_data
+                 request_received = True
+                 request_data = msg.data
+                 return {"echo": msg.data.get("message"), "status": "received"}
+
+         class RequesterAgent(CustomAgent):
+             role = "requester"
+             capabilities = ["requesting"]
+
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7972})
+         responder = mesh.add(ResponderAgent())
+         requester = mesh.add(RequesterAgent())
+
+         await mesh.start()
+
+         # Start responder listening in background
+         responder_task = asyncio.create_task(responder.run())
+
+         try:
+             # Wait for responder to start
+             await asyncio.sleep(0.2)
+
+             # Send request from requester to responder
+             response = await requester.peers.request(
+                 "responder",
+                 {"message": "Hello from requester!"},
+                 timeout=5
+             )
+
+             # Verify responder received the request
+             assert request_received is True
+             assert request_data["message"] == "Hello from requester!"
+
+             # Verify response was received
+             assert response is not None
+             assert response.get("echo") == "Hello from requester!"
+             assert response.get("status") == "received"
+
+         finally:
+             responder.request_shutdown()
+             responder_task.cancel()
+             try:
+                 await responder_task
+             except asyncio.CancelledError:
+                 pass
+             await mesh.stop()
+
+     @pytest.mark.asyncio
+     async def test_cognitive_context_enables_peer_discovery_for_requests(self):
+         """Test that cognitive context helps discover correct peer for requests."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         class AnalystAgent(CustomAgent):
+             role = "analyst"
+             capabilities = ["data_analysis", "statistics"]
+             description = "Expert in data analysis"
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 query = msg.data.get("query", "")
+                 return {"analysis": f"Analyzed: {query}", "confidence": 0.9}
+
+         class CoordinatorAgent(CustomAgent):
+             role = "coordinator"
+             capabilities = ["coordination"]
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 return {}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7973})
+         analyst = mesh.add(AnalystAgent())
+         coordinator = mesh.add(CoordinatorAgent())
+
+         await mesh.start()
+         analyst_task = asyncio.create_task(analyst.run())
+
+         try:
+             await asyncio.sleep(0.2)
+
+             # Coordinator gets cognitive context
+             context = coordinator.peers.get_cognitive_context(format="json")
+
+             import json
+             context_data = json.loads(context)
+
+             # Find analyst in peers
+             analyst_peer = None
+             for peer in context_data.get("available_peers", []):
+                 if peer.get("role") == "analyst":
+                     analyst_peer = peer
+                     break
+
+             assert analyst_peer is not None
+             assert "data_analysis" in analyst_peer.get("capabilities", [])
+
+             # Coordinator can now send request to analyst by role
+             response = await coordinator.peers.request(
+                 "analyst",
+                 {"query": "Analyze Q4 sales"},
+                 timeout=5
+             )
+
+             assert "analysis" in response
+             assert "Q4 sales" in response["analysis"]
+
+         finally:
+             analyst.request_shutdown()
+             analyst_task.cancel()
+             try:
+                 await analyst_task
+             except asyncio.CancelledError:
+                 pass
+             await mesh.stop()
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TEST: BUILD SYSTEM PROMPT INTEGRATION
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestBuildSystemPromptIntegration:
+     """Test build_system_prompt with real mesh peers."""
+
+     @pytest.mark.asyncio
+     async def test_build_system_prompt_includes_all_peers(self):
+         """Test build_system_prompt includes all mesh peers."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         class Agent1(CustomAgent):
+             role = "writer"
+             capabilities = ["writing", "content_creation"]
+             description = "Creates written content"
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         class Agent2(CustomAgent):
+             role = "editor"
+             capabilities = ["editing", "proofreading"]
+             description = "Edits and proofreads content"
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         class Agent3(CustomAgent):
+             role = "publisher"
+             capabilities = ["publishing"]
+             description = "Publishes finalized content"
+             async def execute_task(self, task):
+                 return {"status": "success"}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7974})
+         writer = mesh.add(Agent1())
+         editor = mesh.add(Agent2())
+         publisher = mesh.add(Agent3())
+
+         await mesh.start()
+
+         try:
+             # Build system prompt from writer's perspective
+             base_prompt = "You are a helpful writing assistant."
+             full_prompt = writer.peers.build_system_prompt(base_prompt)
+
+             # Should include base prompt
+             assert "You are a helpful writing assistant" in full_prompt
+
+             # Should include peer context header
+             assert "AVAILABLE MESH PEERS" in full_prompt
+
+             # Should include editor and publisher (but not writer itself)
+             assert "editor" in full_prompt
+             assert "publisher" in full_prompt
+
+             # Should include capabilities
+             assert "editing" in full_prompt or "proofreading" in full_prompt
+             assert "publishing" in full_prompt
+
+         finally:
+             await mesh.stop()
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TEST: LLM INTEGRATION (requires API key)
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestLLMCognitiveDiscovery:
+     """Integration tests with real LLM calls."""
+
+     @requires_llm
+     @pytest.mark.asyncio
+     async def test_llm_receives_peer_context_in_prompt(self):
+         """Test LLM receives and understands peer context."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         class SpecialistAgent(CustomAgent):
+             role = "data_specialist"
+             capabilities = ["data_processing", "analytics"]
+             description = "Processes and analyzes data"
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 return {"result": "processed"}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7975})
+         specialist = mesh.add(SpecialistAgent())
+
+         await mesh.start()
+
+         try:
+             # Build prompt with peer context
+             base_prompt = "List the available specialist agents you can delegate to."
+             full_prompt = specialist.peers.build_system_prompt(base_prompt)
+
+             # Create LLM client using configured settings
+             client, model, error = get_llm_client()
+             assert client is not None, f"Failed to get LLM client: {error}"
+
+             # Ask LLM about available peers
+             response = client.messages.create(
+                 model=model,
+                 max_tokens=256,
+                 messages=[{"role": "user", "content": "What specialist agents are available to help me?"}],
+                 system=full_prompt
+             )
+
+             response_text = response.content[0].text.lower()
+
+             # LLM should understand there are no other peers from specialist's view
+             # (specialist only sees itself, no other peers in this test)
+             # This validates the LLM received and processed the context
+
+             assert len(response_text) > 0 # Got a response
+
+         finally:
+             await mesh.stop()
+
+     @requires_llm
+     @pytest.mark.asyncio
+     async def test_llm_decides_to_delegate_based_on_context(self):
+         """Test LLM autonomously decides to delegate based on peer context."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         delegation_occurred = False
+
+         class AnalystAgent(CustomAgent):
+             role = "analyst"
+             capabilities = ["data_analysis", "statistics", "insights"]
+             description = "Expert data analyst"
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 nonlocal delegation_occurred
+                 delegation_occurred = True
+                 return {
+                     "analysis": "Sales are up 15% quarter over quarter",
+                     "insights": ["Positive trend", "Growth accelerating"]
+                 }
+
+         class CoordinatorAgent(CustomAgent):
+             role = "coordinator"
+             capabilities = ["coordination", "delegation"]
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 return {}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7976})
+         analyst = mesh.add(AnalystAgent())
+         coordinator = mesh.add(CoordinatorAgent())
+
+         await mesh.start()
+         analyst_task = asyncio.create_task(analyst.run())
+
+         try:
+             await asyncio.sleep(0.2)
+
+             # Build coordinator's system prompt with peer awareness
+             base_prompt = """You are a coordinator. When users ask for data analysis,
+             you MUST use the ask_peer tool to delegate to the analyst.
+             Always delegate analysis tasks - never try to do them yourself."""
+
+             system_prompt = coordinator.peers.build_system_prompt(base_prompt)
+
+             # Verify analyst is in the context
+             assert "analyst" in system_prompt
+             assert "data_analysis" in system_prompt or "statistics" in system_prompt
+
+             # Create LLM client using configured settings
+             client, model, error = get_llm_client()
+             assert client is not None, f"Failed to get LLM client: {error}"
+
+             tools = [{
+                 "name": "ask_peer",
+                 "description": "Delegate a task to a specialist agent",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "role": {"type": "string", "description": "Role of agent to ask"},
+                         "question": {"type": "string", "description": "Question for the agent"}
+                     },
+                     "required": ["role", "question"]
+                 }
+             }]
+
+             # Ask for analysis - LLM should decide to delegate
+             response = client.messages.create(
+                 model=model,
+                 max_tokens=256,
+                 messages=[{"role": "user", "content": "Please analyze the Q4 sales data"}],
+                 system=system_prompt,
+                 tools=tools
+             )
+
+             # Check if LLM decided to use ask_peer tool
+             tool_used = False
+             for block in response.content:
+                 if block.type == "tool_use" and block.name == "ask_peer":
+                     tool_used = True
+                     # Execute the peer request
+                     args = block.input
+                     peer_response = await coordinator.peers.request(
+                         args.get("role", "analyst"),
+                         {"question": args.get("question", "")},
+                         timeout=5
+                     )
+                     assert "analysis" in peer_response
+                     break
+
+             # LLM should have decided to use the tool
+             assert tool_used, "LLM should have decided to delegate to analyst"
+             assert delegation_occurred, "Analyst should have received the request"
+
+         finally:
+             analyst.request_shutdown()
+             analyst_task.cancel()
+             try:
+                 await analyst_task
+             except asyncio.CancelledError:
+                 pass
+             await mesh.stop()
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TEST: END-TO-END COGNITIVE DISCOVERY FLOW
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestEndToEndCognitiveDiscovery:
+     """End-to-end tests for the complete cognitive discovery flow."""
+
+     @pytest.mark.asyncio
+     async def test_full_flow_without_llm(self):
+         """Test complete flow with mock LLM decisions."""
+         from jarviscore import Mesh
+         from jarviscore.profiles import CustomAgent
+
+         analyst_requests = []
+         scout_requests = []
+
+         class AnalystAgent(CustomAgent):
+             role = "analyst"
+             capabilities = ["analysis"]
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 analyst_requests.append(msg.data)
+                 return {"analysis_result": "Data analyzed successfully"}
+
+         class ScoutAgent(CustomAgent):
+             role = "scout"
+             capabilities = ["research"]
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 scout_requests.append(msg.data)
+                 return {"research_result": "Research completed"}
+
+         class OrchestratorAgent(CustomAgent):
+             role = "orchestrator"
+             capabilities = ["orchestration"]
+             listen_timeout = 0.1
+
+             async def on_peer_request(self, msg):
+                 return {}
+
+             async def process_with_mock_llm(self, query: str) -> dict:
+                 """Simulate LLM decision-making based on cognitive context."""
+                 # Get cognitive context
+                 context = self.peers.get_cognitive_context(format="json")
+
+                 import json
+                 peers = json.loads(context).get("available_peers", [])
+
+                 # Mock LLM logic: route based on keywords
+                 if "analyze" in query.lower() or "data" in query.lower():
+                     # Find analyst
+                     analyst = next((p for p in peers if p["role"] == "analyst"), None)
+                     if analyst:
+                         return await self.peers.request("analyst", {"query": query}, timeout=5)
+
+                 if "research" in query.lower() or "find" in query.lower():
+                     # Find scout
+                     scout = next((p for p in peers if p["role"] == "scout"), None)
+                     if scout:
+                         return await self.peers.request("scout", {"query": query}, timeout=5)
+
+                 return {"result": "Handled directly"}
+
+         mesh = Mesh(mode="p2p", config={"bind_port": 7977})
+         analyst = mesh.add(AnalystAgent())
+         scout = mesh.add(ScoutAgent())
+         orchestrator = mesh.add(OrchestratorAgent())
+
+         await mesh.start()
+
+         # Start listeners
+         analyst_task = asyncio.create_task(analyst.run())
+         scout_task = asyncio.create_task(scout.run())
+
+         try:
+             await asyncio.sleep(0.3)
+
+             # Test 1: Query that should go to analyst
+             result1 = await orchestrator.process_with_mock_llm("Please analyze the sales data")
+             assert "analysis_result" in result1
+             assert len(analyst_requests) == 1
+
+             # Test 2: Query that should go to scout
+             result2 = await orchestrator.process_with_mock_llm("Research competitors")
+             assert "research_result" in result2
+             assert len(scout_requests) == 1
+
+             # Test 3: Query handled directly
+             result3 = await orchestrator.process_with_mock_llm("What is your name?")
+             assert "result" in result3
+             assert result3["result"] == "Handled directly"
+
+         finally:
+             analyst.request_shutdown()
+             scout.request_shutdown()
+             analyst_task.cancel()
+             scout_task.cancel()
+             try:
+                 await analyst_task
+             except asyncio.CancelledError:
+                 pass
+             try:
+                 await scout_task
+             except asyncio.CancelledError:
+                 pass
+             await mesh.stop()
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # RUN TESTS
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ if __name__ == "__main__":
+     pytest.main([__file__, "-v", "-s"])
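
For orientation, a condensed sketch of the pattern these tests exercise: build an LLM system prompt from live peer context, then delegate with peers.request(). It uses only the jarviscore calls that appear in the hunk above; the roles, port, and prompt text are illustrative, and the direct delegation below stands in for the LLM tool call the integration tests make.

import asyncio
from jarviscore import Mesh
from jarviscore.profiles import CustomAgent


class Analyst(CustomAgent):
    role = "analyst"
    capabilities = ["data_analysis"]
    description = "Analyzes data"   # surfaced to peers via cognitive context
    listen_timeout = 0.1

    async def on_peer_request(self, msg):
        # Respond to peer requests routed to this role
        return {"analysis": f"Analyzed: {msg.data.get('question', '')}"}


class Coordinator(CustomAgent):
    role = "coordinator"
    capabilities = ["coordination"]

    async def execute_task(self, task):
        return {"status": "success"}


async def main():
    mesh = Mesh(mode="p2p", config={"bind_port": 7980})   # illustrative port
    analyst = mesh.add(Analyst())
    coordinator = mesh.add(Coordinator())
    await mesh.start()
    listener = asyncio.create_task(analyst.run())   # analyst listens for peer requests
    try:
        # Peer context rendered into an LLM system prompt, as the tests above assert
        prompt = coordinator.peers.build_system_prompt("You are a coordinator.")
        assert "analyst" in prompt   # the analyst role and capabilities appear in the context

        # An LLM given `prompt` plus an ask_peer-style tool would pick role="analyst";
        # here we skip the model call and delegate directly.
        reply = await coordinator.peers.request(
            "analyst", {"question": "Analyze Q4 sales"}, timeout=5
        )
        print(reply["analysis"])
    finally:
        analyst.request_shutdown()
        listener.cancel()
        await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())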