jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +730 -0
  6. jarviscore/__init__.py +49 -36
  7. jarviscore/adapter/__init__.py +15 -9
  8. jarviscore/adapter/decorator.py +23 -19
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/scaffold.py +1 -1
  11. jarviscore/cli/smoketest.py +3 -2
  12. jarviscore/core/agent.py +44 -1
  13. jarviscore/core/mesh.py +196 -35
  14. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  15. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  16. jarviscore/data/examples/customagent_p2p_example.py +730 -0
  17. jarviscore/docs/API_REFERENCE.md +264 -51
  18. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  19. jarviscore/docs/CONFIGURATION.md +35 -21
  20. jarviscore/docs/CUSTOMAGENT_GUIDE.md +1362 -0
  21. jarviscore/docs/GETTING_STARTED.md +107 -14
  22. jarviscore/docs/TROUBLESHOOTING.md +145 -7
  23. jarviscore/docs/USER_GUIDE.md +138 -361
  24. jarviscore/orchestration/engine.py +20 -8
  25. jarviscore/p2p/__init__.py +10 -0
  26. jarviscore/p2p/coordinator.py +129 -0
  27. jarviscore/p2p/messages.py +87 -0
  28. jarviscore/p2p/peer_client.py +576 -0
  29. jarviscore/p2p/peer_tool.py +268 -0
  30. jarviscore_framework-0.2.1.dist-info/METADATA +144 -0
  31. jarviscore_framework-0.2.1.dist-info/RECORD +132 -0
  32. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +1 -1
  33. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +1 -0
  34. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  35. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  36. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  37. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  38. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  39. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  40. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  41. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  42. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  43. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  44. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  45. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  46. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  47. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  48. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  49. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  50. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  51. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  52. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  53. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  54. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  55. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  56. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  57. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  60. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  61. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  62. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  63. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  64. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  65. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  66. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  67. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  68. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  69. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  70. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  71. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  72. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  73. tests/test_01_analyst_standalone.py +124 -0
  74. tests/test_02_assistant_standalone.py +164 -0
  75. tests/test_03_analyst_with_framework.py +945 -0
  76. tests/test_04_assistant_with_framework.py +1002 -0
  77. tests/test_05_integration.py +1301 -0
  78. tests/test_06_real_llm_integration.py +760 -0
  79. tests/test_07_distributed_single_node.py +578 -0
  80. tests/test_08_distributed_multi_node.py +454 -0
  81. tests/test_09_distributed_autoagent.py +509 -0
  82. tests/test_10_distributed_customagent.py +787 -0
  83. tests/test_mesh.py +35 -4
  84. jarviscore_framework-0.1.1.dist-info/METADATA +0 -137
  85. jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
  86. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
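The largest functional change in 0.2.1 is the new jarviscore.p2p package (coordinator, messages, peer_client, peer_tool), exercised end to end by the tests/test_05_integration.py diff reproduced below. As orientation, here is a minimal sketch of the wiring pattern that test uses. It assumes the Mesh/PeerClient/peer-tool API exactly as the test exercises it; the Pinger/Ponger agents and main() are illustrative names, not part of the package.

import asyncio

from jarviscore.core.agent import Agent
from jarviscore.core.mesh import Mesh
from jarviscore.p2p.peer_client import PeerClient


class Ponger(Agent):
    role = "ponger"            # illustrative role, not from the package
    capabilities = ["pong"]

    async def run(self):
        # Serve peer requests until shutdown, like the test agents do.
        while not self.shutdown_requested:
            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                await self.peers.respond(msg, {"response": "pong"})

    async def execute_task(self, task): return {}


class Pinger(Agent):
    role = "pinger"
    capabilities = ["ping"]

    async def run(self): pass
    async def execute_task(self, task): return {}


async def main():
    mesh = Mesh(mode="p2p")
    pinger, ponger = mesh.add(Pinger), mesh.add(Ponger)

    # Mirror of the test fixture below: inject a PeerClient into every agent.
    for agent in mesh.agents:
        agent.peers = PeerClient(
            coordinator=None,
            agent_id=agent.agent_id,
            agent_role=agent.role,
            agent_registry=mesh._agent_registry,
            node_id="local",
        )

    server = asyncio.create_task(ponger.run())
    await asyncio.sleep(0.1)
    # ask_peer is exposed through the peer-tool facade used by the tests.
    print(await pinger.peers.as_tool().execute(
        "ask_peer", {"role": "ponger", "question": "ping?"}))

    ponger.request_shutdown()
    server.cancel()
    try:
        await server
    except asyncio.CancelledError:
        pass


if __name__ == "__main__":
    asyncio.run(main())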
tests/test_05_integration.py
@@ -0,0 +1,1301 @@
+ """
+ Test 5: Integration Test - Multiple LLM-Powered Agents in Mesh
+
+ Demonstrates the COMPLETE developer experience with MULTIPLE agents,
+ all being FULL MESH PARTICIPANTS that can both SEND and RECEIVE.
+
+ KEY CONCEPTS:
+ 1. Every agent is LLM-powered
+ 2. Every agent can SEND requests (via ask_peer)
+ 3. Every agent can RECEIVE requests (via run() loop)
+ 4. Every agent has get_tools() with peer tools
+ 5. The role defines what they're GOOD at, not communication direction
+
+ REAL-WORLD SCENARIO:
+ - Analyst: Good at analysis, but might ask researcher for data
+ - Researcher: Good at research, but might ask analyst to interpret
+ - Assistant: Good at chat/search, coordinates between specialists
+
+ All three can talk to each other in any direction!
+
+ FILE STRUCTURE:
+ project/
+ ├── main.py              # Entry file (mesh setup)
+ ├── agents/
+ │   ├── analyst.py       # LLM agent - good at analysis
+ │   ├── researcher.py    # LLM agent - good at research
+ │   └── assistant.py     # LLM agent - good at chat/search
+ └── ...
+ """
+ import asyncio
+ import sys
+ import pytest
+ sys.path.insert(0, '.')
+
+ from jarviscore.core.agent import Agent
+ from jarviscore.core.mesh import Mesh
+ from jarviscore.p2p.peer_client import PeerClient
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # AGENTS - All follow the SAME pattern, different capabilities
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class Analyst(Agent):
+     """
+     Analyst - Good at analysis, can also ask other peers.
+
+     This agent might receive an analysis request, realize it needs
+     more data, and ask the researcher for help.
+     """
+     role = "analyst"
+     capabilities = ["analysis", "synthesis", "reporting"]
+
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.analyses_count = 0
+         self.requests_received = []
+         self.requests_sent = []
+
+     def analyze(self, data: str) -> dict:
+         """Core capability - analyze data."""
+         self.analyses_count += 1
+         return {
+             "response": f"Analysis #{self.analyses_count}: '{data}' shows positive trends",
+             "confidence": 0.87
+         }
+
+     def get_tools(self) -> list:
+         """Return tools for LLM - includes peer tools."""
+         tools = [
+             {
+                 "name": "analyze",
+                 "description": "Analyze data and return insights",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"data": {"type": "string"}},
+                     "required": ["data"]
+                 }
+             }
+         ]
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """Execute tool - routes to local or peer tools."""
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             self.requests_sent.append({"tool": tool_name, "args": args})
+             return await self.peers.as_tool().execute(tool_name, args)
+         if tool_name == "analyze":
+             return str(self.analyze(args.get("data", "")))
+         return f"Unknown: {tool_name}"
+
+     async def run(self):
+         """Listen and respond to requests."""
+         while not self.shutdown_requested:
+             msg = await self.peers.receive(timeout=0.5)
+             if msg is None:
+                 continue
+             if msg.is_request:
+                 self.requests_received.append(msg.data)
+                 result = self.analyze(msg.data.get("query", ""))
+                 await self.peers.respond(msg, result)
+
+     async def execute_task(self, task): return {}
+
+
+ class Researcher(Agent):
+     """
+     Researcher - Good at research, can also ask other peers.
+
+     This agent might receive a research request, get results,
+     and ask the analyst to interpret them.
+     """
+     role = "researcher"
+     capabilities = ["research", "data_collection", "summarization"]
+
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.research_count = 0
+         self.requests_received = []
+         self.requests_sent = []
+
+     def research(self, topic: str) -> dict:
+         """Core capability - research a topic."""
+         self.research_count += 1
+         return {
+             "response": f"Research #{self.research_count}: Found 5 papers on '{topic}'",
+             "sources": ["paper1.pdf", "paper2.pdf"]
+         }
+
+     def get_tools(self) -> list:
+         """Return tools for LLM - includes peer tools."""
+         tools = [
+             {
+                 "name": "research",
+                 "description": "Research a topic and find relevant sources",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"topic": {"type": "string"}},
+                     "required": ["topic"]
+                 }
+             }
+         ]
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """Execute tool - routes to local or peer tools."""
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             self.requests_sent.append({"tool": tool_name, "args": args})
+             return await self.peers.as_tool().execute(tool_name, args)
+         if tool_name == "research":
+             return str(self.research(args.get("topic", "")))
+         return f"Unknown: {tool_name}"
+
+     async def run(self):
+         """Listen and respond to requests."""
+         while not self.shutdown_requested:
+             msg = await self.peers.receive(timeout=0.5)
+             if msg is None:
+                 continue
+             if msg.is_request:
+                 self.requests_received.append(msg.data)
+                 result = self.research(msg.data.get("query", ""))
+                 await self.peers.respond(msg, result)
+
+     async def execute_task(self, task): return {}
+
+
+ class Assistant(Agent):
+     """
+     Assistant - Good at chat/search, coordinates between specialists.
+
+     This agent might receive a complex request and delegate parts
+     to analyst and researcher, then combine the results.
+     """
+     role = "assistant"
+     capabilities = ["chat", "search", "coordination"]
+
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.search_count = 0
+         self.requests_received = []
+         self.requests_sent = []
+
+     def search(self, query: str) -> str:
+         """Core capability - search the web."""
+         self.search_count += 1
+         return f"Search #{self.search_count}: Results for '{query}'"
+
+     def get_tools(self) -> list:
+         """Return tools for LLM - includes peer tools."""
+         tools = [
+             {
+                 "name": "search",
+                 "description": "Search the web for information",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"query": {"type": "string"}},
+                     "required": ["query"]
+                 }
+             }
+         ]
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """Execute tool - routes to local or peer tools."""
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             self.requests_sent.append({"tool": tool_name, "args": args})
+             return await self.peers.as_tool().execute(tool_name, args)
+         if tool_name == "search":
+             return self.search(args.get("query", ""))
+         return f"Unknown: {tool_name}"
+
+     async def run(self):
+         """Listen and respond to requests."""
+         while not self.shutdown_requested:
+             msg = await self.peers.receive(timeout=0.5)
+             if msg is None:
+                 continue
+             if msg.is_request:
+                 self.requests_received.append(msg.data)
+                 result = {"response": self.search(msg.data.get("query", ""))}
+                 await self.peers.respond(msg, result)
+
+     async def execute_task(self, task): return {}
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # FIXTURES
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ @pytest.fixture
+ def mesh():
+     """Create a fresh mesh."""
+     return Mesh(mode="p2p")
+
+
+ @pytest.fixture
+ def wired_mesh(mesh):
+     """Create mesh with all three agents wired up."""
+     analyst = mesh.add(Analyst)
+     researcher = mesh.add(Researcher)
+     assistant = mesh.add(Assistant)
+
+     for agent in mesh.agents:
+         agent.peers = PeerClient(
+             coordinator=None,
+             agent_id=agent.agent_id,
+             agent_role=agent.role,
+             agent_registry=mesh._agent_registry,
+             node_id="local"
+         )
+
+     return mesh, analyst, researcher, assistant
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TESTS
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestMeshSetup:
+     """Tests for basic mesh setup with multiple agents."""
+
+     def test_all_agents_registered(self, wired_mesh):
+         """All agents should be registered in the mesh."""
+         mesh, analyst, researcher, assistant = wired_mesh
+         assert len(mesh.agents) == 3
+
+     def test_all_agents_have_peers(self, wired_mesh):
+         """All agents should have peer client injected."""
+         mesh, analyst, researcher, assistant = wired_mesh
+         for agent in [analyst, researcher, assistant]:
+             assert agent.peers is not None
+
+     def test_all_agents_see_each_other(self, wired_mesh):
+         """Each agent should see the other two in their peer list."""
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         analyst_peers = [p["role"] for p in analyst.peers.list_peers()]
+         assert "researcher" in analyst_peers
+         assert "assistant" in analyst_peers
+
+         researcher_peers = [p["role"] for p in researcher.peers.list_peers()]
+         assert "analyst" in researcher_peers
+         assert "assistant" in researcher_peers
+
+         assistant_peers = [p["role"] for p in assistant.peers.list_peers()]
+         assert "analyst" in assistant_peers
+         assert "researcher" in assistant_peers
+
+
+ class TestAllAgentsHavePeerTools:
+     """Tests that ALL agents have peer tools in their toolset."""
+
+     def test_analyst_has_peer_tools(self, wired_mesh):
+         """Analyst should have ask_peer, broadcast_update, list_peers."""
+         mesh, analyst, researcher, assistant = wired_mesh
+         tools = analyst.get_tools()
+         tool_names = [t["name"] for t in tools]
+
+         assert "analyze" in tool_names          # Local
+         assert "ask_peer" in tool_names         # Peer
+         assert "broadcast_update" in tool_names
+         assert "list_peers" in tool_names
+
+     def test_researcher_has_peer_tools(self, wired_mesh):
+         """Researcher should have ask_peer, broadcast_update, list_peers."""
+         mesh, analyst, researcher, assistant = wired_mesh
+         tools = researcher.get_tools()
+         tool_names = [t["name"] for t in tools]
+
+         assert "research" in tool_names         # Local
+         assert "ask_peer" in tool_names         # Peer
+         assert "broadcast_update" in tool_names
+         assert "list_peers" in tool_names
+
+     def test_assistant_has_peer_tools(self, wired_mesh):
+         """Assistant should have ask_peer, broadcast_update, list_peers."""
+         mesh, analyst, researcher, assistant = wired_mesh
+         tools = assistant.get_tools()
+         tool_names = [t["name"] for t in tools]
+
+         assert "search" in tool_names           # Local
+         assert "ask_peer" in tool_names         # Peer
+         assert "broadcast_update" in tool_names
+         assert "list_peers" in tool_names
+
+
+ class TestBidirectionalCommunication:
+     """Tests that prove ANY agent can talk to ANY other agent."""
+
+     @pytest.mark.asyncio
+     async def test_analyst_asks_researcher(self, wired_mesh):
+         """Analyst can ask researcher for data."""
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         researcher_task = asyncio.create_task(researcher.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             result = await analyst.execute_tool("ask_peer", {
+                 "role": "researcher",
+                 "question": "Find papers on market trends"
+             })
+             assert "Research" in result
+             assert len(analyst.requests_sent) == 1
+
+         finally:
+             researcher.request_shutdown()
+             researcher_task.cancel()
+             try: await researcher_task
+             except asyncio.CancelledError: pass
+
+     @pytest.mark.asyncio
+     async def test_researcher_asks_analyst(self, wired_mesh):
+         """Researcher can ask analyst to interpret data."""
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         analyst_task = asyncio.create_task(analyst.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             result = await researcher.execute_tool("ask_peer", {
+                 "role": "analyst",
+                 "question": "Interpret these research findings"
+             })
+             assert "Analysis" in result
+             assert len(researcher.requests_sent) == 1
+
+         finally:
+             analyst.request_shutdown()
+             analyst_task.cancel()
+             try: await analyst_task
+             except asyncio.CancelledError: pass
+
+     @pytest.mark.asyncio
+     async def test_assistant_coordinates_both(self, wired_mesh):
+         """Assistant can ask both analyst and researcher."""
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         analyst_task = asyncio.create_task(analyst.run())
+         researcher_task = asyncio.create_task(researcher.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             # Ask researcher
+             r1 = await assistant.execute_tool("ask_peer", {
+                 "role": "researcher",
+                 "question": "Research AI trends"
+             })
+             assert "Research" in r1
+
+             # Ask analyst
+             r2 = await assistant.execute_tool("ask_peer", {
+                 "role": "analyst",
+                 "question": "Analyze the findings"
+             })
+             assert "Analysis" in r2
+
+             assert len(assistant.requests_sent) == 2
+
+         finally:
+             analyst.request_shutdown()
+             researcher.request_shutdown()
+             analyst_task.cancel()
+             researcher_task.cancel()
+             for t in [analyst_task, researcher_task]:
+                 try: await t
+                 except asyncio.CancelledError: pass
+
+
+ class TestMultiAgentScenario:
+     """Tests for realistic multi-agent scenarios."""
+
+     @pytest.mark.asyncio
+     async def test_chain_of_requests(self, wired_mesh):
+         """
+         Test a chain: Assistant → Researcher → (gets data) → Assistant asks Analyst
+
+         This proves complex multi-agent workflows work.
+         """
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         analyst_task = asyncio.create_task(analyst.run())
+         researcher_task = asyncio.create_task(researcher.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             # Step 1: Assistant asks researcher
+             research_result = await assistant.execute_tool("ask_peer", {
+                 "role": "researcher",
+                 "question": "Find data on Q4 sales"
+             })
+             assert "Research" in research_result
+
+             # Step 2: Assistant asks analyst to interpret
+             analysis_result = await assistant.execute_tool("ask_peer", {
+                 "role": "analyst",
+                 "question": f"Interpret: {research_result}"
+             })
+             assert "Analysis" in analysis_result
+
+             # Step 3: Assistant broadcasts completion
+             broadcast_result = await assistant.execute_tool("broadcast_update", {
+                 "message": "Research and analysis complete!"
+             })
+             assert "Broadcast" in broadcast_result
+
+         finally:
+             analyst.request_shutdown()
+             researcher.request_shutdown()
+             for t in [analyst_task, researcher_task]:
+                 t.cancel()
+                 try: await t
+                 except asyncio.CancelledError: pass
+
+     @pytest.mark.asyncio
+     async def test_all_agents_can_receive_while_sending(self, wired_mesh):
+         """
+         All agents running their run() loops while also sending.
+
+         This proves true bidirectional communication.
+         """
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         # Start all run loops
+         analyst_task = asyncio.create_task(analyst.run())
+         researcher_task = asyncio.create_task(researcher.run())
+         assistant_task = asyncio.create_task(assistant.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             # Analyst sends to researcher
+             r1 = await analyst.execute_tool("ask_peer", {
+                 "role": "researcher", "question": "Get data"
+             })
+             assert "Research" in r1
+
+             # Researcher sends to assistant
+             r2 = await researcher.execute_tool("ask_peer", {
+                 "role": "assistant", "question": "Search for more"
+             })
+             assert "Search" in r2
+
+             # Assistant sends to analyst
+             r3 = await assistant.execute_tool("ask_peer", {
+                 "role": "analyst", "question": "Analyze this"
+             })
+             assert "Analysis" in r3
+
+             # Verify all received requests
+             assert len(researcher.requests_received) >= 1  # From analyst
+             assert len(assistant.requests_received) >= 1   # From researcher
+             assert len(analyst.requests_received) >= 1     # From assistant
+
+         finally:
+             for agent in [analyst, researcher, assistant]:
+                 agent.request_shutdown()
+             for t in [analyst_task, researcher_task, assistant_task]:
+                 t.cancel()
+                 try: await t
+                 except asyncio.CancelledError: pass
+
+
+ class TestLLMToolDispatch:
+     """Tests simulating LLM tool dispatch patterns."""
+
+     @pytest.mark.asyncio
+     async def test_llm_decides_to_ask_peer(self, wired_mesh):
+         """
+         Simulate LLM deciding to use ask_peer tool.
+
+         This is what happens in real code:
+         1. LLM receives request
+         2. LLM sees tools including ask_peer
+         3. LLM decides to delegate to specialist
+         4. Tool is executed
+         5. Result returned to LLM
+         """
+         mesh, analyst, researcher, assistant = wired_mesh
+
+         analyst_task = asyncio.create_task(analyst.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             # Step 1: Get tools (what LLM sees)
+             tools = assistant.get_tools()
+             tool_names = [t["name"] for t in tools]
+             assert "ask_peer" in tool_names
+
+             # Step 2: Simulate LLM decision
+             llm_decision = {
+                 "tool": "ask_peer",
+                 "args": {"role": "analyst", "question": "Analyze data"}
+             }
+
+             # Step 3: Execute
+             result = await assistant.execute_tool(
+                 llm_decision["tool"],
+                 llm_decision["args"]
+             )
+
+             # Step 4: Result is string for LLM
+             assert isinstance(result, str)
+
+         finally:
+             analyst.request_shutdown()
+             analyst_task.cancel()
+             try: await analyst_task
+             except asyncio.CancelledError: pass
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # LLM SIMULATION - Tests the COMPLETE LLM tool-use loop
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class MockLLM:
+     """
+     Simulates LLM behavior for testing the complete tool-use flow.
+
+     In real-world:
+     - LLM receives: system prompt + tools + user message
+     - LLM decides: use a tool OR respond directly
+     - LLM returns: tool_call OR text response
+
+     This mock makes deterministic decisions based on keywords:
+     - "analyze" → delegate to analyst (unless I AM the analyst)
+     - "research" → delegate to researcher (unless I AM the researcher)
+     - "search" → use local search tool
+     - otherwise → respond directly
+     """
+
+     def __init__(self, agent_role: str):
+         self.agent_role = agent_role
+         self.calls = []  # Track all LLM calls for verification
+
+     def chat(self, messages: list, tools: list) -> dict:
+         """
+         Simulate LLM chat completion.
+
+         Returns either:
+             {"type": "tool_use", "tool": "name", "args": {...}}
+             {"type": "text", "content": "response"}
+         """
+         self.calls.append({"messages": messages, "tools": tools})
+
+         # Get the last user message
+         user_msg = ""
+         for msg in reversed(messages):
+             if msg.get("role") == "user":
+                 user_msg = msg.get("content", "").lower()
+                 break
+
+         # Get available tool names
+         tool_names = [t["name"] for t in tools]
+
+         # Decision logic (simulates LLM reasoning)
+         # KEY: If I AM the specialist, I process locally. Otherwise delegate.
+
+         if "analyze" in user_msg:
+             if self.agent_role == "analyst":
+                 # I AM the analyst - process locally
+                 return {
+                     "type": "text",
+                     "content": f"[analyst] Analysis complete: '{user_msg}' shows positive trends with 87% confidence"
+                 }
+             elif "ask_peer" in tool_names:
+                 # Delegate to analyst
+                 return {
+                     "type": "tool_use",
+                     "tool": "ask_peer",
+                     "args": {"role": "analyst", "question": user_msg}
+                 }
+
+         elif "research" in user_msg:
+             if self.agent_role == "researcher":
+                 # I AM the researcher - process locally
+                 return {
+                     "type": "text",
+                     "content": f"[researcher] Research complete: Found 5 papers on '{user_msg}'"
+                 }
+             elif "ask_peer" in tool_names:
+                 # Delegate to researcher
+                 return {
+                     "type": "tool_use",
+                     "tool": "ask_peer",
+                     "args": {"role": "researcher", "question": user_msg}
+                 }
+
+         elif "search" in user_msg and "search" in tool_names:
+             # LLM decides to use local search tool
+             return {
+                 "type": "tool_use",
+                 "tool": "search",
+                 "args": {"query": user_msg}
+             }
+
+         elif "calculate" in user_msg and "calculate" in tool_names:
+             # LLM decides to use local calculate tool
+             expr = user_msg.split("calculate")[-1].strip()
+             return {
+                 "type": "tool_use",
+                 "tool": "calculate",
+                 "args": {"expression": expr or "1+1"}
+             }
+
+         # Default: respond directly
+         return {
+             "type": "text",
+             "content": f"[{self.agent_role}] I can help with that: {user_msg}"
+         }
+
+     def incorporate_tool_result(self, tool_name: str, result: str) -> str:
+         """
+         Simulate LLM incorporating tool result into final response.
+
+         In real-world, this would be another LLM call with the tool result.
+         """
+         return f"Based on the {tool_name} result: {result}"
+
+
+ class LLMPoweredAgent(Agent):
+     """
+     Agent with ACTUAL LLM integration (mocked for testing).
+
+     This is what a real-world agent looks like:
+     1. Has a MockLLM (would be real LLM in production)
+     2. chat() method that drives the LLM loop
+     3. Handles tool calls and incorporates results
+     """
+     role = "llm_agent"
+     capabilities = ["chat", "delegate"]
+
+     def __init__(self, agent_id=None, role_name="llm_agent"):
+         super().__init__(agent_id)
+         self.role = role_name
+         self.llm = MockLLM(role_name)
+         self.conversation_history = []
+         self.tool_calls_made = []
+
+     def get_tools(self) -> list:
+         """Return tools for LLM."""
+         tools = [
+             {
+                 "name": "search",
+                 "description": "Search for information",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"query": {"type": "string"}},
+                     "required": ["query"]
+                 }
+             },
+             {
+                 "name": "calculate",
+                 "description": "Calculate a math expression",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"expression": {"type": "string"}},
+                     "required": ["expression"]
+                 }
+             }
+         ]
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """Execute a tool."""
+         self.tool_calls_made.append({"tool": tool_name, "args": args})
+
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             return await self.peers.as_tool().execute(tool_name, args)
+         if tool_name == "search":
+             return f"Search results for: {args.get('query', '')}"
+         if tool_name == "calculate":
+             try:
+                 return f"Result: {eval(args.get('expression', '0'))}"  # eval: test-mock input only
+             except Exception:
+                 return "Error in calculation"
+         return f"Unknown tool: {tool_name}"
+
+     async def chat(self, user_message: str) -> str:
+         """
+         Complete LLM chat loop with tool use.
+
+         This is THE KEY METHOD that shows real-world LLM tool use:
+         1. Add user message to history
+         2. Call LLM with messages + tools
+         3. If LLM returns tool_use → execute tool → call LLM again with result
+         4. If LLM returns text → return as final response
+         """
+         # Step 1: Add user message
+         self.conversation_history.append({
+             "role": "user",
+             "content": user_message
+         })
+
+         # Step 2: Get tools and call LLM
+         tools = self.get_tools()
+         llm_response = self.llm.chat(self.conversation_history, tools)
+
+         # Step 3: Handle tool use (may loop multiple times)
+         max_iterations = 5  # Prevent infinite loops
+         iteration = 0
+
+         while llm_response["type"] == "tool_use" and iteration < max_iterations:
+             iteration += 1
+
+             # Execute the tool
+             tool_name = llm_response["tool"]
+             tool_args = llm_response["args"]
+             tool_result = await self.execute_tool(tool_name, tool_args)
+
+             # Add tool call and result to history
+             self.conversation_history.append({
+                 "role": "assistant",
+                 "content": f"[Tool: {tool_name}] {tool_args}"
+             })
+             self.conversation_history.append({
+                 "role": "tool",
+                 "content": tool_result
+             })
+
+             # LLM incorporates result (simulated as another call)
+             final_response = self.llm.incorporate_tool_result(tool_name, tool_result)
+             llm_response = {"type": "text", "content": final_response}
+
+         # Step 4: Return final response
+         final = llm_response["content"]
+         self.conversation_history.append({
+             "role": "assistant",
+             "content": final
+         })
+
+         return final
+
+     async def run(self):
+         """Listen for incoming requests."""
+         while not self.shutdown_requested:
+             msg = await self.peers.receive(timeout=0.5)
+             if msg is None:
+                 continue
+             if msg.is_request:
+                 # Process with LLM
+                 response = await self.chat(msg.data.get("query", ""))
+                 await self.peers.respond(msg, {"response": response})
+
+     async def execute_task(self, task): return {}
+
+
+ class TestLLMToolUseLoop:
+     """
+     Tests the COMPLETE LLM tool-use loop.
+
+     These tests prove that the ENTIRE flow works:
+     User message → LLM sees tools → LLM decides → Tool executes → Result back to LLM
+     """
+
+     @pytest.fixture
+     def llm_mesh(self):
+         """Create mesh with LLM-powered agents."""
+         mesh = Mesh(mode="p2p")
+
+         # Create agents with specific roles
+         assistant = LLMPoweredAgent(role_name="assistant")
+         analyst = LLMPoweredAgent(role_name="analyst")
+         researcher = LLMPoweredAgent(role_name="researcher")
+
+         # Manually register (registry stores lists of agents per role)
+         mesh._agent_registry["assistant"] = [assistant]
+         mesh._agent_registry["analyst"] = [analyst]
+         mesh._agent_registry["researcher"] = [researcher]
+         mesh.agents = [assistant, analyst, researcher]
+
+         # Wire up peers
+         for agent in mesh.agents:
+             agent.peers = PeerClient(
+                 coordinator=None,
+                 agent_id=agent.agent_id,
+                 agent_role=agent.role,
+                 agent_registry=mesh._agent_registry,
+                 node_id="local"
+             )
+
+         return mesh, assistant, analyst, researcher
+
+     @pytest.mark.asyncio
+     async def test_llm_uses_local_tool(self, llm_mesh):
+         """
+         LLM decides to use a LOCAL tool (search).
+
+         Flow: User asks to search → LLM sees search tool → LLM uses it → Result returned
+         """
+         mesh, assistant, analyst, researcher = llm_mesh
+
+         # User asks to search
+         response = await assistant.chat("Please search for Python tutorials")
+
+         # Verify LLM used the search tool
+         assert len(assistant.tool_calls_made) == 1
+         assert assistant.tool_calls_made[0]["tool"] == "search"
+         assert "search" in response.lower() or "result" in response.lower()
+
+     @pytest.mark.asyncio
+     async def test_llm_delegates_to_peer(self, llm_mesh):
+         """
+         LLM decides to delegate to a PEER (analyst).
+
+         Flow: User asks for analysis → LLM sees ask_peer tool → LLM delegates → Peer responds
+         """
+         mesh, assistant, analyst, researcher = llm_mesh
+
+         # Start analyst listening
+         analyst_task = asyncio.create_task(analyst.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             # User asks for analysis
+             response = await assistant.chat("Please analyze the sales data")
+
+             # Verify LLM delegated to analyst
+             assert len(assistant.tool_calls_made) >= 1
+             peer_calls = [c for c in assistant.tool_calls_made if c["tool"] == "ask_peer"]
+             assert len(peer_calls) == 1
+             assert peer_calls[0]["args"]["role"] == "analyst"
+
+             # Verify response mentions the delegation
+             assert "ask_peer" in response.lower() or "result" in response.lower()
+
+         finally:
+             analyst.request_shutdown()
+             analyst_task.cancel()
+             try: await analyst_task
+             except asyncio.CancelledError: pass
+
+     @pytest.mark.asyncio
+     async def test_llm_responds_directly_when_appropriate(self, llm_mesh):
+         """
+         LLM responds directly without using tools.
+
+         Flow: User says hello → LLM doesn't need tools → LLM responds directly
+         """
+         mesh, assistant, analyst, researcher = llm_mesh
+
+         # User sends simple greeting
+         response = await assistant.chat("Hello, how are you?")
+
+         # Verify LLM did NOT use any tools
+         assert len(assistant.tool_calls_made) == 0
+
+         # Verify response is direct
+         assert "assistant" in response.lower() or "help" in response.lower()
+
+     @pytest.mark.asyncio
+     async def test_llm_sees_correct_tools(self, llm_mesh):
+         """
+         Verify LLM receives the correct tools including peer tools.
+         """
+         mesh, assistant, analyst, researcher = llm_mesh
+
+         tools = assistant.get_tools()
+         tool_names = [t["name"] for t in tools]
+
+         # Should have local tools
+         assert "search" in tool_names
+         assert "calculate" in tool_names
+
+         # Should have peer tools
+         assert "ask_peer" in tool_names
+         assert "broadcast_update" in tool_names
+         assert "list_peers" in tool_names
+
+     @pytest.mark.asyncio
+     async def test_llm_conversation_history_tracks_tool_use(self, llm_mesh):
+         """
+         Verify conversation history includes tool calls and results.
+         """
+         mesh, assistant, analyst, researcher = llm_mesh
+
+         # Make a request that uses a tool
+         await assistant.chat("Please search for AI news")
+
+         # Check conversation history
+         history = assistant.conversation_history
+
+         # Should have: user message, tool call, tool result, assistant response
+         assert len(history) >= 3
+
+         # Find tool-related entries
+         tool_entries = [h for h in history if "Tool:" in h.get("content", "")]
+         tool_results = [h for h in history if h.get("role") == "tool"]
+
+         assert len(tool_entries) >= 1  # Tool was called
+         assert len(tool_results) >= 1  # Result was recorded
+
+     @pytest.mark.asyncio
+     async def test_multi_agent_llm_conversation(self, llm_mesh):
+         """
+         Test a complex scenario where multiple agents use LLM to communicate.
+
+         Flow:
+         1. Assistant receives request needing research
+         2. Assistant's LLM delegates to researcher
+         3. Researcher processes and responds
+         4. Assistant's LLM incorporates result
+         """
+         mesh, assistant, analyst, researcher = llm_mesh
+
+         # Start both specialist agents
+         analyst_task = asyncio.create_task(analyst.run())
+         researcher_task = asyncio.create_task(researcher.run())
+         await asyncio.sleep(0.1)
+
+         try:
+             # User asks for research
+             response = await assistant.chat("Please research quantum computing")
+
+             # Verify delegation happened
+             peer_calls = [c for c in assistant.tool_calls_made if c["tool"] == "ask_peer"]
+             assert len(peer_calls) >= 1
+             assert peer_calls[0]["args"]["role"] == "researcher"
+
+         finally:
+             analyst.request_shutdown()
+             researcher.request_shutdown()
+             for t in [analyst_task, researcher_task]:
+                 t.cancel()
+                 try: await t
+                 except asyncio.CancelledError: pass
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # FULL INTEGRATION - Complete scenario
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ async def test_full_integration():
+     """Complete integration test with all agents."""
+     print("\n" + "="*70)
+     print("FULL INTEGRATION: All Agents as Equal Mesh Participants")
+     print("="*70)
+
+     mesh = Mesh(mode="p2p")
+
+     analyst = mesh.add(Analyst)
+     researcher = mesh.add(Researcher)
+     assistant = mesh.add(Assistant)
+
+     for agent in mesh.agents:
+         agent.peers = PeerClient(
+             coordinator=None,
+             agent_id=agent.agent_id,
+             agent_role=agent.role,
+             agent_registry=mesh._agent_registry,
+             node_id="local"
+         )
+
+     # Start all listeners
+     tasks = [
+         asyncio.create_task(analyst.run()),
+         asyncio.create_task(researcher.run()),
+         asyncio.create_task(assistant.run())
+     ]
+     await asyncio.sleep(0.1)
+
+     print("\n[1] All agents see each other")
+     for agent in [analyst, researcher, assistant]:
+         peers = [p["role"] for p in agent.peers.list_peers()]
+         print(f"    {agent.role} sees: {peers}")
+
+     print("\n[2] All agents have peer tools")
+     for agent in [analyst, researcher, assistant]:
+         tools = [t["name"] for t in agent.get_tools()]
+         has_peer_tools = "ask_peer" in tools
+         print(f"    {agent.role}: {tools} (peer tools: {has_peer_tools})")
+
+     print("\n[3] Bidirectional communication")
+
+     # Analyst → Researcher
+     r = await analyst.execute_tool("ask_peer", {"role": "researcher", "question": "Get data"})
+     print(f"    Analyst → Researcher: {r[:40]}...")
+
+     # Researcher → Analyst
+     r = await researcher.execute_tool("ask_peer", {"role": "analyst", "question": "Interpret"})
+     print(f"    Researcher → Analyst: {r[:40]}...")
+
+     # Assistant → Both
+     r = await assistant.execute_tool("ask_peer", {"role": "analyst", "question": "Analyze"})
+     print(f"    Assistant → Analyst: {r[:40]}...")
+     r = await assistant.execute_tool("ask_peer", {"role": "researcher", "question": "Research"})
+     print(f"    Assistant → Researcher: {r[:40]}...")
+
+     print("\n[4] Request counts")
+     print(f"    Analyst received: {len(analyst.requests_received)}, sent: {len(analyst.requests_sent)}")
+     print(f"    Researcher received: {len(researcher.requests_received)}, sent: {len(researcher.requests_sent)}")
+     print(f"    Assistant received: {len(assistant.requests_received)}, sent: {len(assistant.requests_sent)}")
+
+     # Cleanup
+     for agent in [analyst, researcher, assistant]:
+         agent.request_shutdown()
+     for t in tasks:
+         t.cancel()
+         try: await t
+         except asyncio.CancelledError: pass
+
+     print("\n" + "="*70)
+     print("INTEGRATION TEST PASSED!")
+     print("="*70)
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # LLM TOOL-USE DEMO - Shows the complete flow with logging
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class MockLLMWithLogging(MockLLM):
+     """MockLLM with detailed logging for demo purposes."""
+
+     def chat(self, messages: list, tools: list) -> dict:
+         """Chat with detailed logging."""
+         # Get the last user message
+         user_msg = ""
+         for msg in reversed(messages):
+             if msg.get("role") == "user":
+                 user_msg = msg.get("content", "")
+                 break
+
+         print(f"    │")
+         print(f"    ├─→ [LLM RECEIVES] Message: \"{user_msg}\"")
+         print(f"    │   Tools available: {[t['name'] for t in tools]}")
+
+         # Call parent to get decision
+         result = super().chat(messages, tools)
+
+         if result["type"] == "tool_use":
+             print(f"    │")
+             print(f"    ├─→ [LLM DECIDES] Use tool: {result['tool']}")
+             print(f"    │   Args: {result['args']}")
+         else:
+             print(f"    │")
+             print(f"    ├─→ [LLM DECIDES] Respond directly (no tool needed)")
+
+         return result
+
+     def incorporate_tool_result(self, tool_name: str, result: str) -> str:
+         """Incorporate with logging."""
+         print(f"    │")
+         print(f"    ├─→ [LLM RECEIVES RESULT] From {tool_name}:")
+         print(f"    │   Result: \"{result[:60]}...\"" if len(result) > 60 else f"    │   Result: \"{result}\"")
+
+         final = super().incorporate_tool_result(tool_name, result)
+
+         print(f"    │")
+         print(f"    └─→ [LLM RESPONDS] \"{final[:60]}...\"" if len(final) > 60 else f"    └─→ [LLM RESPONDS] \"{final}\"")
+
+         return final
+
+
+ class LLMPoweredAgentWithLogging(LLMPoweredAgent):
+     """LLM-powered agent with detailed logging."""
+
+     def __init__(self, agent_id=None, role_name="llm_agent"):
+         super().__init__(agent_id, role_name)
+         self.llm = MockLLMWithLogging(role_name)  # Use logging version
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """Execute tool with logging."""
+         print(f"    │")
+         print(f"    ├─→ [EXECUTE TOOL] {tool_name}")
+
+         result = await super().execute_tool(tool_name, args)
+
+         if tool_name == "ask_peer":
+             print(f"    │   Sent to peer: {args.get('role')}")
+             print(f"    │   Peer responded: \"{result[:50]}...\"" if len(result) > 50 else f"    │   Peer responded: \"{result}\"")
+
+         return result
+
+
+ async def demo_llm_tool_use():
+     """Demo the complete LLM tool-use flow with detailed logging."""
+     print("\n" + "="*70)
+     print("LLM TOOL-USE FLOW DEMO")
+     print("="*70)
+
+     print("""
+     This demo shows EXACTLY what happens when an LLM uses tools:
+
+     ┌─────────────────────────────────────────────────────────────────┐
+     │  User Message                                                   │
+     │       ↓                                                         │
+     │  LLM sees tools (local + peer)                                  │
+     │       ↓                                                         │
+     │  LLM decides: use tool OR respond directly                      │
+     │       ↓                                                         │
+     │  If tool: execute → get result → LLM incorporates               │
+     │       ↓                                                         │
+     │  Final response to user                                         │
+     └─────────────────────────────────────────────────────────────────┘
+     """)
+
+     # Setup mesh with logging agents
+     mesh = Mesh(mode="p2p")
+
+     assistant = LLMPoweredAgentWithLogging(role_name="assistant")
+     analyst = LLMPoweredAgentWithLogging(role_name="analyst")
+     researcher = LLMPoweredAgentWithLogging(role_name="researcher")
+
+     mesh._agent_registry["assistant"] = [assistant]
+     mesh._agent_registry["analyst"] = [analyst]
+     mesh._agent_registry["researcher"] = [researcher]
+     mesh.agents = [assistant, analyst, researcher]
+
+     for agent in mesh.agents:
+         agent.peers = PeerClient(
+             coordinator=None,
+             agent_id=agent.agent_id,
+             agent_role=agent.role,
+             agent_registry=mesh._agent_registry,
+             node_id="local"
+         )
+
+     # Start listeners
+     analyst_task = asyncio.create_task(analyst.run())
+     researcher_task = asyncio.create_task(researcher.run())
+     await asyncio.sleep(0.1)
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 1: LLM uses LOCAL tool
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 1: User asks to SEARCH (LLM uses LOCAL tool)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Please search for Python tutorials\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     response = await assistant.chat("Please search for Python tutorials")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ LLM decided to use LOCAL tool (search)")
+
+     # Reset for next scenario
+     assistant.tool_calls_made = []
+     assistant.conversation_history = []
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 2: LLM delegates to PEER
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 2: User asks for ANALYSIS (LLM delegates to PEER)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Please analyze the Q4 sales data\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     response = await assistant.chat("Please analyze the Q4 sales data")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ LLM decided to delegate to PEER (analyst) via ask_peer tool")
+
+     # Reset for next scenario
+     assistant.tool_calls_made = []
+     assistant.conversation_history = []
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 3: LLM responds directly
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 3: User says HELLO (LLM responds directly, no tools)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Hello, how are you?\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     response = await assistant.chat("Hello, how are you?")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ LLM decided NO tool needed, responded directly")
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 4: Multi-step - research then analyze
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 4: User asks for RESEARCH (LLM delegates to researcher)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Please research AI trends\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     response = await assistant.chat("Please research AI trends")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ LLM decided to delegate to PEER (researcher) via ask_peer tool")
+
+     # Cleanup
+     analyst.request_shutdown()
+     researcher.request_shutdown()
+     for t in [analyst_task, researcher_task]:
+         t.cancel()
+         try: await t
+         except asyncio.CancelledError: pass
+
+     print("\n" + "="*70)
+     print("LLM TOOL-USE DEMO COMPLETE!")
+     print("="*70)
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # MANUAL RUN
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ if __name__ == "__main__":
+     # Run mesh integration first
+     asyncio.run(test_full_integration())
+
+     print("""
+     ═══════════════════════════════════════════════════════════════════════════════
+     KEY INSIGHT: ALL agents are EQUAL mesh participants
+     ═══════════════════════════════════════════════════════════════════════════════
+
+     Every agent (Analyst, Researcher, Assistant):
+     ├── Has an LLM for reasoning
+     ├── Has get_tools() with LOCAL + PEER tools
+     ├── Can SEND via ask_peer, broadcast_update
+     ├── Can RECEIVE via run() loop
+     └── The role just defines what they're GOOD at
+
+     Communication is bidirectional:
+     ├── Analyst ←→ Researcher
+     ├── Researcher ←→ Assistant
+     └── Assistant ←→ Analyst
+
+     This is the power of the mesh:
+     - No hierarchies
+     - No "sender" vs "receiver" types
+     - Every agent is a full participant
+     ═══════════════════════════════════════════════════════════════════════════════
+     """)
+
+     # Run LLM tool-use demo
+     asyncio.run(demo_llm_tool_use())
+
+     print("""
+     ═══════════════════════════════════════════════════════════════════════════════
+     KEY INSIGHT: LLM TOOL-USE FLOW
+     ═══════════════════════════════════════════════════════════════════════════════
+
+     The LLM sees ALL tools (local + peer) and DECIDES:
+
+     ┌─────────────────────────────────────────────────────────────────────────┐
+     │  "search for X"  →  LLM uses LOCAL tool (search)                        │
+     │  "analyze X"     →  LLM uses PEER tool (ask_peer → analyst)             │
+     │  "research X"    →  LLM uses PEER tool (ask_peer → researcher)          │
+     │  "hello"         →  LLM responds DIRECTLY (no tool needed)              │
+     └─────────────────────────────────────────────────────────────────────────┘
+
+     The framework provides the tools.
+     The LLM decides WHEN and HOW to use them.
+     PeerTool makes other agents available as tools!
+     ═══════════════════════════════════════════════════════════════════════════════
+     """)