jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +730 -0
  6. jarviscore/__init__.py +49 -36
  7. jarviscore/adapter/__init__.py +15 -9
  8. jarviscore/adapter/decorator.py +23 -19
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/scaffold.py +1 -1
  11. jarviscore/cli/smoketest.py +3 -2
  12. jarviscore/core/agent.py +44 -1
  13. jarviscore/core/mesh.py +196 -35
  14. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  15. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  16. jarviscore/data/examples/customagent_p2p_example.py +730 -0
  17. jarviscore/docs/API_REFERENCE.md +264 -51
  18. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  19. jarviscore/docs/CONFIGURATION.md +35 -21
  20. jarviscore/docs/CUSTOMAGENT_GUIDE.md +1362 -0
  21. jarviscore/docs/GETTING_STARTED.md +107 -14
  22. jarviscore/docs/TROUBLESHOOTING.md +145 -7
  23. jarviscore/docs/USER_GUIDE.md +138 -361
  24. jarviscore/orchestration/engine.py +20 -8
  25. jarviscore/p2p/__init__.py +10 -0
  26. jarviscore/p2p/coordinator.py +129 -0
  27. jarviscore/p2p/messages.py +87 -0
  28. jarviscore/p2p/peer_client.py +576 -0
  29. jarviscore/p2p/peer_tool.py +268 -0
  30. jarviscore_framework-0.2.1.dist-info/METADATA +144 -0
  31. jarviscore_framework-0.2.1.dist-info/RECORD +132 -0
  32. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +1 -1
  33. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +1 -0
  34. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  35. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  36. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  37. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  38. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  39. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  40. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  41. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  42. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  43. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  44. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  45. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  46. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  47. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  48. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  49. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  50. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  51. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  52. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  53. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  54. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  55. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  56. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  57. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  60. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  61. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  62. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  63. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  64. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  65. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  66. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  67. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  68. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  69. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  70. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  71. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  72. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  73. tests/test_01_analyst_standalone.py +124 -0
  74. tests/test_02_assistant_standalone.py +164 -0
  75. tests/test_03_analyst_with_framework.py +945 -0
  76. tests/test_04_assistant_with_framework.py +1002 -0
  77. tests/test_05_integration.py +1301 -0
  78. tests/test_06_real_llm_integration.py +760 -0
  79. tests/test_07_distributed_single_node.py +578 -0
  80. tests/test_08_distributed_multi_node.py +454 -0
  81. tests/test_09_distributed_autoagent.py +509 -0
  82. tests/test_10_distributed_customagent.py +787 -0
  83. tests/test_mesh.py +35 -4
  84. jarviscore_framework-0.1.1.dist-info/METADATA +0 -137
  85. jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
  86. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1002 @@
+ """
+ Test 4: Assistant WITH JarvisCore Framework
+
+ Demonstrates an LLM-POWERED AGENT that can BOTH send AND receive.
+
+ KEY CONCEPT - All agents are equal participants:
+ - Every agent has an LLM for reasoning
+ - Every agent can SEND requests to peers (via ask_peer tool)
+ - Every agent can RECEIVE requests from peers (via run() loop)
+ - The "role" defines what they're GOOD at, not communication direction
+
+ This test shows the ASSISTANT role - good at search, calculate, chat.
+ But it's the SAME PATTERN as the analyst - full bidirectional communication.
+
+ BEFORE (Standalone):
+ - Assistant has search(), calculate() capabilities
+ - Assistant has get_tools() for its LLM
+ - Cannot communicate with other agents
+
+ AFTER (With Framework):
+ - Same search(), calculate() capabilities
+ - get_tools() NOW includes peer tools
+ - Can RECEIVE requests and process them with LLM
+ - Can SEND requests to other peers
+ - Full mesh participant
+
+ DEVELOPER CHANGES REQUIRED (same for ALL agents):
+ 1. Inherit from Agent
+ 2. Add `role` and `capabilities` class attributes
+ 3. Modify get_tools() to include self.peers.as_tool().schema
+ 4. Modify execute_tool() to dispatch peer tools
+ 5. Add async def run() loop for incoming requests
+ 6. Add async def execute_task() (required by base class)
+ """
+ import asyncio
+ import sys
+ import pytest
+ sys.path.insert(0, '.')
+
+ from jarviscore.core.agent import Agent
+ from jarviscore.core.mesh import Mesh
+ from jarviscore.p2p.peer_client import PeerClient
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # FIXTURES
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ @pytest.fixture
+ def mesh():
+     """Create a fresh mesh for each test."""
+     return Mesh(mode="p2p")
+
+
+ @pytest.fixture
+ def assistant(mesh):
+     """Create an assistant added to mesh."""
+     return mesh.add(ConnectedAssistant)
+
+
+ @pytest.fixture
+ def assistant_with_peers(mesh, assistant):
+     """Create assistant with peer client injected."""
+     assistant.peers = PeerClient(
+         coordinator=None,
+         agent_id=assistant.agent_id,
+         agent_role=assistant.role,
+         agent_registry=mesh._agent_registry,
+         node_id="local"
+     )
+     return assistant
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # THE AGENT - LLM-powered agent that can BOTH send AND receive
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class ConnectedAssistant(Agent):
+     """
+     Assistant agent - AFTER installing jarviscore.
+
+     This is a FULL LLM-POWERED AGENT that can:
+     - Use its own tools (search, calculate)
+     - Ask other peers for help (ask_peer)
+     - Receive and process requests from other agents
+     - Broadcast updates to all peers
+
+     Same pattern as Analyst - the only difference is what it's GOOD at.
+     """
+     # Identity for mesh registration
+     role = "assistant"
+     capabilities = ["chat", "search", "calculate"]
+
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.requests_processed = []
+         self.received_broadcasts = []
+
+     # ─────────────────────────────────────────────────────────────────
+     # CORE CAPABILITIES - What this agent is good at
+     # ─────────────────────────────────────────────────────────────────
+
+     def search(self, query: str) -> str:
+         """Search the web for information. (Core capability)"""
+         return f"Search results for '{query}': Found 10 relevant articles."
+
+     def calculate(self, expression: str) -> str:
+         """Calculate a math expression. (Core capability)"""
+         try:
+             result = eval(expression)
+             return f"Result: {result}"
+         except Exception as e:
+             return f"Error: {e}"
+
+     # ─────────────────────────────────────────────────────────────────
+     # LLM TOOL INTERFACE - What LLM can use
+     # ─────────────────────────────────────────────────────────────────
+
+     def get_tools(self) -> list:
+         """
+         Return tool definitions for THIS AGENT'S LLM.
+
+         Includes:
+         - Local tools (search, calculate)
+         - Peer tools (ask_peer, broadcast_update, list_peers)
+
+         The LLM decides which tools to use based on the task.
+         """
+         tools = [
+             {
+                 "name": "search",
+                 "description": "Search the web for information",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "query": {"type": "string", "description": "Search query"}
+                     },
+                     "required": ["query"]
+                 }
+             },
+             {
+                 "name": "calculate",
+                 "description": "Calculate a math expression",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "expression": {"type": "string", "description": "Math expression"}
+                     },
+                     "required": ["expression"]
+                 }
+             }
+         ]
+
+         # Add peer tools if connected to mesh
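+         # (as_tool().schema is a list of tool dicts in the same
+         # name/description/input_schema shape as the local tools above,
+         # covering ask_peer, broadcast_update and list_peers.)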
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """
+         Execute a tool by name.
+
+         This is called when the LLM decides to use a tool.
+         Routes to local tools or peer tools as appropriate.
+         """
+         # Peer tools
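+         # (checked first, so peer tools take precedence over any same-named
+         # local tool)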
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             return await self.peers.as_tool().execute(tool_name, args)
+
+         # Local tools
+         if tool_name == "search":
+             return self.search(args.get("query", ""))
+         elif tool_name == "calculate":
+             return self.calculate(args.get("expression", ""))
+
+         return f"Unknown tool: {tool_name}"
+
+     # ─────────────────────────────────────────────────────────────────
+     # MESSAGE HANDLING - Process incoming requests with LLM
+     # ─────────────────────────────────────────────────────────────────
+
+     async def run(self):
+         """
+         Main loop - receive and process requests.
+
+         When a request comes in, the LLM decides how to handle it.
+         The LLM might:
+         - Use local tools (search, calculate)
+         - Ask other peers for help (ask_peer)
+         - Combine multiple tool calls
+         """
+         self._logger.info(f"Assistant {self.agent_id} listening...")
+
+         while not self.shutdown_requested:
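+             # Short receive timeout so shutdown_requested is re-checked even
+             # when no messages arrive.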
+             msg = await self.peers.receive(timeout=0.5)
+             if msg is None:
+                 continue
+
+             if msg.is_request:
+                 # Process request with LLM
+                 query = msg.data.get("query", "")
+                 self._logger.info(f"Request from {msg.sender}: {query}")
+
+                 # Simulate LLM deciding how to respond
+                 # In real code: response = await self.llm.chat(query, tools=self.get_tools())
+                 # For testing, we'll use search as default action
+                 result = {"response": self.search(query)}
+                 self.requests_processed.append({"from": msg.sender, "query": query})
+
+                 await self.peers.respond(msg, result)
+
+             elif msg.is_notify:
+                 self._logger.info(f"Broadcast: {msg.data}")
+                 self.received_broadcasts.append(msg.data)
+
+     async def execute_task(self, task: dict) -> dict:
+         """Required by Agent base class."""
+         return {"status": "success"}
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # TESTS - Organized by what they verify
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class TestFrameworkIntegration:
+     """Tests that verify the agent integrates correctly with jarviscore."""
+
+     def test_inherits_from_agent(self):
+         """Agent must inherit from jarviscore.Agent."""
+         assert issubclass(ConnectedAssistant, Agent)
+
+     def test_has_required_attributes(self):
+         """Agent must declare role and capabilities."""
+         assert ConnectedAssistant.role == "assistant"
+         assert len(ConnectedAssistant.capabilities) > 0
+
+     def test_can_be_added_to_mesh(self, mesh):
+         """Agent can be registered with the mesh."""
+         assistant = mesh.add(ConnectedAssistant)
+         assert assistant in mesh.agents
+         assert assistant.agent_id is not None
+
+
+ class TestLocalTools:
+     """Tests for the agent's local tools."""
+
+     def test_search_works(self, assistant):
+         """Core search capability should work."""
+         result = assistant.search("python tutorials")
+         assert "python tutorials" in result
+         assert "Found" in result
+
+     def test_calculate_works(self, assistant):
+         """Core calculate capability should work."""
+         result = assistant.calculate("2 + 2")
+         assert "4" in result
+
+     def test_get_tools_returns_local_tools(self, assistant):
+         """get_tools() should return local tools (before peers injected)."""
+         tools = assistant.get_tools()
+         tool_names = [t["name"] for t in tools]
+
+         assert "search" in tool_names
+         assert "calculate" in tool_names
+
+
+ class TestPeerToolsIntegration:
+     """Tests for peer tools being added to the agent's toolset."""
+
+     def test_get_tools_includes_peer_tools_when_connected(self, assistant_with_peers, mesh):
+         """After peer injection, get_tools() should include peer tools."""
+         # Add another agent
+         class Analyst(Agent):
+             role = "analyst"
+             capabilities = ["analysis"]
+             async def execute_task(self, task): return {}
+
+         analyst = mesh.add(Analyst)
+         analyst.peers = PeerClient(
+             coordinator=None,
+             agent_id=analyst.agent_id,
+             agent_role=analyst.role,
+             agent_registry=mesh._agent_registry,
+             node_id="local"
+         )
+
+         tools = assistant_with_peers.get_tools()
+         tool_names = [t["name"] for t in tools]
+
+         # Local tools
+         assert "search" in tool_names
+         assert "calculate" in tool_names
+
+         # Peer tools
+         assert "ask_peer" in tool_names
+         assert "broadcast_update" in tool_names
+         assert "list_peers" in tool_names
+
300
+ def test_ask_peer_schema_shows_available_peers(self, assistant_with_peers, mesh):
301
+ """ask_peer tool should show other agents in the enum."""
302
+ class Analyst(Agent):
303
+ role = "analyst"
304
+ capabilities = ["analysis"]
305
+ async def execute_task(self, task): return {}
306
+
307
+ analyst = mesh.add(Analyst)
308
+ analyst.peers = PeerClient(
309
+ coordinator=None,
310
+ agent_id=analyst.agent_id,
311
+ agent_role=analyst.role,
312
+ agent_registry=mesh._agent_registry,
313
+ node_id="local"
314
+ )
315
+
316
+ tools = assistant_with_peers.get_tools()
317
+ ask_peer = next(t for t in tools if t["name"] == "ask_peer")
318
+
319
+ role_enum = ask_peer["input_schema"]["properties"]["role"]["enum"]
320
+ assert "analyst" in role_enum
321
+
322
+ @pytest.mark.asyncio
323
+ async def test_assistant_can_ask_analyst(self, assistant_with_peers, mesh):
324
+ """Assistant should be able to ask analyst for help."""
325
+ class Analyst(Agent):
326
+ role = "analyst"
327
+ capabilities = ["analysis"]
328
+ async def execute_task(self, task): return {}
329
+ async def run(self):
330
+ while not self.shutdown_requested:
331
+ msg = await self.peers.receive(timeout=0.5)
332
+ if msg and msg.is_request:
333
+ await self.peers.respond(msg, {
334
+ "response": f"Analysis of: {msg.data.get('query')}"
335
+ })
336
+
337
+ analyst = mesh.add(Analyst)
338
+ analyst.peers = PeerClient(
339
+ coordinator=None,
340
+ agent_id=analyst.agent_id,
341
+ agent_role=analyst.role,
342
+ agent_registry=mesh._agent_registry,
343
+ node_id="local"
344
+ )
345
+
346
+ analyst_task = asyncio.create_task(analyst.run())
347
+ await asyncio.sleep(0.1)
348
+
349
+ try:
350
+ result = await assistant_with_peers.execute_tool("ask_peer", {
351
+ "role": "analyst",
352
+ "question": "Analyze Q4 sales"
353
+ })
354
+
355
+ assert "Analysis" in result
356
+
357
+ finally:
358
+ analyst.request_shutdown()
359
+ analyst_task.cancel()
360
+ try:
361
+ await analyst_task
362
+ except asyncio.CancelledError:
363
+ pass
364
+
365
+
366
+ class TestReceivingRequests:
367
+ """Tests for the agent receiving and processing requests."""
368
+
369
+ @pytest.fixture
370
+ def requester(self, mesh):
371
+ """Create another agent to send requests."""
372
+ class Requester(Agent):
373
+ role = "requester"
374
+ capabilities = ["requesting"]
375
+ async def execute_task(self, task): return {}
376
+
377
+ req = mesh.add(Requester)
378
+ req.peers = PeerClient(
379
+ coordinator=None,
380
+ agent_id=req.agent_id,
381
+ agent_role=req.role,
382
+ agent_registry=mesh._agent_registry,
383
+ node_id="local"
384
+ )
385
+ return req
386
+
387
+ @pytest.mark.asyncio
388
+ async def test_assistant_receives_and_responds(self, assistant_with_peers, requester):
389
+ """Assistant should receive request, process with LLM, and respond."""
390
+ assistant_task = asyncio.create_task(assistant_with_peers.run())
391
+ await asyncio.sleep(0.1)
392
+
393
+ try:
394
+ response = await requester.peers.request("assistant", {
395
+ "query": "Find information about AI"
396
+ }, timeout=5.0)
397
+
398
+ assert response is not None
399
+ assert "response" in response
400
+ assert len(assistant_with_peers.requests_processed) == 1
401
+
402
+ finally:
403
+ assistant_with_peers.request_shutdown()
404
+ assistant_task.cancel()
405
+ try:
406
+ await assistant_task
407
+ except asyncio.CancelledError:
408
+ pass
409
+
410
+ @pytest.mark.asyncio
411
+ async def test_assistant_receives_broadcasts(self, assistant_with_peers, requester):
412
+ """Assistant should receive broadcast notifications."""
413
+ assistant_task = asyncio.create_task(assistant_with_peers.run())
414
+ await asyncio.sleep(0.1)
415
+
416
+ try:
417
+ await requester.peers.broadcast({"message": "System update!"})
418
+ await asyncio.sleep(0.2)
419
+
420
+ assert len(assistant_with_peers.received_broadcasts) == 1
421
+
422
+ finally:
423
+ assistant_with_peers.request_shutdown()
424
+ assistant_task.cancel()
425
+ try:
426
+ await assistant_task
427
+ except asyncio.CancelledError:
428
+ pass
429
+
430
+
431
+ class TestBidirectionalCommunication:
432
+ """Tests proving the agent can BOTH send AND receive."""
433
+
434
+ @pytest.mark.asyncio
435
+ async def test_assistant_full_mesh_participant(self, mesh):
436
+ """
437
+ Assistant can SEND to analyst AND RECEIVE from others.
438
+
439
+ This proves the agent is a full mesh participant.
440
+ """
441
+ # Create assistant
442
+ assistant = mesh.add(ConnectedAssistant)
443
+ assistant.peers = PeerClient(
444
+ coordinator=None,
445
+ agent_id=assistant.agent_id,
446
+ agent_role=assistant.role,
447
+ agent_registry=mesh._agent_registry,
448
+ node_id="local"
449
+ )
450
+
451
+ # Create analyst that assistant can ask
452
+ class Analyst(Agent):
453
+ role = "analyst"
454
+ capabilities = ["analysis"]
455
+ async def execute_task(self, task): return {}
456
+ async def run(self):
457
+ while not self.shutdown_requested:
458
+ msg = await self.peers.receive(timeout=0.5)
459
+ if msg and msg.is_request:
460
+ await self.peers.respond(msg, {"response": "Analysis data"})
461
+
462
+ analyst = mesh.add(Analyst)
463
+ analyst.peers = PeerClient(
464
+ coordinator=None,
465
+ agent_id=analyst.agent_id,
466
+ agent_role=analyst.role,
467
+ agent_registry=mesh._agent_registry,
468
+ node_id="local"
469
+ )
470
+
471
+ # Create requester
472
+ class Requester(Agent):
473
+ role = "requester"
474
+ capabilities = ["requesting"]
475
+ async def execute_task(self, task): return {}
476
+
477
+ requester = mesh.add(Requester)
478
+ requester.peers = PeerClient(
479
+ coordinator=None,
480
+ agent_id=requester.agent_id,
481
+ agent_role=requester.role,
482
+ agent_registry=mesh._agent_registry,
483
+ node_id="local"
484
+ )
485
+
486
+ # Start agents
487
+ analyst_task = asyncio.create_task(analyst.run())
488
+ assistant_task = asyncio.create_task(assistant.run())
489
+ await asyncio.sleep(0.1)
490
+
491
+ try:
492
+ # Assistant SENDS to analyst
493
+ result = await assistant.execute_tool("ask_peer", {
494
+ "role": "analyst",
495
+ "question": "Get analysis"
496
+ })
497
+ assert "Analysis data" in result
498
+
499
+ # Assistant RECEIVES from requester
500
+ response = await requester.peers.request("assistant", {
501
+ "query": "Search for AI"
502
+ }, timeout=5.0)
503
+ assert response is not None
504
+
505
+ finally:
506
+ assistant.request_shutdown()
507
+ analyst.request_shutdown()
508
+ assistant_task.cancel()
509
+ analyst_task.cancel()
510
+ for t in [assistant_task, analyst_task]:
511
+ try:
512
+ await t
513
+ except asyncio.CancelledError:
514
+ pass
515
+
516
+
517
+ class TestToolSchemaFormat:
518
+ """Tests that tool schemas are valid for LLM consumption."""
519
+
520
+ def test_schema_has_required_fields(self, assistant_with_peers, mesh):
521
+ """All tools should have proper Anthropic schema format."""
522
+ class Analyst(Agent):
523
+ role = "analyst"
524
+ capabilities = ["analysis"]
525
+ async def execute_task(self, task): return {}
526
+
527
+ mesh.add(Analyst)
528
+
529
+ tools = assistant_with_peers.get_tools()
530
+
531
+ for tool in tools:
532
+ assert "name" in tool
533
+ assert "description" in tool
534
+ if tool["name"] in ["search", "calculate", "ask_peer", "broadcast_update"]:
535
+ assert "input_schema" in tool
536
+ assert tool["input_schema"]["type"] == "object"
537
+ assert "properties" in tool["input_schema"]
538
+
539
+
540
+ # ═══════════════════════════════════════════════════════════════════════════════
541
+ # MANUAL RUN
542
+ # ═══════════════════════════════════════════════════════════════════════════════
543
+
544
+ async def _run_integration_test():
545
+ """Full integration showing assistant as a complete mesh participant."""
546
+ print("\n[Integration: Assistant as full participant]")
547
+
548
+ mesh = Mesh(mode="p2p")
549
+
550
+ # Create assistant
551
+ assistant = mesh.add(ConnectedAssistant)
552
+
553
+ # Create analyst for assistant to talk to
554
+ class Analyst(Agent):
555
+ role = "analyst"
556
+ capabilities = ["analysis"]
557
+ async def execute_task(self, task): return {}
558
+ async def run(self):
559
+ while not self.shutdown_requested:
560
+ msg = await self.peers.receive(timeout=0.5)
561
+ if msg and msg.is_request:
562
+ await self.peers.respond(msg, {
563
+ "response": f"Analysis: {msg.data.get('query')}"
564
+ })
565
+
566
+ analyst = mesh.add(Analyst)
567
+
568
+ # Create requester that will ask assistant
569
+ class Requester(Agent):
570
+ role = "requester"
571
+ capabilities = ["requesting"]
572
+ async def execute_task(self, task): return {}
573
+
574
+ requester = mesh.add(Requester)
575
+
576
+ # Inject peers
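+     # (manual injection with coordinator=None keeps this demo in-process)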
+     for agent in mesh.agents:
+         agent.peers = PeerClient(
+             coordinator=None,
+             agent_id=agent.agent_id,
+             agent_role=agent.role,
+             agent_registry=mesh._agent_registry,
+             node_id="local"
+         )
+
+     # Start agents
+     assistant_task = asyncio.create_task(assistant.run())
+     analyst_task = asyncio.create_task(analyst.run())
+     await asyncio.sleep(0.1)
+
+     try:
+         # Show assistant's tools (includes peer tools)
+         tools = assistant.get_tools()
+         print(f" Assistant tools: {[t['name'] for t in tools]}")
+         assert "ask_peer" in [t['name'] for t in tools]
+         print(" ✓ Assistant has peer tools")
+
+         # Assistant SENDS to analyst
+         result = await assistant.execute_tool("ask_peer", {
+             "role": "analyst",
+             "question": "Analyze market data"
+         })
+         print(f" ✓ Assistant asked analyst: {result}")
+
+         # Assistant RECEIVES from requester
+         response = await requester.peers.request("assistant", {
+             "query": "Search for trends"
+         }, timeout=5.0)
+         print(f" ✓ Assistant received and responded: {response['response'][:40]}...")
+
+         # Assistant broadcasts
+         result = await assistant.execute_tool("broadcast_update", {
+             "message": "Task complete!"
+         })
+         print(f" ✓ Assistant broadcast: {result}")
+
+         print("\n PROVED: Assistant can SEND, RECEIVE, and BROADCAST")
+
+     finally:
+         assistant.request_shutdown()
+         analyst.request_shutdown()
+         assistant_task.cancel()
+         analyst_task.cancel()
+         for t in [assistant_task, analyst_task]:
+             try:
+                 await t
+             except asyncio.CancelledError:
+                 pass
+
+
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # LLM TOOL-USE DEMO - Shows how Assistant's LLM decides to use tools
+ # ═══════════════════════════════════════════════════════════════════════════════
+
+ class AssistantMockLLM:
+     """
+     Simulates Assistant's LLM decision-making.
+
+     The Assistant's LLM might decide to:
+     - Use LOCAL tool (search) for web searches
+     - Use LOCAL tool (calculate) for math
+     - Use PEER tool (ask_peer → analyst) for analysis
+     - Respond directly for simple queries
+     """
+
+     def __init__(self):
+         self.calls = []
+
+     def chat(self, messages: list, tools: list) -> dict:
+         """Simulate LLM deciding what tool to use."""
+         self.calls.append({"messages": messages, "tools": tools})
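+         # Recorded so tests and demos can inspect exactly what the mock saw.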
+
+         user_msg = ""
+         for msg in reversed(messages):
+             if msg.get("role") == "user":
+                 user_msg = msg.get("content", "").lower()
+                 break
+
+         tool_names = [t["name"] for t in tools]
+
+         # Assistant's decision logic
+         if "search" in user_msg and "search" in tool_names:
+             # Use LOCAL search tool
+             return {
+                 "type": "tool_use",
+                 "tool": "search",
+                 "args": {"query": user_msg}
+             }
+         elif "calculate" in user_msg or any(c in user_msg for c in ['+', '-', '*', '/']):
+             if "calculate" in tool_names:
+                 # Extract expression or use placeholder
+                 expr = user_msg.split("calculate")[-1].strip() if "calculate" in user_msg else "2+2"
+                 return {
+                     "type": "tool_use",
+                     "tool": "calculate",
+                     "args": {"expression": expr or "2+2"}
+                 }
+         elif "analyze" in user_msg or "analysis" in user_msg:
+             if "ask_peer" in tool_names:
+                 # Need analysis - ask analyst
+                 return {
+                     "type": "tool_use",
+                     "tool": "ask_peer",
+                     "args": {"role": "analyst", "question": user_msg}
+                 }
+
+         # Default: respond directly
+         return {
+             "type": "text",
+             "content": f"[assistant] I can help with that: {user_msg}"
+         }
+
+     def incorporate_tool_result(self, tool_name: str, result: str) -> str:
+         return f"Based on {tool_name}: {result}"
+
+
+ class LLMPoweredAssistant(ConnectedAssistant):
+     """Assistant with full LLM simulation for demo."""
+
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.llm = AssistantMockLLM()
+         self.conversation_history = []
+         self.tool_calls_made = []
+
+     async def chat(self, user_message: str) -> str:
+         """Complete LLM chat loop."""
+         self.conversation_history.append({"role": "user", "content": user_message})
+
+         tools = self.get_tools()
+         llm_response = self.llm.chat(self.conversation_history, tools)
+
+         if llm_response["type"] == "tool_use":
+             tool_name = llm_response["tool"]
+             tool_args = llm_response["args"]
+             self.tool_calls_made.append({"tool": tool_name, "args": tool_args})
+
+             tool_result = await self.execute_tool(tool_name, tool_args)
+
+             self.conversation_history.append({"role": "assistant", "content": f"[Tool: {tool_name}]"})
+             self.conversation_history.append({"role": "tool", "content": tool_result})
+
+             final = self.llm.incorporate_tool_result(tool_name, tool_result)
+         else:
+             final = llm_response["content"]
+
+         self.conversation_history.append({"role": "assistant", "content": final})
+         return final
+
+
+ async def demo_assistant_llm_flow():
+     """Demo showing how Assistant's LLM decides to use tools."""
+     print("\n" + "="*70)
+     print("ASSISTANT LLM TOOL-USE FLOW")
+     print("="*70)
+
+     print("""
+     This shows how the ASSISTANT's LLM decides which tools to use:
+
+     ┌─────────────────────────────────────────────────────────────────┐
+     │ "search for X" → LLM uses LOCAL tool (search) │
+     │ "calculate X" → LLM uses LOCAL tool (calculate) │
+     │ "analyze X" → LLM uses PEER tool (ask_peer→analyst) │
+     │ "hello" → LLM responds DIRECTLY │
+     └─────────────────────────────────────────────────────────────────┘
+     """)
+
+     mesh = Mesh(mode="p2p")
+
+     assistant = LLMPoweredAssistant()
+     mesh._agent_registry["assistant"] = [assistant]
+
+     # Add analyst for assistant to delegate to
+     class Analyst(Agent):
+         role = "analyst"
+         capabilities = ["analysis"]
+         async def execute_task(self, task): return {}
+         async def run(self):
+             while not self.shutdown_requested:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     await self.peers.respond(msg, {
+                         "response": f"[analyst] Analysis complete: {msg.data.get('query')} shows positive trends"
+                     })
+
+     analyst = Analyst()
+     mesh._agent_registry["analyst"] = [analyst]
+     mesh.agents = [assistant, analyst]
+
+     for agent in mesh.agents:
+         agent.peers = PeerClient(
+             coordinator=None,
+             agent_id=agent.agent_id,
+             agent_role=agent.role,
+             agent_registry=mesh._agent_registry,
+             node_id="local"
+         )
+
+     analyst_task = asyncio.create_task(analyst.run())
+     await asyncio.sleep(0.1)
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 1: Assistant uses LOCAL search tool
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 1: Request to SEARCH (Assistant uses LOCAL tool)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Please search for Python tutorials\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     tools = assistant.get_tools()
+     print(f" │")
+     print(f" ├─→ [LLM RECEIVES] Message: \"Please search for Python tutorials\"")
+     print(f" │ Tools available: {[t['name'] for t in tools]}")
+
+     response = await assistant.chat("Please search for Python tutorials")
+
+     print(f" │")
+     print(f" ├─→ [LLM DECIDES] Use LOCAL tool: search")
+     print(f" │ Args: {assistant.tool_calls_made[-1]['args']}")
+     print(f" │")
+     print(f" ├─→ [EXECUTE LOCAL TOOL] search")
+     print(f" │")
+     print(f" └─→ [LLM RESPONDS] \"{response[:60]}...\"")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ Assistant's LLM used LOCAL tool (search)")
+
+     assistant.tool_calls_made = []
+     assistant.conversation_history = []
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 2: Assistant uses LOCAL calculate tool
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 2: Math request (Assistant uses LOCAL calculate tool)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Please calculate 200 * 0.15\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     print(f" │")
+     print(f" ├─→ [LLM RECEIVES] Message: \"Please calculate 200 * 0.15\"")
+     print(f" │ Tools available: {[t['name'] for t in tools]}")
+
+     response = await assistant.chat("Please calculate 200 * 0.15")
+
+     print(f" │")
+     print(f" ├─→ [LLM DECIDES] Use LOCAL tool: calculate")
+     print(f" │ Args: {assistant.tool_calls_made[-1]['args']}")
+     print(f" │")
+     print(f" ├─→ [EXECUTE LOCAL TOOL] calculate")
+     print(f" │")
+     print(f" └─→ [LLM RESPONDS] \"{response}\"")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ Assistant's LLM used LOCAL tool (calculate)")
+
+     assistant.tool_calls_made = []
+     assistant.conversation_history = []
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 3: Assistant delegates to analyst
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 3: Analysis request (Assistant delegates to ANALYST)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Please analyze the sales data\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     print(f" │")
+     print(f" ├─→ [LLM RECEIVES] Message: \"Please analyze the sales data\"")
+     print(f" │ Tools available: {[t['name'] for t in tools]}")
+
+     response = await assistant.chat("Please analyze the sales data")
+
+     print(f" │")
+     print(f" ├─→ [LLM DECIDES] Use PEER tool: ask_peer → analyst")
+     print(f" │ Args: {assistant.tool_calls_made[-1]['args']}")
+     print(f" │")
+     print(f" ├─→ [EXECUTE PEER TOOL] ask_peer")
+     print(f" │ Sending to: analyst")
+     print(f" │ Analyst responds: \"Analysis complete...\"")
+     print(f" │")
+     print(f" └─→ [LLM RESPONDS] \"{response[:60]}...\"")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ Assistant's LLM delegated to PEER (analyst)")
+
+     assistant.tool_calls_made = []
+     assistant.conversation_history = []
+
+     # ─────────────────────────────────────────────────────────────────
+     # SCENARIO 4: Assistant responds directly
+     # ─────────────────────────────────────────────────────────────────
+     print("\n" + "─"*70)
+     print("SCENARIO 4: Simple greeting (Assistant responds DIRECTLY)")
+     print("─"*70)
+     print(f"\n[USER] → Assistant: \"Hello, how are you?\"")
+     print(f"\n[ASSISTANT LLM FLOW]")
+
+     print(f" │")
+     print(f" ├─→ [LLM RECEIVES] Message: \"Hello, how are you?\"")
+     print(f" │ Tools available: {[t['name'] for t in tools]}")
+
+     response = await assistant.chat("Hello, how are you?")
+
+     print(f" │")
+     print(f" └─→ [LLM DECIDES] Respond directly (no tool needed)")
+
+     print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+     print(f"\n✓ Assistant's LLM responded DIRECTLY (no tools)")
+
+     # Cleanup
+     analyst.request_shutdown()
+     analyst_task.cancel()
+     try: await analyst_task
+     except asyncio.CancelledError: pass
+
+     print("\n" + "="*70)
+     print("ASSISTANT LLM DEMO COMPLETE!")
+     print("="*70)
+
+
+ if __name__ == "__main__":
+     print("\n" + "="*60)
+     print("TEST 4: ASSISTANT AS FULL LLM-POWERED MESH PARTICIPANT")
+     print("="*60)
+
+     print("\n[Framework Integration]")
+     t = TestFrameworkIntegration()
+     t.test_inherits_from_agent()
+     print("✓ Inherits from Agent")
+     t.test_has_required_attributes()
+     print("✓ Has role and capabilities")
+
+     mesh1 = Mesh(mode="p2p")
+     t.test_can_be_added_to_mesh(mesh1)
+     print("✓ Can be added to mesh")
+
+     print("\n[Local Tools]")
+     mesh2 = Mesh(mode="p2p")
+     assistant = mesh2.add(ConnectedAssistant)
+     t2 = TestLocalTools()
+     t2.test_search_works(assistant)
+     print("✓ search() works")
+     t2.test_calculate_works(assistant)
+     print("✓ calculate() works")
+     t2.test_get_tools_returns_local_tools(assistant)
+     print("✓ get_tools() returns local tools")
+
+     print("\n[Peer Tools Integration]")
+     mesh3 = Mesh(mode="p2p")
+     assistant3 = mesh3.add(ConnectedAssistant)
+     assistant3.peers = PeerClient(
+         coordinator=None,
+         agent_id=assistant3.agent_id,
+         agent_role=assistant3.role,
+         agent_registry=mesh3._agent_registry,
+         node_id="local"
+     )
+
+     class TempAnalyst(Agent):
+         role = "analyst"
+         capabilities = ["analysis"]
+         async def execute_task(self, task): return {}
+
+     other = mesh3.add(TempAnalyst)
+     other.peers = PeerClient(
+         coordinator=None,
+         agent_id=other.agent_id,
+         agent_role=other.role,
+         agent_registry=mesh3._agent_registry,
+         node_id="local"
+     )
+
+     tools = assistant3.get_tools()
+     tool_names = [t["name"] for t in tools]
+     assert "ask_peer" in tool_names
+     assert "broadcast_update" in tool_names
+     assert "list_peers" in tool_names
+     print(f"✓ get_tools() includes peer tools: {tool_names}")
+
+     print("\n[Bidirectional Communication]")
+     asyncio.run(_run_integration_test())
+
+     print("\n" + "="*60)
+     print("ALL TESTS PASSED!")
+     print("="*60)
+     print("""
+     KEY INSIGHT: Every agent is a FULL MESH PARTICIPANT
+
+     The assistant can:
+     ├── Use LOCAL tools (search, calculate)
+     ├── SEND to peers (ask_peer → analyst)
+     ├── RECEIVE from peers (requester → assistant)
+     └── BROADCAST to all (broadcast_update)
+
+     SAME PATTERN as the analyst!
+     The role ("assistant") defines what it's GOOD at,
+     NOT whether it sends or receives.
+     """)
+
+     # Run LLM tool-use demo
+     asyncio.run(demo_assistant_llm_flow())
+
+     print("""
+     ═══════════════════════════════════════════════════════════════════════════════
+     KEY INSIGHT: ASSISTANT LLM TOOL-USE DECISIONS
+     ═══════════════════════════════════════════════════════════════════════════════
+
+     The Assistant's LLM sees tools and DECIDES:
+
+     ┌─────────────────────────────────────────────────────────────────────────┐
+     │ "search for X" → Use LOCAL tool (search) │
+     │ "calculate X" → Use LOCAL tool (calculate) │
+     │ "analyze X" → Use PEER tool (ask_peer → analyst) │
+     │ "hello" → Respond DIRECTLY (no tool needed) │
+     └─────────────────────────────────────────────────────────────────────────┘
+
+     The Assistant is GOOD at search/calculate, but can delegate analysis!
+     ═══════════════════════════════════════════════════════════════════════════════
+     """)