jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +730 -0
  6. jarviscore/__init__.py +49 -36
  7. jarviscore/adapter/__init__.py +15 -9
  8. jarviscore/adapter/decorator.py +23 -19
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/scaffold.py +1 -1
  11. jarviscore/cli/smoketest.py +3 -2
  12. jarviscore/core/agent.py +44 -1
  13. jarviscore/core/mesh.py +196 -35
  14. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  15. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  16. jarviscore/data/examples/customagent_p2p_example.py +730 -0
  17. jarviscore/docs/API_REFERENCE.md +264 -51
  18. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  19. jarviscore/docs/CONFIGURATION.md +35 -21
  20. jarviscore/docs/CUSTOMAGENT_GUIDE.md +1362 -0
  21. jarviscore/docs/GETTING_STARTED.md +107 -14
  22. jarviscore/docs/TROUBLESHOOTING.md +145 -7
  23. jarviscore/docs/USER_GUIDE.md +138 -361
  24. jarviscore/orchestration/engine.py +20 -8
  25. jarviscore/p2p/__init__.py +10 -0
  26. jarviscore/p2p/coordinator.py +129 -0
  27. jarviscore/p2p/messages.py +87 -0
  28. jarviscore/p2p/peer_client.py +576 -0
  29. jarviscore/p2p/peer_tool.py +268 -0
  30. jarviscore_framework-0.2.1.dist-info/METADATA +144 -0
  31. jarviscore_framework-0.2.1.dist-info/RECORD +132 -0
  32. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +1 -1
  33. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +1 -0
  34. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  35. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  36. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  37. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  38. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  39. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  40. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  41. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  42. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  43. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  44. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  45. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  46. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  47. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  48. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  49. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  50. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  51. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  52. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  53. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  54. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  55. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  56. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  57. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  60. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  61. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  62. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  63. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  64. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  65. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  66. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  67. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  68. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  69. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  70. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  71. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  72. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  73. tests/test_01_analyst_standalone.py +124 -0
  74. tests/test_02_assistant_standalone.py +164 -0
  75. tests/test_03_analyst_with_framework.py +945 -0
  76. tests/test_04_assistant_with_framework.py +1002 -0
  77. tests/test_05_integration.py +1301 -0
  78. tests/test_06_real_llm_integration.py +760 -0
  79. tests/test_07_distributed_single_node.py +578 -0
  80. tests/test_08_distributed_multi_node.py +454 -0
  81. tests/test_09_distributed_autoagent.py +509 -0
  82. tests/test_10_distributed_customagent.py +787 -0
  83. tests/test_mesh.py +35 -4
  84. jarviscore_framework-0.1.1.dist-info/METADATA +0 -137
  85. jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
  86. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1362 @@
+ # CustomAgent Guide
+
+ CustomAgent lets you integrate your **existing agent code** with JarvisCore's networking and orchestration capabilities.
+
+ **You keep**: Your execution logic, LLM calls, and business logic.
+ **Framework provides**: Agent discovery, peer communication, workflow orchestration, and multi-node deployment.
+
+ ---
+
+ ## Table of Contents
+
+ 1. [Prerequisites](#prerequisites)
+ 2. [Choose Your Mode](#choose-your-mode)
+ 3. [P2P Mode](#p2p-mode)
+ 4. [Distributed Mode](#distributed-mode)
+ 5. [API Reference](#api-reference)
+ 6. [Multi-Node Deployment](#multi-node-deployment)
+ 7. [Error Handling](#error-handling)
+ 8. [Troubleshooting](#troubleshooting)
+
+ ---
+
+ ## Prerequisites
+
+ ### Installation
+
+ ```bash
+ pip install jarviscore-framework
+ ```
+
+ ### Your LLM Client
+
+ Throughout this guide, we use `MyLLMClient()` as a placeholder for your LLM. Replace it with your actual client:
+
+ ```python
+ # Example: OpenAI
+ from openai import OpenAI
+ client = OpenAI()
+
+ def chat(prompt: str) -> str:
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=[{"role": "user", "content": prompt}]
+     )
+     return response.choices[0].message.content
+
+ # Example: Anthropic
+ from anthropic import Anthropic
+ client = Anthropic()
+
+ def chat(prompt: str) -> str:
+     response = client.messages.create(
+         model="claude-3-sonnet-20240229",
+         max_tokens=1024,
+         messages=[{"role": "user", "content": prompt}]
+     )
+     return response.content[0].text
+
+ # Example: Local/Custom
+ class MyLLMClient:
+     def chat(self, prompt: str) -> str:
+         # Your implementation
+         return "response"
+ ```
+
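+ > **Note**: The complete examples later in this guide call the client with a richer, tool-calling interface: `self.llm.chat(messages, tools=..., system=...)` and `self.llm.continue_with_tool_result(...)`. A minimal sketch of that interface is below; `MyLLMClient` and both method names are placeholders for your own wrapper (not framework APIs), and the response shape is an assumption modeled on Anthropic-style tool use:
+
+ ```python
+ # Sketch: the tool-calling client interface assumed by the complete examples
+ class MyLLMClient:
+     def chat(self, messages: list, tools: list = None, system: str = "") -> dict:
+         # Call your provider here. Return either
+         #   {"type": "text", "content": "..."}
+         # or, when the model requests a tool,
+         #   {"type": "tool_use", "tool_name": "...", "tool_args": {...}, "tool_use_id": "..."}
+         raise NotImplementedError
+
+     def continue_with_tool_result(self, messages: list, tool_use_id: str,
+                                   tool_result: str, tools: list = None) -> dict:
+         # Append the tool result to the conversation, call the provider again,
+         # and return the same shape as chat().
+         raise NotImplementedError
+ ```
+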
+ ---
+
+ ## Choose Your Mode
+
+ ```
+ ┌─────────────────────────────────────────────────────────────┐
+ │                  Which mode should I use?                   │
+ └─────────────────────────────────────────────────────────────┘
+
+
+           ┌───────────────────────────────┐
+           │ Do agents need to coordinate  │
+           │ continuously in real-time?    │
+           └───────────────────────────────┘
+                │                  │
+               YES                 NO
+                │                  │
+                ▼                  ▼
+          ┌──────────┐   ┌───────────────────────┐
+          │ P2P Mode │   │ Do you have task      │
+          └──────────┘   │ pipelines with        │
+                         │ dependencies?         │
+                         └───────────────────────┘
+                              │            │
+                             YES           NO
+                              │            │
+                              ▼            ▼
+                       ┌────────────┐  ┌──────────┐
+                       │Distributed │  │ P2P Mode │
+                       │    Mode    │  └──────────┘
+                       └────────────┘
+ ```
+
+ ### Quick Comparison
+
+ | Feature | P2P Mode | Distributed Mode |
+ |---------|----------|------------------|
+ | **Primary method** | `run()` - continuous loop | `execute_task()` - on-demand |
+ | **Communication** | Direct peer messaging | Workflow orchestration |
+ | **Best for** | Chatbots, real-time agents | Pipelines, batch processing |
+ | **Coordination** | Agents self-coordinate | Framework coordinates |
+ | **Supports workflows** | No | Yes |
+
+ ---
+
+ ## P2P Mode
+
+ P2P mode is for agents that run continuously and communicate directly with each other.
+
+ ### Migration Overview
+
+ ```
+ YOUR PROJECT STRUCTURE
+ ──────────────────────────────────────────────────────────────────
+
+ BEFORE (standalone):          AFTER (with JarvisCore):
+ ├── my_agent.py               ├── agents.py   ← Modified agent code
+ └── (run directly)            └── main.py     ← NEW entry point
+
+
+                               This is now how you
+                               start your agents
+ ```
+
+ ### Step 1: Install the Framework
+
+ ```bash
+ pip install jarviscore-framework
+ ```
+
+ ### Step 2: Your Existing Code (Before)
+
+ Let's say you have a standalone agent like this:
+
+ ```python
+ # my_agent.py (YOUR EXISTING CODE)
+ class MyResearcher:
+     """Your existing agent - runs standalone."""
+
+     def __init__(self):
+         self.llm = MyLLMClient()
+
+     def research(self, query: str) -> str:
+         return self.llm.chat(f"Research: {query}")
+
+ # You currently run it directly:
+ if __name__ == "__main__":
+     agent = MyResearcher()
+     result = agent.research("What is AI?")
+     print(result)
+ ```
+
+ ### Step 3: Modify Your Agent Code → `agents.py`
+
+ Convert your existing class to inherit from `CustomAgent`:
+
+ ```python
+ # agents.py (MODIFIED VERSION OF YOUR CODE)
+ import asyncio
+ from jarviscore.profiles import CustomAgent
+
+
+ class ResearcherAgent(CustomAgent):
+     """Your agent, now framework-integrated."""
+
+     # NEW: Required class attributes for discovery
+     role = "researcher"
+     capabilities = ["research", "analysis"]
+
+     async def setup(self):
+         """NEW: Called once on startup. Move your __init__ logic here."""
+         await super().setup()
+         self.llm = MyLLMClient()  # Your existing initialization
+
+     async def run(self):
+         """NEW: Main loop - replaces your if __name__ == '__main__' block."""
+         while not self.shutdown_requested:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     query = msg.data.get("question", "")
+                     # YOUR EXISTING LOGIC:
+                     result = self.llm.chat(f"Research: {query}")
+                     await self.peers.respond(msg, {"response": result})
+             await asyncio.sleep(0.1)
+
+     async def execute_task(self, task: dict) -> dict:
+         """
+         Required by base Agent class (@abstractmethod).
+
+         In P2P mode, your main logic lives in run(), not here.
+         This must exist because Python requires all abstract methods
+         to be implemented, or you get TypeError on instantiation.
+         """
+         return {"status": "success", "note": "This agent uses run() for P2P mode"}
+ ```
+
+ **What changed:**
+
+ | Before | After |
+ |--------|-------|
+ | `class MyResearcher:` | `class ResearcherAgent(CustomAgent):` |
+ | `def __init__(self):` | `async def setup(self):` + `await super().setup()` |
+ | `if __name__ == "__main__":` | `async def run(self):` loop |
+ | Direct method calls | Peer message handling |
+
+ > **Note**: This is a minimal example. For the full pattern with **LLM-driven peer communication** (where your LLM autonomously decides when to call other agents), see the [Complete Example](#complete-example-llm-driven-peer-communication) below.
+
+ ### Step 4: Create New Entry Point → `main.py`
+
+ **This is your NEW main file.** Instead of running `python my_agent.py`, you'll run `python main.py`.
+
+ ```python
+ # main.py (NEW FILE - YOUR NEW ENTRY POINT)
+ import asyncio
+ from jarviscore import Mesh
+ from agents import ResearcherAgent
+
+
+ async def main():
+     # Create the mesh network
+     mesh = Mesh(
+         mode="p2p",
+         config={
+             "bind_port": 7950,       # Port for P2P communication
+             "node_name": "my-node",  # Identifies this node in the network
+         }
+     )
+
+     # Register your agent(s)
+     mesh.add(ResearcherAgent)
+
+     # Start the mesh (calls setup() on all agents)
+     await mesh.start()
+
+     # Run forever - agents handle their own work in run() loops
+     await mesh.run_forever()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ **Why a new entry file?**
+
+ | Reason | Explanation |
+ |--------|-------------|
+ | **Mesh setup** | The Mesh handles networking, discovery, and lifecycle |
+ | **Multiple agents** | You can add many agents to one mesh |
+ | **Clean separation** | Agent logic in `agents.py`, orchestration in `main.py` |
+ | **Standard pattern** | Consistent entry point across all JarvisCore projects |
+
+ ### Step 5: Run Your Agents
+
+ ```bash
+ # OLD WAY (no longer used):
+ # python my_agent.py
+
+ # NEW WAY:
+ python main.py
+ ```
+
+ ---
+
+ ### Complete Example: LLM-Driven Peer Communication
+
+ This is the **key pattern** for P2P mode. Your LLM gets peer tools added to its toolset, and it **autonomously decides** when to ask other agents for help.
+
+ ```
+ ┌─────────────────────────────────────────────────────────────────┐
+ │                  LLM-DRIVEN PEER COMMUNICATION                  │
+ ├─────────────────────────────────────────────────────────────────┤
+ │                                                                 │
+ │   User: "Analyze this sales data"                               │
+ │                     │                                           │
+ │                     ▼                                           │
+ │   ┌─────────────────────────────────────┐                       │
+ │   │          ASSISTANT'S LLM            │                       │
+ │   │                                     │                       │
+ │   │  Tools available:                   │                       │
+ │   │  - web_search (local)               │                       │
+ │   │  - ask_peer (peer)    ◄── NEW!      │                       │
+ │   │  - broadcast (peer)   ◄── NEW!      │                       │
+ │   │                                     │                       │
+ │   │  LLM decides: "I need analysis      │                       │
+ │   │  help, let me ask the analyst"      │                       │
+ │   └─────────────────────────────────────┘                       │
+ │                     │                                           │
+ │                     ▼  uses ask_peer tool                       │
+ │   ┌─────────────────────────────────────┐                       │
+ │   │            ANALYST AGENT            │                       │
+ │   │    (processes with its own LLM)     │                       │
+ │   └─────────────────────────────────────┘                       │
+ │                     │                                           │
+ │                     ▼  returns analysis                         │
+ │   ┌─────────────────────────────────────┐                       │
+ │   │          ASSISTANT'S LLM            │                       │
+ │   │  "Based on the analyst's findings,  │                       │
+ │   │  here's your answer..."             │                       │
+ │   └─────────────────────────────────────┘                       │
+ │                                                                 │
+ └─────────────────────────────────────────────────────────────────┘
+ ```
+
+ **The key insight**: You add peer tools to your LLM's toolset. The LLM decides when to use them.
+
+ ```python
+ # agents.py
+ import asyncio
+ from jarviscore.profiles import CustomAgent
+
+
+ class AnalystAgent(CustomAgent):
+     """
+     Analyst agent - a specialist in data analysis.
+
+     This agent:
+     1. Listens for incoming requests from peers
+     2. Processes requests using its own LLM
+     3. Responds with analysis results
+     """
+     role = "analyst"
+     capabilities = ["analysis", "data_interpretation", "reporting"]
+
+     async def setup(self):
+         await super().setup()
+         self.llm = MyLLMClient()  # Your LLM client
+
+     def get_tools(self) -> list:
+         """
+         Tools available to THIS agent's LLM.
+
+         The analyst has local analysis tools.
+         It can also ask other peers if needed.
+         """
+         tools = [
+             {
+                 "name": "statistical_analysis",
+                 "description": "Run statistical analysis on numeric data",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "data": {"type": "string", "description": "Data to analyze"}
+                     },
+                     "required": ["data"]
+                 }
+             }
+         ]
+
+         # ADD PEER TOOLS - so LLM can ask other agents if needed
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """
+         Execute a tool by name.
+
+         Routes to peer tools or local tools as appropriate.
+         """
+         # PEER TOOLS - check and execute
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             return await self.peers.as_tool().execute(tool_name, args)
+
+         # LOCAL TOOLS
+         if tool_name == "statistical_analysis":
+             data = args.get("data", "")
+             return f"Analysis of '{data}': mean=150.3, std=23.4, trend=positive"
+
+         return f"Unknown tool: {tool_name}"
+
+     async def process_with_llm(self, query: str) -> str:
+         """Process a request using LLM with tools."""
+         system_prompt = """You are an expert data analyst.
+ You have tools for statistical analysis.
+ Analyze data thoroughly and provide insights."""
+
+         tools = self.get_tools()
+         messages = [{"role": "user", "content": query}]
+
+         # Call LLM with tools
+         response = self.llm.chat(messages, tools=tools, system=system_prompt)
+
+         # Handle tool use if LLM decides to use a tool
+         if response.get("type") == "tool_use":
+             tool_result = await self.execute_tool(
+                 response["tool_name"],
+                 response["tool_args"]
+             )
+             # Continue conversation with tool result
+             response = self.llm.continue_with_tool_result(
+                 messages, response["tool_use_id"], tool_result
+             )
+
+         return response.get("content", "Analysis complete.")
+
+     async def run(self):
+         """Listen for incoming requests from peers."""
+         while not self.shutdown_requested:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     query = msg.data.get("question", msg.data.get("query", ""))
+
+                     # Process with LLM
+                     result = await self.process_with_llm(query)
+
+                     await self.peers.respond(msg, {"response": result})
+             await asyncio.sleep(0.1)
+
+     async def execute_task(self, task: dict) -> dict:
+         """Required by base class."""
+         return {"status": "success"}
+
+
+ class AssistantAgent(CustomAgent):
+     """
+     Assistant agent - coordinates with other specialists.
+
+     This agent:
+     1. Has its own LLM for reasoning
+     2. Has peer tools (ask_peer, broadcast) in its toolset
+     3. LLM AUTONOMOUSLY decides when to ask other agents
+     """
+     role = "assistant"
+     capabilities = ["chat", "coordination", "search"]
+
+     async def setup(self):
+         await super().setup()
+         self.llm = MyLLMClient()  # Your LLM client
+         self.tool_calls = []      # Track tool usage
+
+     def get_tools(self) -> list:
+         """
+         Tools available to THIS agent's LLM.
+
+         IMPORTANT: This includes PEER TOOLS!
+         The LLM sees ask_peer, broadcast_update, list_peers
+         and decides when to use them.
+         """
+         # Local tools
+         tools = [
+             {
+                 "name": "web_search",
+                 "description": "Search the web for information",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "query": {"type": "string", "description": "Search query"}
+                     },
+                     "required": ["query"]
+                 }
+             }
+         ]
+
+         # ADD PEER TOOLS TO LLM'S TOOLSET
+         # This is the key! LLM will see:
+         # - ask_peer: Ask another agent for help
+         # - broadcast_update: Send message to all peers
+         # - list_peers: See available agents
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+
+         return tools
+
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """
+         Execute a tool by name.
+
+         When LLM calls ask_peer, this routes to the peer system.
+         """
+         self.tool_calls.append({"tool": tool_name, "args": args})
+
+         # PEER TOOLS - route to peer system
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             return await self.peers.as_tool().execute(tool_name, args)
+
+         # LOCAL TOOLS
+         if tool_name == "web_search":
+             return f"Search results for '{args.get('query')}': Found 10 articles."
+
+         return f"Unknown tool: {tool_name}"
+
+     async def chat(self, user_message: str) -> str:
+         """
+         Complete LLM chat with autonomous tool use.
+
+         The LLM sees all tools (including peer tools) and decides
+         which to use. If user asks for analysis, LLM will use
+         ask_peer to contact the analyst.
+         """
+         # System prompt tells LLM about its capabilities
+         system_prompt = """You are a helpful assistant.
+
+ You have access to these capabilities:
+ - web_search: Search the web for information
+ - ask_peer: Ask specialist agents for help (e.g., analyst for data analysis)
+ - broadcast_update: Send updates to all connected agents
+ - list_peers: See what other agents are available
+
+ When a user needs data analysis, USE ask_peer to ask the analyst.
+ When a user needs web information, USE web_search.
+ Be concise in your responses."""
+
+         tools = self.get_tools()
+         messages = [{"role": "user", "content": user_message}]
+
+         # Call LLM - it will decide which tools to use
+         response = self.llm.chat(messages, tools=tools, system=system_prompt)
+
+         # Handle tool use loop
+         while response.get("type") == "tool_use":
+             tool_name = response["tool_name"]
+             tool_args = response["tool_args"]
+
+             # Execute the tool (might be ask_peer!)
+             tool_result = await self.execute_tool(tool_name, tool_args)
+
+             # Continue conversation with tool result
+             response = self.llm.continue_with_tool_result(
+                 messages, response["tool_use_id"], tool_result, tools
+             )
+
+         return response.get("content", "")
+
+     async def run(self):
+         """Main loop - listen for incoming requests."""
+         while not self.shutdown_requested:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     query = msg.data.get("query", "")
+                     result = await self.chat(query)
+                     await self.peers.respond(msg, {"response": result})
+             await asyncio.sleep(0.1)
+
+     async def execute_task(self, task: dict) -> dict:
+         """Required by base class."""
+         return {"status": "success"}
+ ```
+
+ ```python
+ # main.py
+ import asyncio
+ from jarviscore import Mesh
+ from agents import AnalystAgent, AssistantAgent
+
+
+ async def main():
+     mesh = Mesh(
+         mode="p2p",
+         config={
+             "bind_port": 7950,
+             "node_name": "my-agents",
+         }
+     )
+
+     # Add both agents
+     mesh.add(AnalystAgent)
+     assistant = mesh.add(AssistantAgent)
+
+     await mesh.start()
+
+     # Start analyst listening in background
+     analyst = mesh.get_agent("analyst")
+     analyst_task = asyncio.create_task(analyst.run())
+
+     # Give time for setup
+     await asyncio.sleep(0.5)
+
+     # User asks a question - LLM will autonomously decide to use ask_peer
+     print("User: Please analyze the Q4 sales trends")
+     response = await assistant.chat("Please analyze the Q4 sales trends")
+     print(f"Assistant: {response}")
+
+     # Check what tools were used
+     print(f"\nTools used: {assistant.tool_calls}")
+     # Output: [{'tool': 'ask_peer', 'args': {'role': 'analyst', 'question': '...'}}]
+
+     # Cleanup
+     analyst.request_shutdown()
+     analyst_task.cancel()
+     await mesh.stop()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ### Key Concepts for P2P Mode
+
+ #### Adding Peer Tools to Your LLM
+
+ This is the most important pattern. Add peer tools to `get_tools()`:
+
+ ```python
+ def get_tools(self) -> list:
+     tools = [
+         # Your local tools...
+     ]
+
+     # ADD PEER TOOLS - LLM will see ask_peer, broadcast, list_peers
+     if self.peers:
+         tools.extend(self.peers.as_tool().schema)
+
+     return tools
+ ```
+
+ #### Routing Tool Execution
+
+ Route tool calls to either peer tools or local tools:
+
+ ```python
+ async def execute_tool(self, tool_name: str, args: dict) -> str:
+     # Check peer tools first
+     if self.peers and tool_name in self.peers.as_tool().tool_names:
+         return await self.peers.as_tool().execute(tool_name, args)
+
+     # Then local tools
+     if tool_name == "my_local_tool":
+         return self.my_local_tool(args)
+
+     return f"Unknown tool: {tool_name}"
+ ```
+
+ #### System Prompt for Peer Awareness
+
+ Tell the LLM about peer capabilities:
+
+ ```python
+ system_prompt = """You are a helpful assistant.
+
+ You have access to:
+ - ask_peer: Ask specialist agents for help
+ - broadcast_update: Send updates to all agents
+
+ When a user needs specialized help, USE ask_peer to contact the right agent."""
+ ```
+
+ #### The `run()` Loop
+
+ Listen for incoming requests and process with LLM:
+
+ ```python
+ async def run(self):
+     while not self.shutdown_requested:
+         if self.peers:
+             msg = await self.peers.receive(timeout=0.5)
+             if msg and msg.is_request:
+                 result = await self.process_with_llm(msg.data)
+                 await self.peers.respond(msg, {"response": result})
+         await asyncio.sleep(0.1)
+ ```
+
+ ---
+
+ ## Distributed Mode
+
+ Distributed mode is for task pipelines where the framework orchestrates execution order and passes data between steps.
+
+ ### Migration Overview
+
+ ```
+ YOUR PROJECT STRUCTURE
+ ──────────────────────────────────────────────────────────────────
+
+ BEFORE (standalone):              AFTER (with JarvisCore):
+ ├── pipeline.py                   ├── agents.py   ← Modified agent code
+ └── (manual orchestration)        └── main.py     ← NEW entry point
+
+
+                                   This is now how you
+                                   start your pipeline
+ ```
+
+ ### Step 1: Install the Framework
+
+ ```bash
+ pip install jarviscore-framework
+ ```
+
+ ### Step 2: Your Existing Code (Before)
+
+ Let's say you have a manual pipeline like this:
+
+ ```python
+ # pipeline.py (YOUR EXISTING CODE)
+ class Researcher:
+     def execute(self, task: str) -> dict:
+         return {"output": f"Research on: {task}"}
+
+ class Writer:
+     def execute(self, task: str, context: dict = None) -> dict:
+         return {"output": f"Article based on: {context}"}
+
+ # Manual orchestration - you pass data between steps yourself:
+ if __name__ == "__main__":
+     researcher = Researcher()
+     writer = Writer()
+
+     research = researcher.execute("AI trends")
+     article = writer.execute("Write article", context=research)  # Manual!
+     print(article)
+ ```
+
+ **Problems with this approach:**
+ - You manually pass context between steps
+ - No dependency management
+ - Hard to run on multiple machines
+ - No automatic retries on failure
+
+ ### Step 3: Modify Your Agent Code → `agents.py`
+
+ Convert your existing classes to inherit from `CustomAgent`:
+
+ ```python
+ # agents.py (MODIFIED VERSION OF YOUR CODE)
+ from jarviscore.profiles import CustomAgent
+
+
+ class ResearcherAgent(CustomAgent):
+     """Your researcher, now framework-integrated."""
+
+     # NEW: Required class attributes
+     role = "researcher"
+     capabilities = ["research"]
+
+     async def setup(self):
+         """NEW: Called once on startup."""
+         await super().setup()
+         # Your initialization here (DB connections, LLM clients, etc.)
+
+     async def execute_task(self, task: dict) -> dict:
+         """
+         MODIFIED: Now receives a task dict, returns a result dict.
+
+         The framework calls this method - you don't call it manually.
+         """
+         task_desc = task.get("task", "")
+
+         # YOUR EXISTING LOGIC:
+         result = f"Research on: {task_desc}"
+
+         # NEW: Return format for framework
+         return {
+             "status": "success",
+             "output": result
+         }
+
+
+ class WriterAgent(CustomAgent):
+     """Your writer, now framework-integrated."""
+
+     role = "writer"
+     capabilities = ["writing"]
+
+     async def setup(self):
+         await super().setup()
+
+     async def execute_task(self, task: dict) -> dict:
+         """
+         Context from previous steps is AUTOMATICALLY injected.
+         No more manual passing!
+         """
+         task_desc = task.get("task", "")
+         context = task.get("context", {})  # ← Framework injects this!
+
+         # YOUR EXISTING LOGIC:
+         research_output = context.get("research", {}).get("output", "")
+         result = f"Article based on: {research_output}"
+
+         return {
+             "status": "success",
+             "output": result
+         }
+ ```
+
+ **What changed:**
+
+ | Before | After |
+ |--------|-------|
+ | `class Researcher:` | `class ResearcherAgent(CustomAgent):` |
+ | `def execute(self, task):` | `async def execute_task(self, task: dict):` |
+ | Return anything | Return `{"status": "...", "output": ...}` |
+ | Manual `context=research` | Framework auto-injects via `depends_on` |
+
+ ### Step 4: Create New Entry Point → `main.py`
+
+ **This is your NEW main file.** Instead of running `python pipeline.py`, you'll run `python main.py`.
+
+ ```python
+ # main.py (NEW FILE - YOUR NEW ENTRY POINT)
+ import asyncio
+ from jarviscore import Mesh
+ from agents import ResearcherAgent, WriterAgent
+
+
+ async def main():
+     # Create the mesh network
+     mesh = Mesh(
+         mode="distributed",
+         config={
+             "bind_port": 7950,
+             "node_name": "pipeline-node",
+         }
+     )
+
+     # Register your agents
+     mesh.add(ResearcherAgent)
+     mesh.add(WriterAgent)
+
+     # Start the mesh (calls setup() on all agents)
+     await mesh.start()
+
+     # Define your workflow - framework handles orchestration!
+     results = await mesh.workflow("content-pipeline", [
+         {
+             "id": "research",           # Step identifier
+             "agent": "researcher",      # Which agent handles this
+             "task": "AI trends 2024"    # Task description
+         },
+         {
+             "id": "write",
+             "agent": "writer",
+             "task": "Write a blog post",
+             "depends_on": ["research"]  # ← Framework auto-injects research output!
+         }
+     ])
+
+     # Results in workflow order
+     print("Research:", results[0]["output"])
+     print("Article:", results[1]["output"])
+
+     await mesh.stop()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ **Why a new entry file?**
+
+ | Reason | Explanation |
+ |--------|-------------|
+ | **Workflow orchestration** | `mesh.workflow()` handles dependencies, ordering, retries |
+ | **No manual context passing** | `depends_on` automatically injects previous step outputs |
+ | **Multiple agents** | Register all agents in one place |
+ | **Multi-node ready** | Same code works across machines with `seed_nodes` config |
+ | **Clean separation** | Agent logic in `agents.py`, orchestration in `main.py` |
+
+ ### Step 5: Run Your Pipeline
+
+ ```bash
+ # OLD WAY (no longer used):
+ # python pipeline.py
+
+ # NEW WAY:
+ python main.py
+ ```
+
+ ---
+
+ ### Complete Example: Three-Stage Content Pipeline
+
+ This example shows a research → write → review pipeline.
+
+ ```python
+ # agents.py
+ from jarviscore.profiles import CustomAgent
+
+
+ class ResearcherAgent(CustomAgent):
+     """Researches topics and returns findings."""
+
+     role = "researcher"
+     capabilities = ["research"]
+
+     async def setup(self):
+         await super().setup()
+         # self.llm = MyLLMClient()
+
+     async def execute_task(self, task: dict) -> dict:
+         topic = task.get("task", "")
+
+         # Your research logic
+         findings = f"Research findings on: {topic}"
+         # findings = self.llm.chat(f"Research: {topic}")
+
+         return {
+             "status": "success",
+             "output": findings
+         }
+
+
+ class WriterAgent(CustomAgent):
+     """Writes content based on research."""
+
+     role = "writer"
+     capabilities = ["writing"]
+
+     async def setup(self):
+         await super().setup()
+         # self.llm = MyLLMClient()
+
+     async def execute_task(self, task: dict) -> dict:
+         instruction = task.get("task", "")
+         context = task.get("context", {})  # Output from depends_on steps
+
+         # Combine context from previous steps
+         research = context.get("research", {}).get("output", "")
+
+         # Your writing logic
+         article = f"Article based on: {research}\nTopic: {instruction}"
+         # article = self.llm.chat(f"Based on: {research}\nWrite: {instruction}")
+
+         return {
+             "status": "success",
+             "output": article
+         }
+
+
+ class EditorAgent(CustomAgent):
+     """Reviews and polishes content."""
+
+     role = "editor"
+     capabilities = ["editing", "review"]
+
+     async def setup(self):
+         await super().setup()
+
+     async def execute_task(self, task: dict) -> dict:
+         instruction = task.get("task", "")
+         context = task.get("context", {})
+
+         # Get output from the writing step
+         draft = context.get("write", {}).get("output", "")
+
+         # Your editing logic
+         polished = f"[EDITED] {draft}"
+
+         return {
+             "status": "success",
+             "output": polished
+         }
+ ```
+
+ ```python
+ # main.py
+ import asyncio
+ from jarviscore import Mesh
+ from agents import ResearcherAgent, WriterAgent, EditorAgent
+
+
+ async def main():
+     mesh = Mesh(
+         mode="distributed",
+         config={
+             "bind_port": 7950,
+             "node_name": "content-node",
+         }
+     )
+
+     mesh.add(ResearcherAgent)
+     mesh.add(WriterAgent)
+     mesh.add(EditorAgent)
+
+     await mesh.start()
+
+     # Define a multi-step workflow with dependencies
+     results = await mesh.workflow("content-pipeline", [
+         {
+             "id": "research",            # Unique step identifier
+             "agent": "researcher",       # Which agent handles this
+             "task": "AI trends in 2024"  # Task description
+         },
+         {
+             "id": "write",
+             "agent": "writer",
+             "task": "Write a blog post about the research",
+             "depends_on": ["research"]   # Wait for research, inject its output
+         },
+         {
+             "id": "edit",
+             "agent": "editor",
+             "task": "Polish and improve the article",
+             "depends_on": ["write"]      # Wait for writing step
+         }
+     ])
+
+     # Results are in workflow order
+     print("Research:", results[0]["output"])
+     print("Draft:", results[1]["output"])
+     print("Final:", results[2]["output"])
+
+     await mesh.stop()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ### Key Concepts for Distributed Mode
+
+ #### The `execute_task()` Method
+
+ Called by the workflow engine when a task is assigned to your agent.
+
+ ```python
+ async def execute_task(self, task: dict) -> dict:
+     # task dict contains:
+     #   - "id": str - the step ID from the workflow
+     #   - "task": str - the task description
+     #   - "context": dict - outputs from depends_on steps (keyed by step ID)
+
+     return {
+         "status": "success",   # or "error"
+         "output": result,      # your result data
+         # "error": "message"   # if status is "error"
+     }
+ ```
+
+ #### The `task` Dictionary Structure
+
+ ```python
+ {
+     "id": "step_id",                # Step identifier from workflow
+     "task": "task description",     # What to do
+     "context": {                    # Outputs from dependencies
+         "previous_step_id": {
+             "status": "success",
+             "output": "..."         # Whatever previous step returned
+         }
+     }
+ }
+ ```
+
+ #### Workflow Step Definition
+
+ ```python
+ {
+     "id": "unique_step_id",       # Required: unique identifier
+     "agent": "agent_role",        # Required: which agent handles this
+     "task": "description",        # Required: task description
+     "depends_on": ["step1", ...]  # Optional: steps that must complete first
+ }
+ ```
+
+ #### Parallel Execution
+
+ Steps without `depends_on` or with satisfied dependencies run in parallel:
+
+ ```python
+ results = await mesh.workflow("parallel-example", [
+     {"id": "a", "agent": "worker", "task": "Task A"},  # Runs immediately
+     {"id": "b", "agent": "worker", "task": "Task B"},  # Runs in parallel with A
+     {"id": "c", "agent": "worker", "task": "Task C",
+      "depends_on": ["a", "b"]},                        # Waits for A and B
+ ])
+ ```
+
+ ---
+
+ ## API Reference
+
+ ### CustomAgent Class Attributes
+
+ | Attribute | Type | Required | Description |
+ |-----------|------|----------|-------------|
+ | `role` | `str` | Yes | Unique identifier for this agent type (e.g., `"researcher"`) |
+ | `capabilities` | `list[str]` | Yes | List of capabilities for discovery (e.g., `["research", "analysis"]`) |
+
+ ### CustomAgent Methods
+
+ | Method | Mode | Description |
+ |--------|------|-------------|
+ | `setup()` | Both | Called once on startup. Initialize resources here. Always call `await super().setup()` |
+ | `run()` | P2P | Main loop for continuous operation. Required for P2P mode |
+ | `execute_task(task)` | Distributed | Handle a workflow task. Required for Distributed mode |
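+
+ Putting the attributes and methods together, a minimal skeleton that satisfies both modes (a sketch; the method bodies are yours to fill in):
+
+ ```python
+ from jarviscore.profiles import CustomAgent
+
+
+ class SkeletonAgent(CustomAgent):
+     role = "skeleton"            # unique role name
+     capabilities = ["example"]   # advertised for discovery
+
+     async def setup(self):
+         await super().setup()    # always call super() first
+         # initialize LLM clients, DB connections, etc.
+
+     async def run(self):
+         # P2P mode: continuous loop (see "The run() Loop" above)
+         pass
+
+     async def execute_task(self, task: dict) -> dict:
+         # Distributed mode: handle one workflow step
+         return {"status": "success", "output": None}
+ ```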
+
+ ### Why `execute_task()` is Required in P2P Mode
+
+ You may notice that P2P agents must implement `execute_task()` even though they primarily use `run()`. Here's why:
+
+ ```
+ Agent (base class)
+
+ ├── @abstractmethod execute_task()   ← Python REQUIRES this to be implemented
+
+ └── run()                            ← Optional, default does nothing
+ ```
+
+ **The technical reason:**
+
+ 1. `Agent.execute_task()` is declared as `@abstractmethod` in `core/agent.py`
+ 2. Python's ABC (Abstract Base Class) requires ALL abstract methods to be implemented
+ 3. If you don't implement it, Python raises:
+    ```
+    TypeError: Can't instantiate abstract class MyAgent with abstract method execute_task
+    ```
+
+ **The design reason:**
+
+ - **Unified interface**: All agents can be called via `execute_task()`, regardless of mode
+ - **Flexibility**: A P2P agent can still participate in workflows if needed
+ - **Testing**: You can test any agent by calling `execute_task()` directly
+
+ **What to put in it for P2P mode:**
+
+ ```python
+ async def execute_task(self, task: dict) -> dict:
+     """Minimal implementation - main logic is in run()."""
+     return {"status": "success", "note": "This agent uses run() for P2P mode"}
+ ```
+
+ ### Peer Tools (P2P Mode)
+
+ Access via `self.peers.as_tool().execute(tool_name, params)`:
+
+ | Tool | Parameters | Description |
+ |------|------------|-------------|
+ | `ask_peer` | `{"role": str, "question": str}` | Send a request to a peer by role and wait for response |
+ | `broadcast` | `{"message": str}` | Send a message to all connected peers |
+ | `list_peers` | `{}` | Get list of available peers and their capabilities |
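+
+ For example, calling these tools directly (outside the LLM loop) looks like the following sketch; the payloads and return shapes follow the examples earlier in this guide:
+
+ ```python
+ # Inside an agent method, after setup() has completed:
+ peer_tool = self.peers.as_tool()
+
+ # Ask a specific role and wait for its reply
+ answer = await peer_tool.execute(
+     "ask_peer", {"role": "researcher", "question": "What is AI?"}
+ )
+
+ # Notify all connected peers
+ await peer_tool.execute("broadcast", {"message": "Starting analysis"})
+
+ # Discover who is on the mesh
+ peers = await peer_tool.execute("list_peers", {})
+ ```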
+
+ ### Mesh Configuration
+
+ ```python
+ mesh = Mesh(
+     mode="p2p" | "distributed",
+     config={
+         "bind_host": "0.0.0.0",           # IP to bind to (default: "127.0.0.1")
+         "bind_port": 7950,                # Port to listen on
+         "node_name": "my-node",           # Human-readable node name
+         "seed_nodes": "ip:port,ip:port",  # Comma-separated list of known nodes
+     }
+ )
+ ```
+
+ ### Mesh Methods
+
+ | Method | Description |
+ |--------|-------------|
+ | `mesh.add(AgentClass)` | Register an agent class |
+ | `mesh.start()` | Initialize and start all agents |
+ | `mesh.stop()` | Gracefully shut down all agents |
+ | `mesh.run_forever()` | Block until shutdown signal |
+ | `mesh.serve_forever()` | Same as `run_forever()` |
+ | `mesh.get_agent(role)` | Get agent instance by role |
+ | `mesh.workflow(name, steps)` | Run a workflow (Distributed mode) |
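+
+ A typical Distributed-mode lifecycle ties these together (a sketch reusing the agents defined earlier; runs inside an `async` function):
+
+ ```python
+ # inside an async def main():
+ mesh = Mesh(mode="distributed", config={"bind_port": 7950})
+ mesh.add(ResearcherAgent)          # register
+ await mesh.start()                 # runs setup() on all agents
+ results = await mesh.workflow("demo", [
+     {"id": "r", "agent": "researcher", "task": "AI trends"},
+ ])
+ await mesh.stop()                  # graceful shutdown
+ ```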
+
+ ---
+
+ ## Multi-Node Deployment
+
+ Run agents across multiple machines. Nodes discover each other via seed nodes.
+
+ ### Machine 1: Research Node
+
+ ```python
+ # research_node.py
+ import asyncio
+ from jarviscore import Mesh
+ from agents import ResearcherAgent
+
+
+ async def main():
+     mesh = Mesh(
+         mode="distributed",
+         config={
+             "bind_host": "0.0.0.0",   # Accept connections from any IP
+             "bind_port": 7950,
+             "node_name": "research-node",
+         }
+     )
+
+     mesh.add(ResearcherAgent)
+     await mesh.start()
+
+     print("Research node running on port 7950...")
+     await mesh.serve_forever()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ### Machine 2: Writer Node + Orchestrator
+
+ ```python
+ # writer_node.py
+ import asyncio
+ from jarviscore import Mesh
+ from agents import WriterAgent
+
+
+ async def main():
+     mesh = Mesh(
+         mode="distributed",
+         config={
+             "bind_host": "0.0.0.0",
+             "bind_port": 7950,
+             "node_name": "writer-node",
+             "seed_nodes": "192.168.1.10:7950",  # IP of research node
+         }
+     )
+
+     mesh.add(WriterAgent)
+     await mesh.start()
+
+     # Wait for nodes to discover each other
+     await asyncio.sleep(2)
+
+     # Run workflow - tasks automatically route to correct nodes
+     results = await mesh.workflow("cross-node-pipeline", [
+         {"id": "research", "agent": "researcher", "task": "AI trends"},
+         {"id": "write", "agent": "writer", "task": "Write article",
+          "depends_on": ["research"]},
+     ])
+
+     print(results)
+     await mesh.stop()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ### How Node Discovery Works
+
+ 1. On startup, nodes connect to seed nodes
+ 2. Seed nodes share their known peers
+ 3. Nodes exchange agent capability information
+ 4. Workflows automatically route tasks to nodes with matching agents
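+
+ For example, a third machine can join by seeding from any node already in the mesh; since seeds only bootstrap discovery, listing several should make joining resilient if one seed is down (IPs are illustrative):
+
+ ```python
+ # editor_node.py (hypothetical third node)
+ mesh = Mesh(
+     mode="distributed",
+     config={
+         "bind_host": "0.0.0.0",
+         "bind_port": 7950,
+         "node_name": "editor-node",
+         # Any existing nodes work as seeds; list more than one for redundancy
+         "seed_nodes": "192.168.1.10:7950,192.168.1.11:7950",
+     }
+ )
+ ```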
+
+ ---
+
+ ## Error Handling
+
+ ### In P2P Mode
+
+ ```python
+ async def run(self):
+     while not self.shutdown_requested:
+         try:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     try:
+                         result = await self.process(msg.data)
+                         await self.peers.respond(msg, {"response": result})
+                     except Exception as e:
+                         await self.peers.respond(msg, {
+                             "error": str(e),
+                             "status": "failed"
+                         })
+         except Exception as e:
+             print(f"Error in run loop: {e}")
+
+         await asyncio.sleep(0.1)
+ ```
+
+ ### In Distributed Mode
+
+ ```python
+ async def execute_task(self, task: dict) -> dict:
+     try:
+         result = await self.do_work(task)
+         return {
+             "status": "success",
+             "output": result
+         }
+     except ValueError as e:
+         return {
+             "status": "error",
+             "error": f"Invalid input: {e}"
+         }
+     except Exception as e:
+         return {
+             "status": "error",
+             "error": f"Unexpected error: {e}"
+         }
+ ```
+
+ ### Handling Missing Peers
+
+ ```python
+ async def ask_researcher(self, question: str) -> str:
+     if not self.peers:
+         raise RuntimeError("Peer system not initialized")
+
+     try:
+         response = await asyncio.wait_for(
+             self.peers.as_tool().execute(
+                 "ask_peer",
+                 {"role": "researcher", "question": question}
+             ),
+             timeout=30.0  # 30 second timeout
+         )
+         return response.get("response", "")
+     except asyncio.TimeoutError:
+         raise RuntimeError("Researcher did not respond in time")
+     except Exception as e:
+         raise RuntimeError(f"Failed to contact researcher: {e}")
+ ```
+
+ ---
+
+ ## Troubleshooting
+
+ ### Agent not receiving messages
+
+ **Problem**: `self.peers.receive()` always returns `None`
+
+ **Solutions**:
+ 1. Ensure the sending agent is using the correct `role` in `ask_peer`
+ 2. Check that both agents are registered with the mesh
+ 3. Verify `await super().setup()` is called in your `setup()` method
+ 4. Add logging to confirm your `run()` loop is executing
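+
+ For solution 4, a minimal sketch of a `run()` loop instrumented with standard-library logging:
+
+ ```python
+ import asyncio
+ import logging
+
+ from jarviscore.profiles import CustomAgent
+
+ logger = logging.getLogger(__name__)
+
+
+ class MyAgent(CustomAgent):
+     role = "debug-me"
+     capabilities = ["debug"]
+
+     async def run(self):
+         logger.info("run() loop started (role=%s)", self.role)
+         while not self.shutdown_requested:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg:
+                     logger.info("received message: %r", msg.data)
+             await asyncio.sleep(0.1)
+
+     async def execute_task(self, task: dict) -> dict:
+         return {"status": "success"}
+ ```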
+
+ ### Workflow tasks not executing
+
+ **Problem**: `mesh.workflow()` hangs or returns empty results
+
+ **Solutions**:
+ 1. Verify agent `role` matches the `agent` field in workflow steps
+ 2. Check `execute_task()` returns a dict with `status` key
+ 3. Ensure all `depends_on` step IDs exist in the workflow
+ 4. Check for circular dependencies
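+
+ For solutions 3 and 4, a quick sanity check you can run over a step list before calling `mesh.workflow()` (plain Python, no framework required):
+
+ ```python
+ def check_steps(steps: list) -> None:
+     """Raise if depends_on references unknown steps or forms a cycle."""
+     ids = {s["id"] for s in steps}
+     done, pending = set(), list(steps)
+     while pending:
+         unknown = {d for s in pending for d in s.get("depends_on", []) if d not in ids}
+         if unknown:
+             raise ValueError(f"depends_on references unknown steps: {unknown}")
+         ready = [s for s in pending if set(s.get("depends_on", [])) <= done]
+         if not ready:
+             raise ValueError(f"circular dependency among: {[s['id'] for s in pending]}")
+         done |= {s["id"] for s in ready}
+         pending = [s for s in pending if s["id"] not in done]
+
+ steps = [
+     {"id": "research", "agent": "researcher", "task": "AI trends"},
+     {"id": "write", "agent": "writer", "task": "Write", "depends_on": ["research"]},
+ ]
+ check_steps(steps)  # raises early instead of letting the workflow hang
+ ```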
+
+ ### Nodes not discovering each other
+
+ **Problem**: Multi-node setup, but workflows fail to find agents
+
+ **Solutions**:
+ 1. Verify `seed_nodes` IP and port are correct
+ 2. Check firewall allows connections on the bind port
+ 3. Ensure `bind_host` is `"0.0.0.0"` (not `"127.0.0.1"`) for remote connections
+ 4. Wait a few seconds after `mesh.start()` for discovery to complete
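+
+ For solutions 1 and 2, a quick reachability check you can run from the joining machine (standard library only; the IP is illustrative):
+
+ ```python
+ import socket
+
+ def can_reach(host: str, port: int, timeout: float = 3.0) -> bool:
+     try:
+         with socket.create_connection((host, port), timeout=timeout):
+             return True
+     except OSError:
+         return False
+
+ print(can_reach("192.168.1.10", 7950))  # False -> check firewall and bind_host
+ ```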
+
+ ### "Peer system not available" errors
+
+ **Problem**: `self.peers` is `None`
+
+ **Solutions**:
+ 1. Only access `self.peers` after `setup()` completes
+ 2. Check that mesh is started with `await mesh.start()`
+ 3. Verify the agent was added with `mesh.add(AgentClass)`
+
+ ---
+
+ ## Examples
+
+ For complete, runnable examples, see:
+
+ - `examples/customagent_p2p_example.py` - P2P mode with peer communication
+ - `examples/customagent_distributed_example.py` - Distributed mode with workflows