jarviscore-framework 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/customagent_p2p_example.py +566 -183
- jarviscore/__init__.py +1 -1
- jarviscore/data/examples/customagent_p2p_example.py +566 -183
- jarviscore/docs/API_REFERENCE.md +2 -2
- jarviscore/docs/CONFIGURATION.md +2 -2
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +1156 -209
- jarviscore/docs/GETTING_STARTED.md +1 -1
- jarviscore/docs/TROUBLESHOOTING.md +3 -3
- jarviscore/docs/USER_GUIDE.md +2 -2
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/METADATA +7 -6
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/RECORD +14 -14
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +0 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +0 -0
jarviscore/docs/CUSTOMAGENT_GUIDE.md

# CustomAgent Guide

CustomAgent lets you integrate your **existing agent code** with JarvisCore's networking and orchestration capabilities.

**You keep**: Your execution logic, LLM calls, and business logic.
**Framework provides**: Agent discovery, peer communication, workflow orchestration, and multi-node deployment.

---

## Table of Contents

1. [Prerequisites](#prerequisites)
2. [Choose Your Mode](#choose-your-mode)
3. [P2P Mode](#p2p-mode)
4. [Distributed Mode](#distributed-mode)
5. [API Reference](#api-reference)
6. [Multi-Node Deployment](#multi-node-deployment)
7. [Error Handling](#error-handling)
8. [Troubleshooting](#troubleshooting)

---

## Prerequisites

### Installation

```bash
pip install jarviscore-framework
```

### Your LLM Client

Throughout this guide, we use `MyLLMClient()` as a placeholder for your LLM. Replace it with your actual client:

```python
# Example: OpenAI
from openai import OpenAI
client = OpenAI()

def chat(prompt: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content

# Example: Anthropic
from anthropic import Anthropic
client = Anthropic()

def chat(prompt: str) -> str:
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}]
    )
    return response.content[0].text

# Example: Local/Custom
class MyLLMClient:
    def chat(self, prompt: str) -> str:
        # Your implementation
        return "response"
```

---
## Choose Your Mode

```
Which mode should I use?
│
├─ Do agents need to coordinate continuously in real-time?
│
│     YES → P2P Mode
│
│     NO  → Do you have task pipelines with dependencies?
│
│             YES → Distributed Mode
│             NO  → P2P Mode
```

### Quick Comparison

| Feature | P2P Mode | Distributed Mode |
|---------|----------|------------------|
| **Primary method** | `run()` - continuous loop | `execute_task()` - on-demand |
| **Communication** | Direct peer messaging | Workflow orchestration |
| **Best for** | Chatbots, real-time agents | Pipelines, batch processing |
| **Coordination** | Agents self-coordinate | Framework coordinates |
| **Supports workflows** | No | Yes |
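
To make the comparison concrete, here is a minimal sketch (not taken from the package itself) of the method each mode revolves around, using only the hooks described in this guide; the message-handling details are covered in the sections below.

```python
# Minimal sketch: the primary method in each mode (details covered below)
import asyncio
from jarviscore.profiles import CustomAgent


class P2PStyleAgent(CustomAgent):
    role = "p2p_example"
    capabilities = ["example"]

    async def run(self):
        # P2P mode: a continuous loop that reacts to peer messages
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    await self.peers.respond(msg, {"response": "handled"})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        # Required by the base class even in P2P mode (see API Reference)
        return {"status": "success"}


class DistributedStyleAgent(CustomAgent):
    role = "distributed_example"
    capabilities = ["example"]

    async def execute_task(self, task: dict) -> dict:
        # Distributed mode: the framework calls this on demand per workflow step
        return {"status": "success", "output": f"done: {task.get('task', '')}"}
```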

---

## P2P Mode

P2P mode is for agents that run continuously and communicate directly with each other.

### Migration Overview

```
YOUR PROJECT STRUCTURE
──────────────────────────────────────────────────────────────────

BEFORE (standalone):          AFTER (with JarvisCore):
├── my_agent.py               ├── agents.py   ← Modified agent code
└── (run directly)            └── main.py     ← NEW entry point
                                               (this is now how you start your agents)
```

### Step 1: Install the Framework

```bash
pip install jarviscore-framework
```

### Step 2: Your Existing Code (Before)

Let's say you have a standalone agent like this:

```python
# my_agent.py (YOUR EXISTING CODE)
class MyResearcher:
    """Your existing agent - runs standalone."""

    def __init__(self):
        self.llm = MyLLMClient()

    def research(self, query: str) -> str:
        return self.llm.chat(f"Research: {query}")

# You currently run it directly:
if __name__ == "__main__":
    agent = MyResearcher()
    result = agent.research("What is AI?")
    print(result)
```

### Step 3: Modify Your Agent Code → `agents.py`

Convert your existing class to inherit from `CustomAgent`:

```python
# agents.py (MODIFIED VERSION OF YOUR CODE)
import asyncio
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    """Your agent, now framework-integrated."""

    # NEW: Required class attributes for discovery
    role = "researcher"
    capabilities = ["research", "analysis"]

    async def setup(self):
        """NEW: Called once on startup. Move your __init__ logic here."""
        await super().setup()
        self.llm = MyLLMClient()  # Your existing initialization

    async def run(self):
        """NEW: Main loop - replaces your if __name__ == '__main__' block."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    query = msg.data.get("question", "")
                    # YOUR EXISTING LOGIC:
                    result = self.llm.chat(f"Research: {query}")
                    await self.peers.respond(msg, {"response": result})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        """
        Required by base Agent class (@abstractmethod).

        In P2P mode, your main logic lives in run(), not here.
        This must exist because Python requires all abstract methods
        to be implemented, or you get TypeError on instantiation.
        """
        return {"status": "success", "note": "This agent uses run() for P2P mode"}
```

**What changed:**

| Before | After |
|--------|-------|
| `class MyResearcher:` | `class ResearcherAgent(CustomAgent):` |
| `def __init__(self):` | `async def setup(self):` + `await super().setup()` |
| `if __name__ == "__main__":` | `async def run(self):` loop |
| Direct method calls | Peer message handling |

> **Note**: This is a minimal example. For the full pattern with **LLM-driven peer communication** (where your LLM autonomously decides when to call other agents), see the [Complete Example](#complete-example-llm-driven-peer-communication) below.

### Step 4: Create New Entry Point → `main.py`

**This is your NEW main file.** Instead of running `python my_agent.py`, you'll run `python main.py`.

```python
# main.py (NEW FILE - YOUR NEW ENTRY POINT)
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent


async def main():
    # Create the mesh network
    mesh = Mesh(
        mode="p2p",
        config={
            "bind_port": 7950,       # Port for P2P communication
            "node_name": "my-node",  # Identifies this node in the network
        }
    )

    # Register your agent(s)
    mesh.add(ResearcherAgent)

    # Start the mesh (calls setup() on all agents)
    await mesh.start()

    # Run forever - agents handle their own work in run() loops
    await mesh.run_forever()


if __name__ == "__main__":
    asyncio.run(main())
```

**Why a new entry file?**

| Reason | Explanation |
|--------|-------------|
| **Mesh setup** | The Mesh handles networking, discovery, and lifecycle |
| **Multiple agents** | You can add many agents to one mesh |
| **Clean separation** | Agent logic in `agents.py`, orchestration in `main.py` |
| **Standard pattern** | Consistent entry point across all JarvisCore projects |

### Step 5: Run Your Agents

```bash
# OLD WAY (no longer used):
# python my_agent.py

# NEW WAY:
python main.py
```
---

### Complete Example: LLM-Driven Peer Communication

This is the **key pattern** for P2P mode. Your LLM gets peer tools added to its toolset, and it **autonomously decides** when to ask other agents for help.

```
LLM-DRIVEN PEER COMMUNICATION

User: "Analyze this sales data"
        │
        ▼
ASSISTANT'S LLM
    Tools available:
      - web_search (local)
      - ask_peer (peer)   ◄── NEW!
      - broadcast (peer)  ◄── NEW!
    LLM decides: "I need analysis help, let me ask the analyst"
        │ uses ask_peer tool
        ▼
ANALYST AGENT
    (processes with its own LLM)
        │ returns analysis
        ▼
ASSISTANT'S LLM
    "Based on the analyst's findings, here's your answer..."
```

**The key insight**: You add peer tools to your LLM's toolset. The LLM decides when to use them.

```python
# agents.py
import asyncio
from jarviscore.profiles import CustomAgent


class AnalystAgent(CustomAgent):
    """
    Analyst agent - specializes in data analysis.

    This agent:
    1. Listens for incoming requests from peers
    2. Processes requests using its own LLM
    3. Responds with analysis results
    """
    role = "analyst"
    capabilities = ["analysis", "data_interpretation", "reporting"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()  # Your LLM client

    def get_tools(self) -> list:
        """
        Tools available to THIS agent's LLM.

        The analyst has local analysis tools.
        It can also ask other peers if needed.
        """
        tools = [
            {
                "name": "statistical_analysis",
                "description": "Run statistical analysis on numeric data",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "data": {"type": "string", "description": "Data to analyze"}
                    },
                    "required": ["data"]
                }
            }
        ]

        # ADD PEER TOOLS - so LLM can ask other agents if needed
        if self.peers:
            tools.extend(self.peers.as_tool().schema)

        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        """
        Execute a tool by name.

        Routes to peer tools or local tools as appropriate.
        """
        # PEER TOOLS - check and execute
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)

        # LOCAL TOOLS
        if tool_name == "statistical_analysis":
            data = args.get("data", "")
            return f"Analysis of '{data}': mean=150.3, std=23.4, trend=positive"

        return f"Unknown tool: {tool_name}"

    async def process_with_llm(self, query: str) -> str:
        """Process a request using LLM with tools."""
        system_prompt = """You are an expert data analyst.
You have tools for statistical analysis.
Analyze data thoroughly and provide insights."""

        tools = self.get_tools()
        messages = [{"role": "user", "content": query}]

        # Call LLM with tools
        response = self.llm.chat(messages, tools=tools, system=system_prompt)

        # Handle tool use if LLM decides to use a tool
        if response.get("type") == "tool_use":
            tool_result = await self.execute_tool(
                response["tool_name"],
                response["tool_args"]
            )
            # Continue conversation with tool result
            response = self.llm.continue_with_tool_result(
                messages, response["tool_use_id"], tool_result
            )

        return response.get("content", "Analysis complete.")

    async def run(self):
        """Listen for incoming requests from peers."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    query = msg.data.get("question", msg.data.get("query", ""))

                    # Process with LLM
                    result = await self.process_with_llm(query)

                    await self.peers.respond(msg, {"response": result})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        """Required by base class."""
        return {"status": "success"}


class AssistantAgent(CustomAgent):
    """
    Assistant agent - coordinates with other specialists.

    This agent:
    1. Has its own LLM for reasoning
    2. Has peer tools (ask_peer, broadcast) in its toolset
    3. LLM AUTONOMOUSLY decides when to ask other agents
    """
    role = "assistant"
    capabilities = ["chat", "coordination", "search"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()  # Your LLM client
        self.tool_calls = []      # Track tool usage

    def get_tools(self) -> list:
        """
        Tools available to THIS agent's LLM.

        IMPORTANT: This includes PEER TOOLS!
        The LLM sees ask_peer, broadcast_update, list_peers
        and decides when to use them.
        """
        # Local tools
        tools = [
            {
                "name": "web_search",
                "description": "Search the web for information",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Search query"}
                    },
                    "required": ["query"]
                }
            }
        ]

        # ADD PEER TOOLS TO LLM'S TOOLSET
        # This is the key! LLM will see:
        # - ask_peer: Ask another agent for help
        # - broadcast_update: Send message to all peers
        # - list_peers: See available agents
        if self.peers:
            tools.extend(self.peers.as_tool().schema)

        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        """
        Execute a tool by name.

        When LLM calls ask_peer, this routes to the peer system.
        """
        self.tool_calls.append({"tool": tool_name, "args": args})

        # PEER TOOLS - route to peer system
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)

        # LOCAL TOOLS
        if tool_name == "web_search":
            return f"Search results for '{args.get('query')}': Found 10 articles."

        return f"Unknown tool: {tool_name}"

    async def chat(self, user_message: str) -> str:
        """
        Complete LLM chat with autonomous tool use.

        The LLM sees all tools (including peer tools) and decides
        which to use. If user asks for analysis, LLM will use
        ask_peer to contact the analyst.
        """
        # System prompt tells LLM about its capabilities
        system_prompt = """You are a helpful assistant.

You have access to these capabilities:
- web_search: Search the web for information
- ask_peer: Ask specialist agents for help (e.g., analyst for data analysis)
- broadcast_update: Send updates to all connected agents
- list_peers: See what other agents are available

When a user needs data analysis, USE ask_peer to ask the analyst.
When a user needs web information, USE web_search.
Be concise in your responses."""

        tools = self.get_tools()
        messages = [{"role": "user", "content": user_message}]

        # Call LLM - it will decide which tools to use
        response = self.llm.chat(messages, tools=tools, system=system_prompt)

        # Handle tool use loop
        while response.get("type") == "tool_use":
            tool_name = response["tool_name"]
            tool_args = response["tool_args"]

            # Execute the tool (might be ask_peer!)
            tool_result = await self.execute_tool(tool_name, tool_args)

            # Continue conversation with tool result
            response = self.llm.continue_with_tool_result(
                messages, response["tool_use_id"], tool_result, tools
            )

        return response.get("content", "")

    async def run(self):
        """Main loop - listen for incoming requests."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    query = msg.data.get("query", "")
                    result = await self.chat(query)
                    await self.peers.respond(msg, {"response": result})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        """Required by base class."""
        return {"status": "success"}
```

```python
# main.py
import asyncio
from jarviscore import Mesh
from agents import AnalystAgent, AssistantAgent


async def main():
    mesh = Mesh(
        mode="p2p",
        config={
            "bind_port": 7950,
            "node_name": "my-agents",
        }
    )

    # Add both agents
    mesh.add(AnalystAgent)
    assistant = mesh.add(AssistantAgent)

    await mesh.start()

    # Start analyst listening in background
    analyst = mesh.get_agent("analyst")
    analyst_task = asyncio.create_task(analyst.run())

    # Give time for setup
    await asyncio.sleep(0.5)

    # User asks a question - LLM will autonomously decide to use ask_peer
    print("User: Please analyze the Q4 sales trends")
    response = await assistant.chat("Please analyze the Q4 sales trends")
    print(f"Assistant: {response}")

    # Check what tools were used
    print(f"\nTools used: {assistant.tool_calls}")
    # Output: [{'tool': 'ask_peer', 'args': {'role': 'analyst', 'question': '...'}}]

    # Cleanup
    analyst.request_shutdown()
    analyst_task.cancel()
    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```
### Key Concepts for P2P Mode

#### Adding Peer Tools to Your LLM

This is the most important pattern. Add peer tools to `get_tools()`:

```python
def get_tools(self) -> list:
    tools = [
        # Your local tools...
    ]

    # ADD PEER TOOLS - LLM will see ask_peer, broadcast, list_peers
    if self.peers:
        tools.extend(self.peers.as_tool().schema)

    return tools
```

#### Routing Tool Execution

Route tool calls to either peer tools or local tools:

```python
async def execute_tool(self, tool_name: str, args: dict) -> str:
    # Check peer tools first
    if self.peers and tool_name in self.peers.as_tool().tool_names:
        return await self.peers.as_tool().execute(tool_name, args)

    # Then local tools
    if tool_name == "my_local_tool":
        return self.my_local_tool(args)

    return f"Unknown tool: {tool_name}"
```

#### System Prompt for Peer Awareness

Tell the LLM about peer capabilities:

```python
system_prompt = """You are a helpful assistant.

You have access to:
- ask_peer: Ask specialist agents for help
- broadcast_update: Send updates to all agents

When a user needs specialized help, USE ask_peer to contact the right agent."""
```

#### The `run()` Loop

Listen for incoming requests and process with LLM:

```python
async def run(self):
    while not self.shutdown_requested:
        if self.peers:
            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                result = await self.process_with_llm(msg.data)
                await self.peers.respond(msg, {"response": result})
        await asyncio.sleep(0.1)
```
---

## Distributed Mode

Distributed mode is for task pipelines where the framework orchestrates execution order and passes data between steps.

### Migration Overview

```
YOUR PROJECT STRUCTURE
──────────────────────────────────────────────────────────────────

BEFORE (standalone):            AFTER (with JarvisCore):
├── pipeline.py                 ├── agents.py   ← Modified agent code
└── (manual orchestration)      └── main.py     ← NEW entry point
                                                 (this is now how you start your pipeline)
```

### Step 1: Install the Framework

```bash
pip install jarviscore-framework
```

### Step 2: Your Existing Code (Before)

Let's say you have a manual pipeline like this:

```python
# pipeline.py (YOUR EXISTING CODE)
class Researcher:
    def execute(self, task: str) -> dict:
        return {"output": f"Research on: {task}"}

class Writer:
    def execute(self, task: str, context: dict = None) -> dict:
        return {"output": f"Article based on: {context}"}

# Manual orchestration - you pass data between steps yourself:
if __name__ == "__main__":
    researcher = Researcher()
    writer = Writer()

    research = researcher.execute("AI trends")
    article = writer.execute("Write article", context=research)  # Manual!
    print(article)
```

**Problems with this approach:**
- You manually pass context between steps
- No dependency management
- Hard to run on multiple machines
- No automatic retries on failure

### Step 3: Modify Your Agent Code → `agents.py`

Convert your existing classes to inherit from `CustomAgent`:

```python
# agents.py (MODIFIED VERSION OF YOUR CODE)
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    """Your researcher, now framework-integrated."""

    # NEW: Required class attributes
    role = "researcher"
    capabilities = ["research"]

    async def setup(self):
        """NEW: Called once on startup."""
        await super().setup()
        # Your initialization here (DB connections, LLM clients, etc.)

    async def execute_task(self, task: dict) -> dict:
        """
        MODIFIED: Now receives a task dict, returns a result dict.

        The framework calls this method - you don't call it manually.
        """
        task_desc = task.get("task", "")

        # YOUR EXISTING LOGIC:
        result = f"Research on: {task_desc}"

        # NEW: Return format for framework
        return {
            "status": "success",
            "output": result
        }


class WriterAgent(CustomAgent):
    """Your writer, now framework-integrated."""

    role = "writer"
    capabilities = ["writing"]

    async def setup(self):
        await super().setup()

    async def execute_task(self, task: dict) -> dict:
        """
        Context from previous steps is AUTOMATICALLY injected.
        No more manual passing!
        """
        task_desc = task.get("task", "")
        context = task.get("context", {})  # ← Framework injects this!

        # YOUR EXISTING LOGIC:
        research_output = context.get("research", {}).get("output", "")
        result = f"Article based on: {research_output}"

        return {
            "status": "success",
            "output": result
        }
```

**What changed:**

| Before | After |
|--------|-------|
| `class Researcher:` | `class ResearcherAgent(CustomAgent):` |
| `def execute(self, task):` | `async def execute_task(self, task: dict):` |
| Return anything | Return `{"status": "...", "output": ...}` |
| Manual `context=research` | Framework auto-injects via `depends_on` |

### Step 4: Create New Entry Point → `main.py`

**This is your NEW main file.** Instead of running `python pipeline.py`, you'll run `python main.py`.

```python
# main.py (NEW FILE - YOUR NEW ENTRY POINT)
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent, WriterAgent


async def main():
    # Create the mesh network
    mesh = Mesh(
        mode="distributed",
        config={
            "bind_port": 7950,
            "node_name": "pipeline-node",
        }
    )

    # Register your agents
    mesh.add(ResearcherAgent)
    mesh.add(WriterAgent)

    # Start the mesh (calls setup() on all agents)
    await mesh.start()

    # Define your workflow - framework handles orchestration!
    results = await mesh.workflow("content-pipeline", [
        {
            "id": "research",           # Step identifier
            "agent": "researcher",      # Which agent handles this
            "task": "AI trends 2024"    # Task description
        },
        {
            "id": "write",
            "agent": "writer",
            "task": "Write a blog post",
            "depends_on": ["research"]  # ← Framework auto-injects research output!
        }
    ])

    # Results in workflow order
    print("Research:", results[0]["output"])
    print("Article:", results[1]["output"])

    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```

**Why a new entry file?**

| Reason | Explanation |
|--------|-------------|
| **Workflow orchestration** | `mesh.workflow()` handles dependencies, ordering, retries |
| **No manual context passing** | `depends_on` automatically injects previous step outputs |
| **Multiple agents** | Register all agents in one place |
| **Multi-node ready** | Same code works across machines with `seed_nodes` config |
| **Clean separation** | Agent logic in `agents.py`, orchestration in `main.py` |

### Step 5: Run Your Pipeline

```bash
# OLD WAY (no longer used):
# python pipeline.py

# NEW WAY:
python main.py
```
---

### Complete Example: Three-Stage Content Pipeline

This example shows a research → write → review pipeline.

```python
# agents.py
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    """Researches topics and returns findings."""

    role = "researcher"
    capabilities = ["research"]

    async def setup(self):
        await super().setup()
        # self.llm = MyLLMClient()

    async def execute_task(self, task: dict) -> dict:
        topic = task.get("task", "")

        # Your research logic
        findings = f"Research findings on: {topic}"
        # findings = self.llm.chat(f"Research: {topic}")

        return {
            "status": "success",
            "output": findings
        }


class WriterAgent(CustomAgent):
    """Writes content based on research."""

    role = "writer"
    capabilities = ["writing"]

    async def setup(self):
        await super().setup()
        # self.llm = MyLLMClient()

    async def execute_task(self, task: dict) -> dict:
        instruction = task.get("task", "")
        context = task.get("context", {})  # Output from depends_on steps

        # Combine context from previous steps
        research = context.get("research", {}).get("output", "")

        # Your writing logic
        article = f"Article based on: {research}\nTopic: {instruction}"
        # article = self.llm.chat(f"Based on: {research}\nWrite: {instruction}")

        return {
            "status": "success",
            "output": article
        }


class EditorAgent(CustomAgent):
    """Reviews and polishes content."""

    role = "editor"
    capabilities = ["editing", "review"]

    async def setup(self):
        await super().setup()

    async def execute_task(self, task: dict) -> dict:
        instruction = task.get("task", "")
        context = task.get("context", {})

        # Get output from the writing step
        draft = context.get("write", {}).get("output", "")

        # Your editing logic
        polished = f"[EDITED] {draft}"

        return {
            "status": "success",
            "output": polished
        }
```

```python
# main.py
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent, WriterAgent, EditorAgent


async def main():
    mesh = Mesh(
        mode="distributed",
        config={
            "bind_port": 7950,
            "node_name": "content-node",
        }
    )

    mesh.add(ResearcherAgent)
    mesh.add(WriterAgent)
    mesh.add(EditorAgent)

    await mesh.start()

    # Define a multi-step workflow with dependencies
    results = await mesh.workflow("content-pipeline", [
        {
            "id": "research",              # Unique step identifier
            "agent": "researcher",         # Which agent handles this
            "task": "AI trends in 2024"    # Task description
        },
        {
            "id": "write",
            "agent": "writer",
            "task": "Write a blog post about the research",
            "depends_on": ["research"]     # Wait for research, inject its output
        },
        {
            "id": "edit",
            "agent": "editor",
            "task": "Polish and improve the article",
            "depends_on": ["write"]        # Wait for writing step
        }
    ])

    # Results are in workflow order
    print("Research:", results[0]["output"])
    print("Draft:", results[1]["output"])
    print("Final:", results[2]["output"])

    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```
### Key Concepts for Distributed Mode

#### The `execute_task()` Method

Called by the workflow engine when a task is assigned to your agent.

```python
async def execute_task(self, task: dict) -> dict:
    # task dict contains:
    # - "id": str - the step ID from the workflow
    # - "task": str - the task description
    # - "context": dict - outputs from depends_on steps (keyed by step ID)

    return {
        "status": "success",   # or "error"
        "output": result,      # your result data
        # "error": "message"   # if status is "error"
    }
```

#### The `task` Dictionary Structure

```python
{
    "id": "step_id",                 # Step identifier from workflow
    "task": "task description",      # What to do
    "context": {                     # Outputs from dependencies
        "previous_step_id": {
            "status": "success",
            "output": "..."          # Whatever previous step returned
        }
    }
}
```

#### Workflow Step Definition

```python
{
    "id": "unique_step_id",         # Required: unique identifier
    "agent": "agent_role",          # Required: which agent handles this
    "task": "description",          # Required: task description
    "depends_on": ["step1", ...]    # Optional: steps that must complete first
}
```

#### Parallel Execution

Steps without `depends_on` or with satisfied dependencies run in parallel:

```python
results = await mesh.workflow("parallel-example", [
    {"id": "a", "agent": "worker", "task": "Task A"},  # Runs immediately
    {"id": "b", "agent": "worker", "task": "Task B"},  # Runs in parallel with A
    {"id": "c", "agent": "worker", "task": "Task C",
     "depends_on": ["a", "b"]},                        # Waits for A and B
])
```
---

## API Reference

### CustomAgent Class Attributes

| Attribute | Type | Required | Description |
|-----------|------|----------|-------------|
| `role` | `str` | Yes | Unique identifier for this agent type (e.g., `"researcher"`) |
| `capabilities` | `list[str]` | Yes | List of capabilities for discovery (e.g., `["research", "analysis"]`) |

### CustomAgent Methods

| Method | Mode | Description |
|--------|------|-------------|
| `setup()` | Both | Called once on startup. Initialize resources here. Always call `await super().setup()` |
| `run()` | P2P | Main loop for continuous operation. Required for P2P mode |
| `execute_task(task)` | Distributed | Handle a workflow task. Required for Distributed mode |

### Why `execute_task()` is Required in P2P Mode

You may notice that P2P agents must implement `execute_task()` even though they primarily use `run()`. Here's why:

```
Agent (base class)
│
├── @abstractmethod execute_task()   ← Python REQUIRES this to be implemented
│
└── run()                            ← Optional, default does nothing
```

**The technical reason:**

1. `Agent.execute_task()` is declared as `@abstractmethod` in `core/agent.py`
2. Python's ABC (Abstract Base Class) requires ALL abstract methods to be implemented
3. If you don't implement it, Python raises:
   ```
   TypeError: Can't instantiate abstract class MyAgent with abstract method execute_task
   ```

**The design reason:**

- **Unified interface**: All agents can be called via `execute_task()`, regardless of mode
- **Flexibility**: A P2P agent can still participate in workflows if needed
- **Testing**: You can test any agent by calling `execute_task()` directly

**What to put in it for P2P mode:**

```python
async def execute_task(self, task: dict) -> dict:
    """Minimal implementation - main logic is in run()."""
    return {"status": "success", "note": "This agent uses run() for P2P mode"}
```

### Peer Tools (P2P Mode)

Access via `self.peers.as_tool().execute(tool_name, params)`:

| Tool | Parameters | Description |
|------|------------|-------------|
| `ask_peer` | `{"role": str, "question": str}` | Send a request to a peer by role and wait for response |
| `broadcast` | `{"message": str}` | Send a message to all connected peers |
| `list_peers` | `{}` | Get list of available peers and their capabilities |
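
As an illustration of the table above, here is a minimal sketch of calling each peer tool directly from inside an agent method. It uses only the `self.peers.as_tool().execute(...)` call shown in this guide; the method name itself is hypothetical.

```python
async def demo_peer_tools(self):
    # Hypothetical helper method: exercises each peer tool listed above
    if not self.peers:
        return

    peer_tool = self.peers.as_tool()

    # See which peers are available and what they can do
    peers = await peer_tool.execute("list_peers", {})

    # Ask a specific peer (by role) and wait for its response
    answer = await peer_tool.execute(
        "ask_peer", {"role": "researcher", "question": "What is AI?"}
    )

    # Send a one-way message to all connected peers
    await peer_tool.execute("broadcast", {"message": "Starting a new analysis run"})

    print(peers, answer)
```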

### Mesh Configuration

```python
mesh = Mesh(
    mode="p2p" | "distributed",
    config={
        "bind_host": "0.0.0.0",           # IP to bind to (default: "127.0.0.1")
        "bind_port": 7950,                # Port to listen on
        "node_name": "my-node",           # Human-readable node name
        "seed_nodes": "ip:port,ip:port",  # Comma-separated list of known nodes
    }
)
```

### Mesh Methods

| Method | Description |
|--------|-------------|
| `mesh.add(AgentClass)` | Register an agent class |
| `mesh.start()` | Initialize and start all agents |
| `mesh.stop()` | Gracefully shut down all agents |
| `mesh.run_forever()` | Block until shutdown signal |
| `mesh.serve_forever()` | Same as `run_forever()` |
| `mesh.get_agent(role)` | Get agent instance by role |
| `mesh.workflow(name, steps)` | Run a workflow (Distributed mode) |
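
A minimal sketch of how these methods fit together in one entry point, assuming a `WorkerAgent` with role `"worker"` defined in `agents.py` as in the earlier examples:

```python
# Hypothetical example combining the Mesh methods listed above
import asyncio
from jarviscore import Mesh
from agents import WorkerAgent  # assumed to exist, as in earlier examples


async def main():
    mesh = Mesh(mode="distributed", config={"bind_port": 7950, "node_name": "demo"})

    mesh.add(WorkerAgent)              # register agents
    await mesh.start()                 # calls setup() on every agent

    worker = mesh.get_agent("worker")  # look up an instance by role
    print("worker ready:", worker is not None)

    results = await mesh.workflow("demo", [
        {"id": "only-step", "agent": "worker", "task": "Do the thing"},
    ])
    print(results)

    await mesh.stop()                  # graceful shutdown
    # For long-running P2P nodes you would call `await mesh.run_forever()`
    # (or `serve_forever()`) instead of running a workflow and stopping.


if __name__ == "__main__":
    asyncio.run(main())
```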

---

## Multi-Node Deployment

Run agents across multiple machines. Nodes discover each other via seed nodes.

### Machine 1: Research Node

```python
# research_node.py
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent


async def main():
    mesh = Mesh(
        mode="distributed",
        config={
            "bind_host": "0.0.0.0",  # Accept connections from any IP
            "bind_port": 7950,
            "node_name": "research-node",
        }
    )

    mesh.add(ResearcherAgent)
    await mesh.start()

    print("Research node running on port 7950...")
    await mesh.serve_forever()


if __name__ == "__main__":
    asyncio.run(main())
```

### Machine 2: Writer Node + Orchestrator

```python
# writer_node.py
import asyncio
from jarviscore import Mesh
from agents import WriterAgent


async def main():
    mesh = Mesh(
        mode="distributed",
        config={
            "bind_host": "0.0.0.0",
            "bind_port": 7950,
            "node_name": "writer-node",
            "seed_nodes": "192.168.1.10:7950",  # IP of research node
        }
    )

    mesh.add(WriterAgent)
    await mesh.start()

    # Wait for nodes to discover each other
    await asyncio.sleep(2)

    # Run workflow - tasks automatically route to correct nodes
    results = await mesh.workflow("cross-node-pipeline", [
        {"id": "research", "agent": "researcher", "task": "AI trends"},
        {"id": "write", "agent": "writer", "task": "Write article",
         "depends_on": ["research"]},
    ])

    print(results)
    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```

### How Node Discovery Works

1. On startup, nodes connect to seed nodes
2. Seed nodes share their known peers
3. Nodes exchange agent capability information
4. Workflows automatically route tasks to nodes with matching agents
---

## Error Handling

### In P2P Mode

```python
async def run(self):
    while not self.shutdown_requested:
        try:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    try:
                        result = await self.process(msg.data)
                        await self.peers.respond(msg, {"response": result})
                    except Exception as e:
                        await self.peers.respond(msg, {
                            "error": str(e),
                            "status": "failed"
                        })
        except Exception as e:
            print(f"Error in run loop: {e}")

        await asyncio.sleep(0.1)
```

### In Distributed Mode

```python
async def execute_task(self, task: dict) -> dict:
    try:
        result = await self.do_work(task)
        return {
            "status": "success",
            "output": result
        }
    except ValueError as e:
        return {
            "status": "error",
            "error": f"Invalid input: {e}"
        }
    except Exception as e:
        return {
            "status": "error",
            "error": f"Unexpected error: {e}"
        }
```

### Handling Missing Peers

```python
async def ask_researcher(self, question: str) -> str:
    if not self.peers:
        raise RuntimeError("Peer system not initialized")

    try:
        response = await asyncio.wait_for(
            self.peers.as_tool().execute(
                "ask_peer",
                {"role": "researcher", "question": question}
            ),
            timeout=30.0  # 30 second timeout
        )
        return response.get("response", "")
    except asyncio.TimeoutError:
        raise RuntimeError("Researcher did not respond in time")
    except Exception as e:
        raise RuntimeError(f"Failed to contact researcher: {e}")
```

---

## Troubleshooting

### Agent not receiving messages

**Problem**: `self.peers.receive()` always returns `None`

**Solutions**:
1. Ensure the sending agent is using the correct `role` in `ask_peer`
2. Check that both agents are registered with the mesh
3. Verify `await super().setup()` is called in your `setup()` method
4. Add logging to confirm your `run()` loop is executing (see the sketch below)
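
A minimal sketch of point 4, assuming the standard library `logging` module and the `run()` method shape used throughout this guide:

```python
import asyncio
import logging

logger = logging.getLogger(__name__)

async def run(self):
    logger.info("run() loop started for role=%s", self.role)
    while not self.shutdown_requested:
        if self.peers:
            msg = await self.peers.receive(timeout=0.5)
            if msg is None:
                logger.debug("no message this cycle")  # confirms the loop is alive
            elif msg.is_request:
                logger.info("received request: %s", msg.data)
                await self.peers.respond(msg, {"response": "ok"})
        else:
            logger.warning("self.peers is not available yet")
        await asyncio.sleep(0.1)
```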

### Workflow tasks not executing

**Problem**: `mesh.workflow()` hangs or returns empty results

**Solutions**:
1. Verify agent `role` matches the `agent` field in workflow steps
2. Check `execute_task()` returns a dict with `status` key
3. Ensure all `depends_on` step IDs exist in the workflow
4. Check for circular dependencies

### Nodes not discovering each other

**Problem**: Multi-node setup, but workflows fail to find agents

**Solutions**:
1. Verify `seed_nodes` IP and port are correct
2. Check firewall allows connections on the bind port
3. Ensure `bind_host` is `"0.0.0.0"` (not `"127.0.0.1"`) for remote connections
4. Wait a few seconds after `mesh.start()` for discovery to complete

### "Peer system not available" errors

**Problem**: `self.peers` is `None`

**Solutions**:
1. Only access `self.peers` after `setup()` completes
2. Check that mesh is started with `await mesh.start()`
3. Verify the agent was added with `mesh.add(AgentClass)` (the correct startup order is sketched below)
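
A minimal sketch of the startup order implied by these points, assuming the `Mesh` API described above; `self.peers` is only expected to be usable once `mesh.start()` has run each agent's `setup()`:

```python
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent  # assumed to exist, as in earlier examples


async def main():
    mesh = Mesh(mode="p2p", config={"bind_port": 7950, "node_name": "debug-node"})

    mesh.add(ResearcherAgent)    # 1. register the agent class first
    await mesh.start()           # 2. start the mesh - setup() runs, peers get wired up

    agent = mesh.get_agent("researcher")
    print("peers available:", agent.peers is not None)  # 3. only now touch self.peers

    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```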

---

## Examples

For complete, runnable examples, see:

- `examples/customagent_p2p_example.py` - P2P mode with peer communication
- `examples/customagent_distributed_example.py` - Distributed mode with workflows