jarviscore-framework 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/cloud_deployment_example.py +162 -0
- examples/customagent_p2p_example.py +566 -183
- examples/fastapi_integration_example.py +570 -0
- examples/listeneragent_cognitive_discovery_example.py +343 -0
- jarviscore/__init__.py +22 -5
- jarviscore/cli/smoketest.py +8 -4
- jarviscore/core/agent.py +227 -0
- jarviscore/data/examples/cloud_deployment_example.py +162 -0
- jarviscore/data/examples/customagent_p2p_example.py +566 -183
- jarviscore/data/examples/fastapi_integration_example.py +570 -0
- jarviscore/data/examples/listeneragent_cognitive_discovery_example.py +343 -0
- jarviscore/docs/API_REFERENCE.md +296 -3
- jarviscore/docs/CHANGELOG.md +97 -0
- jarviscore/docs/CONFIGURATION.md +2 -2
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +2021 -255
- jarviscore/docs/GETTING_STARTED.md +112 -8
- jarviscore/docs/TROUBLESHOOTING.md +3 -3
- jarviscore/docs/USER_GUIDE.md +152 -6
- jarviscore/integrations/__init__.py +16 -0
- jarviscore/integrations/fastapi.py +247 -0
- jarviscore/p2p/broadcaster.py +10 -3
- jarviscore/p2p/coordinator.py +310 -14
- jarviscore/p2p/keepalive.py +45 -23
- jarviscore/p2p/peer_client.py +282 -10
- jarviscore/p2p/swim_manager.py +9 -4
- jarviscore/profiles/__init__.py +10 -2
- jarviscore/profiles/listeneragent.py +292 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/METADATA +42 -8
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/RECORD +36 -22
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/WHEEL +1 -1
- tests/test_13_dx_improvements.py +554 -0
- tests/test_14_cloud_deployment.py +403 -0
- tests/test_15_llm_cognitive_discovery.py +684 -0
- tests/test_16_unified_dx_flow.py +947 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/top_level.txt +0 -0
jarviscore/docs/CUSTOMAGENT_GUIDE.md
@@ -1,415 +1,2181 @@

# CustomAgent Guide

CustomAgent lets you integrate your **existing agent code** with JarvisCore's networking and orchestration capabilities.

**You keep**: Your execution logic, LLM calls, and business logic.

**Framework provides**: Agent discovery, peer communication, workflow orchestration, and multi-node deployment.

---

## Table of Contents

1. [Prerequisites](#prerequisites)
2. [Choose Your Mode](#choose-your-mode)
3. [P2P Mode](#p2p-mode)
4. [ListenerAgent (v0.3.0)](#listeneragent-v030) - API-first agents without run() loops
5. [Distributed Mode](#distributed-mode)
6. [Cognitive Discovery (v0.3.0)](#cognitive-discovery-v030) - Dynamic peer awareness for LLMs
7. [FastAPI Integration (v0.3.0)](#fastapi-integration-v030) - 3-line setup with JarvisLifespan
8. [Cloud Deployment (v0.3.0)](#cloud-deployment-v030) - Self-registration for containers
9. [API Reference](#api-reference)
10. [Multi-Node Deployment](#multi-node-deployment)
11. [Error Handling](#error-handling)
12. [Troubleshooting](#troubleshooting)

---

## Prerequisites

### Installation

```bash
pip install jarviscore-framework
```

### Your LLM Client

Throughout this guide, we use `MyLLMClient()` as a placeholder for your LLM. Replace it with your actual client:

```python
# Example: OpenAI
from openai import OpenAI
client = OpenAI()

def chat(prompt: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message.content

# Example: Anthropic
from anthropic import Anthropic
client = Anthropic()

def chat(prompt: str) -> str:
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}]
    )
    return response.content[0].text

# Example: Local/Custom
class MyLLMClient:
    def chat(self, prompt: str) -> str:
        # Your implementation
        return "response"
```
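
The examples above return a plain string from a single prompt. Later sections of this guide also call `self.llm.chat(messages, tools=..., system=...)` and `self.llm.continue_with_tool_result(...)`, and read keys such as `type`, `tool_name`, `tool_args`, `tool_use_id`, and `content` from the result. That richer interface belongs to *your* client wrapper, not to JarvisCore; the sketch below only illustrates the assumed shape (the class name, method names, and dict keys are placeholders to adapt to your provider's SDK):

```python
class ToolCapableLLMClient:
    """Hypothetical wrapper matching the call style used later in this guide."""

    def chat(self, messages, tools=None, system=""):
        # Some examples pass a plain prompt string, others a messages list.
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        # Call your provider here and translate its reply into the dict shape
        # the later examples read:
        #   {"type": "tool_use", "tool_name": ..., "tool_args": ..., "tool_use_id": ...}
        #   {"type": "text", "content": ...}
        return {"type": "text", "content": "stub response"}

    def continue_with_tool_result(self, messages, tool_use_id, tool_result, tools=None):
        # Feed the tool result back to your provider and return the next turn
        # in the same dict shape as chat().
        return {"type": "text", "content": f"Used tool result: {tool_result}"}
```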

---

## Choose Your Mode

```
┌──────────────────────────┐
│ Which mode should I use? │
└──────────────────────────┘
              │
              ▼
┌───────────────────────────────┐
│ Do agents need to coordinate  │
│ continuously in real-time?    │
└───────────────────────────────┘
        │               │
       YES              NO
        │               │
        ▼               ▼
  ┌──────────┐   ┌───────────────────────┐
  │ P2P Mode │   │ Do you have task      │
  └──────────┘   │ pipelines with        │
                 │ dependencies?         │
                 └───────────────────────┘
                     │           │
                    YES          NO
                     │           │
                     ▼           ▼
             ┌────────────┐  ┌──────────┐
             │Distributed │  │ P2P Mode │
             │   Mode     │  └──────────┘
             └────────────┘
```

### Quick Comparison

| Feature | P2P Mode (CustomAgent) | P2P Mode (ListenerAgent) | Distributed Mode |
|---------|------------------------|--------------------------|------------------|
| **Primary method** | `run()` - continuous loop | `on_peer_request()` handlers | `execute_task()` - on-demand |
| **Communication** | Direct peer messaging | Handler-based (no loop) | Workflow orchestration |
| **Best for** | Custom message loops | API-first agents, FastAPI | Pipelines, batch processing |
| **Coordination** | Agents self-coordinate | Framework handles loop | Framework coordinates |
| **Supports workflows** | No | No | Yes |

> **New in v0.3.0**: `ListenerAgent` lets you write P2P agents without managing the `run()` loop yourself. Just implement `on_peer_request()` and `on_peer_notify()` handlers.
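
Whichever branch you land on, the decision surfaces as a single constructor argument. The two calls below are the same ones used in the P2P and Distributed examples later in this guide (ports and node names are simply the values used there):

```python
from jarviscore import Mesh

# P2P mode: agents run continuously and message each other directly
p2p_mesh = Mesh(mode="p2p", config={"bind_port": 7950, "node_name": "my-node"})

# Distributed mode: the framework orchestrates task pipelines via mesh.workflow()
pipeline_mesh = Mesh(mode="distributed", config={"bind_port": 7950, "node_name": "pipeline-node"})
```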

---

## P2P Mode

P2P mode is for agents that run continuously and communicate directly with each other.

### Migration Overview

```
YOUR PROJECT STRUCTURE
──────────────────────────────────────────────────────────────────

BEFORE (standalone):          AFTER (with JarvisCore):
├── my_agent.py               ├── agents.py   ← Modified agent code
└── (run directly)            └── main.py     ← NEW entry point
                                    ▲
                                    │
                              This is now how you
                              start your agents
```

### Step 1: Install the Framework

```bash
pip install jarviscore-framework
```

### Step 2: Your Existing Code (Before)

Let's say you have a standalone agent like this:

```python
# my_agent.py (YOUR EXISTING CODE)
class MyResearcher:
    """Your existing agent - runs standalone."""

    def __init__(self):
        self.llm = MyLLMClient()

    def research(self, query: str) -> str:
        return self.llm.chat(f"Research: {query}")

# You currently run it directly:
if __name__ == "__main__":
    agent = MyResearcher()
    result = agent.research("What is AI?")
    print(result)
```

### Step 3: Modify Your Agent Code → `agents.py`

Convert your existing class to inherit from `CustomAgent`:

```python
# agents.py (MODIFIED VERSION OF YOUR CODE)
import asyncio
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    """Your agent, now framework-integrated."""

    # NEW: Required class attributes for discovery
    role = "researcher"
    capabilities = ["research", "analysis"]

    async def setup(self):
        """NEW: Called once on startup. Move your __init__ logic here."""
        await super().setup()
        self.llm = MyLLMClient()  # Your existing initialization

    async def run(self):
        """NEW: Main loop - replaces your if __name__ == '__main__' block."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    query = msg.data.get("question", "")
                    # YOUR EXISTING LOGIC:
                    result = self.llm.chat(f"Research: {query}")
                    await self.peers.respond(msg, {"response": result})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        """
        Required by base Agent class (@abstractmethod).

        In P2P mode, your main logic lives in run(), not here.
        This must exist because Python requires all abstract methods
        to be implemented, or you get TypeError on instantiation.
        """
        return {"status": "success", "note": "This agent uses run() for P2P mode"}
```

**What changed:**

| Before | After |
|--------|-------|
| `class MyResearcher:` | `class ResearcherAgent(CustomAgent):` |
| `def __init__(self):` | `async def setup(self):` + `await super().setup()` |
| `if __name__ == "__main__":` | `async def run(self):` loop |
| Direct method calls | Peer message handling |
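
The last row is the real behavioral change: where the standalone version called the researcher object directly, another agent now reaches it over the mesh. A minimal sketch of that calling side, using the `ask_peer` peer tool covered later in this guide (the payload keys match what the `run()` loop above reads from `msg.data`):

```python
# agents.py (sketch of a second agent on the same mesh)
from jarviscore.profiles import CustomAgent


class AssistantAgent(CustomAgent):
    role = "assistant"
    capabilities = ["coordination"]

    async def ask_researcher(self, question: str):
        """Replaces the old direct self.researcher.research(question) call."""
        if not self.peers:
            return "No researcher available"
        # ask_peer routes the request to the agent whose role is "researcher";
        # its run() loop answers with {"response": ...}
        return await self.peers.as_tool().execute(
            "ask_peer",
            {"role": "researcher", "question": question},
        )

    async def execute_task(self, task: dict) -> dict:
        return {"status": "success"}
```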

> **Note**: This is a minimal example. For the full pattern with **LLM-driven peer communication** (where your LLM autonomously decides when to call other agents), see the [Complete Example](#complete-example-llm-driven-peer-communication) below.

### Step 4: Create New Entry Point → `main.py`

**This is your NEW main file.** Instead of running `python my_agent.py`, you'll run `python main.py`.

```python
# main.py (NEW FILE - YOUR NEW ENTRY POINT)
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent


async def main():
    # Create the mesh network
    mesh = Mesh(
        mode="p2p",
        config={
            "bind_port": 7950,       # Port for P2P communication
            "node_name": "my-node",  # Identifies this node in the network
        }
    )

    # Register your agent(s)
    mesh.add(ResearcherAgent)

    # Start the mesh (calls setup() on all agents)
    await mesh.start()

    # Run forever - agents handle their own work in run() loops
    await mesh.run_forever()


if __name__ == "__main__":
    asyncio.run(main())
```

**Why a new entry file?**

| Reason | Explanation |
|--------|-------------|
| **Mesh setup** | The Mesh handles networking, discovery, and lifecycle |
| **Multiple agents** | You can add many agents to one mesh |
| **Clean separation** | Agent logic in `agents.py`, orchestration in `main.py` |
| **Standard pattern** | Consistent entry point across all JarvisCore projects |

### Step 5: Run Your Agents

```bash
# OLD WAY (no longer used):
# python my_agent.py

# NEW WAY:
python main.py
```

---

### Complete Example: LLM-Driven Peer Communication

This is the **key pattern** for P2P mode. Your LLM gets peer tools added to its toolset, and it **autonomously decides** when to ask other agents for help.

```
               LLM-DRIVEN PEER COMMUNICATION

User: "Analyze this sales data"
   │
   ▼
┌─────────────────────────────────────┐
│         ASSISTANT'S LLM             │
│                                     │
│  Tools available:                   │
│  - web_search (local)               │
│  - ask_peer (peer)   ◄── NEW!       │
│  - broadcast (peer)  ◄── NEW!       │
│                                     │
│  LLM decides: "I need analysis      │
│  help, let me ask the analyst"      │
└─────────────────────────────────────┘
   │
   ▼  uses ask_peer tool
┌─────────────────────────────────────┐
│         ANALYST AGENT               │
│   (processes with its own LLM)      │
└─────────────────────────────────────┘
   │
   ▼  returns analysis
┌─────────────────────────────────────┐
│         ASSISTANT'S LLM             │
│  "Based on the analyst's findings,  │
│   here's your answer..."            │
└─────────────────────────────────────┘
```

**The key insight**: You add peer tools to your LLM's toolset. The LLM decides when to use them.

```python
# agents.py
import asyncio
from jarviscore.profiles import CustomAgent


class AnalystAgent(CustomAgent):
    """
    Analyst agent - a specialist in data analysis.

    This agent:
    1. Listens for incoming requests from peers
    2. Processes requests using its own LLM
    3. Responds with analysis results
    """
    role = "analyst"
    capabilities = ["analysis", "data_interpretation", "reporting"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()  # Your LLM client

    def get_tools(self) -> list:
        """
        Tools available to THIS agent's LLM.

        The analyst has local analysis tools.
        It can also ask other peers if needed.
        """
        tools = [
            {
                "name": "statistical_analysis",
                "description": "Run statistical analysis on numeric data",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "data": {"type": "string", "description": "Data to analyze"}
                    },
                    "required": ["data"]
                }
            }
        ]

        # ADD PEER TOOLS - so LLM can ask other agents if needed
        if self.peers:
            tools.extend(self.peers.as_tool().schema)

        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        """
        Execute a tool by name.

        Routes to peer tools or local tools as appropriate.
        """
        # PEER TOOLS - check and execute
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)

        # LOCAL TOOLS
        if tool_name == "statistical_analysis":
            data = args.get("data", "")
            return f"Analysis of '{data}': mean=150.3, std=23.4, trend=positive"

        return f"Unknown tool: {tool_name}"

    async def process_with_llm(self, query: str) -> str:
        """Process a request using LLM with tools."""
        system_prompt = """You are an expert data analyst.
You have tools for statistical analysis.
Analyze data thoroughly and provide insights."""

        tools = self.get_tools()
        messages = [{"role": "user", "content": query}]

        # Call LLM with tools
        response = self.llm.chat(messages, tools=tools, system=system_prompt)

        # Handle tool use if LLM decides to use a tool
        if response.get("type") == "tool_use":
            tool_result = await self.execute_tool(
                response["tool_name"],
                response["tool_args"]
            )
            # Continue conversation with tool result
            response = self.llm.continue_with_tool_result(
                messages, response["tool_use_id"], tool_result
            )

        return response.get("content", "Analysis complete.")

    async def run(self):
        """Listen for incoming requests from peers."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    query = msg.data.get("question", msg.data.get("query", ""))

                    # Process with LLM
                    result = await self.process_with_llm(query)

                    await self.peers.respond(msg, {"response": result})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        """Required by base class."""
        return {"status": "success"}


class AssistantAgent(CustomAgent):
    """
    Assistant agent - coordinates with other specialists.

    This agent:
    1. Has its own LLM for reasoning
    2. Has peer tools (ask_peer, broadcast) in its toolset
    3. LLM AUTONOMOUSLY decides when to ask other agents
    """
    role = "assistant"
    capabilities = ["chat", "coordination", "search"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()  # Your LLM client
        self.tool_calls = []      # Track tool usage

    def get_tools(self) -> list:
        """
        Tools available to THIS agent's LLM.

        IMPORTANT: This includes PEER TOOLS!
        The LLM sees ask_peer, broadcast_update, list_peers
        and decides when to use them.
        """
        # Local tools
        tools = [
            {
                "name": "web_search",
                "description": "Search the web for information",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Search query"}
                    },
                    "required": ["query"]
                }
            }
        ]

        # ADD PEER TOOLS TO LLM'S TOOLSET
        # This is the key! LLM will see:
        # - ask_peer: Ask another agent for help
        # - broadcast_update: Send message to all peers
        # - list_peers: See available agents
        if self.peers:
            tools.extend(self.peers.as_tool().schema)

        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        """
        Execute a tool by name.

        When LLM calls ask_peer, this routes to the peer system.
        """
        self.tool_calls.append({"tool": tool_name, "args": args})

        # PEER TOOLS - route to peer system
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)

        # LOCAL TOOLS
        if tool_name == "web_search":
            return f"Search results for '{args.get('query')}': Found 10 articles."

        return f"Unknown tool: {tool_name}"

    async def chat(self, user_message: str) -> str:
        """
        Complete LLM chat with autonomous tool use.

        The LLM sees all tools (including peer tools) and decides
        which to use. If user asks for analysis, LLM will use
        ask_peer to contact the analyst.
        """
        # System prompt tells LLM about its capabilities
        system_prompt = """You are a helpful assistant.

You have access to these capabilities:
- web_search: Search the web for information
- ask_peer: Ask specialist agents for help (e.g., analyst for data analysis)
- broadcast_update: Send updates to all connected agents
- list_peers: See what other agents are available

When a user needs data analysis, USE ask_peer to ask the analyst.
When a user needs web information, USE web_search.
Be concise in your responses."""

        tools = self.get_tools()
        messages = [{"role": "user", "content": user_message}]

        # Call LLM - it will decide which tools to use
        response = self.llm.chat(messages, tools=tools, system=system_prompt)

        # Handle tool use loop
        while response.get("type") == "tool_use":
            tool_name = response["tool_name"]
            tool_args = response["tool_args"]

            # Execute the tool (might be ask_peer!)
            tool_result = await self.execute_tool(tool_name, tool_args)

            # Continue conversation with tool result
            response = self.llm.continue_with_tool_result(
                messages, response["tool_use_id"], tool_result, tools
            )

        return response.get("content", "")

    async def run(self):
        """Main loop - listen for incoming requests."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    query = msg.data.get("query", "")
                    result = await self.chat(query)
                    await self.peers.respond(msg, {"response": result})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        """Required by base class."""
        return {"status": "success"}
```

```python
# main.py
import asyncio
from jarviscore import Mesh
from agents import AnalystAgent, AssistantAgent


async def main():
    mesh = Mesh(
        mode="p2p",
        config={
            "bind_port": 7950,
            "node_name": "my-agents",
        }
    )

    # Add both agents
    mesh.add(AnalystAgent)
    assistant = mesh.add(AssistantAgent)

    await mesh.start()

    # Start analyst listening in background
    analyst = mesh.get_agent("analyst")
    analyst_task = asyncio.create_task(analyst.run())

    # Give time for setup
    await asyncio.sleep(0.5)

    # User asks a question - LLM will autonomously decide to use ask_peer
    print("User: Please analyze the Q4 sales trends")
    response = await assistant.chat("Please analyze the Q4 sales trends")
    print(f"Assistant: {response}")

    # Check what tools were used
    print(f"\nTools used: {assistant.tool_calls}")
    # Output: [{'tool': 'ask_peer', 'args': {'role': 'analyst', 'question': '...'}}]

    # Cleanup
    analyst.request_shutdown()
    analyst_task.cancel()
    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```

### Key Concepts for P2P Mode

#### Adding Peer Tools to Your LLM

This is the most important pattern. Add peer tools to `get_tools()`:

```python
def get_tools(self) -> list:
    tools = [
        # Your local tools...
    ]

    # ADD PEER TOOLS - LLM will see ask_peer, broadcast, list_peers
    if self.peers:
        tools.extend(self.peers.as_tool().schema)

    return tools
```

#### Routing Tool Execution

Route tool calls to either peer tools or local tools:

```python
async def execute_tool(self, tool_name: str, args: dict) -> str:
    # Check peer tools first
    if self.peers and tool_name in self.peers.as_tool().tool_names:
        return await self.peers.as_tool().execute(tool_name, args)

    # Then local tools
    if tool_name == "my_local_tool":
        return self.my_local_tool(args)

    return f"Unknown tool: {tool_name}"
```

#### System Prompt for Peer Awareness

Tell the LLM about peer capabilities:

```python
system_prompt = """You are a helpful assistant.

You have access to:
- ask_peer: Ask specialist agents for help
- broadcast_update: Send updates to all agents

When a user needs specialized help, USE ask_peer to contact the right agent."""
```

#### The `run()` Loop

Listen for incoming requests and process with LLM:

```python
async def run(self):
    while not self.shutdown_requested:
        if self.peers:
            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                result = await self.process_with_llm(msg.data)
                await self.peers.respond(msg, {"response": result})
        await asyncio.sleep(0.1)
```
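
The loop above only answers requests. The same peer toolset also exposes `broadcast_update` (listed in the tool descriptions earlier); the sketch below shows the sending side, which other agents receive as a notification (`msg.is_notify`). The payload keys are illustrative - receivers simply read whatever you put in `msg.data`:

```python
async def announce_progress(self, detail: str):
    """Sketch: notify all peers from inside any agent method."""
    if self.peers:
        await self.peers.as_tool().execute(
            "broadcast_update",
            {"message": detail},  # illustrative payload; receivers read msg.data
        )
```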

---

## ListenerAgent (v0.3.0)

**ListenerAgent** is for developers who want P2P communication without writing the `run()` loop themselves.

### The Problem with CustomAgent for P2P

Every P2P CustomAgent needs this boilerplate:

```python
# BEFORE (CustomAgent) - You write the same loop every time
class MyAgent(CustomAgent):
    role = "processor"
    capabilities = ["processing"]

    async def run(self):
        """You have to write this loop for every P2P agent."""
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    # Handle request
                    result = self.process(msg.data)
                    await self.peers.respond(msg, {"response": result})
                elif msg and msg.is_notify:
                    # Handle notification
                    self.handle_notify(msg.data)
            await asyncio.sleep(0.1)

    async def execute_task(self, task):
        """Still required even though you're using run()."""
        return {"status": "success"}
```

### The Solution: ListenerAgent

```python
# AFTER (ListenerAgent) - Just implement the handlers
from jarviscore.profiles import ListenerAgent

class MyAgent(ListenerAgent):
    role = "processor"
    capabilities = ["processing"]

    async def on_peer_request(self, msg):
        """Called when another agent sends a request."""
        return {"result": msg.data.get("task", "").upper()}

    async def on_peer_notify(self, msg):
        """Called when another agent broadcasts a notification."""
        print(f"Notification received: {msg.data}")
```

**What you no longer need:**
- ❌ `run()` loop with `while not self.shutdown_requested`
- ❌ `self.peers.receive()` and `self.peers.respond()` boilerplate
- ❌ `execute_task()` stub method
- ❌ `asyncio.sleep()` timing

**What the framework handles:**
- ✅ Message receiving loop
- ✅ Routing requests to `on_peer_request()`
- ✅ Routing notifications to `on_peer_notify()`
- ✅ Automatic response sending
- ✅ Shutdown handling

### Complete ListenerAgent Example

```python
# agents.py
from jarviscore.profiles import ListenerAgent


class AnalystAgent(ListenerAgent):
    """A data analyst that responds to peer requests."""

    role = "analyst"
    capabilities = ["analysis", "data_interpretation"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()  # Your LLM client

    async def on_peer_request(self, msg):
        """
        Handle incoming requests from other agents.

        Args:
            msg: IncomingMessage with msg.data, msg.sender_role, etc.

        Returns:
            dict: Response sent back to the requesting agent
        """
        query = msg.data.get("question", "")

        # Your analysis logic
        result = self.llm.chat(f"Analyze: {query}")

        return {"response": result, "status": "success"}

    async def on_peer_notify(self, msg):
        """
        Handle broadcast notifications.

        Args:
            msg: IncomingMessage with notification data

        Returns:
            None (notifications don't expect responses)
        """
        print(f"[{self.role}] Received notification: {msg.data}")


class AssistantAgent(ListenerAgent):
    """An assistant that coordinates with specialists."""

    role = "assistant"
    capabilities = ["chat", "coordination"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()

    async def on_peer_request(self, msg):
        """Handle incoming chat requests."""
        query = msg.data.get("query", "")

        # Use peer tools to ask specialists
        if self.peers and "data" in query.lower():
            # Ask the analyst for help
            analyst_response = await self.peers.as_tool().execute(
                "ask_peer",
                {"role": "analyst", "question": query}
            )
            return {"response": analyst_response.get("response", "")}

        # Handle directly
        return {"response": self.llm.chat(query)}
```

```python
# main.py
import asyncio
from jarviscore import Mesh
from agents import AnalystAgent, AssistantAgent


async def main():
    mesh = Mesh(mode="p2p", config={"bind_port": 7950})

    mesh.add(AnalystAgent)
    mesh.add(AssistantAgent)

    await mesh.start()

    # Agents automatically run their listeners
    await mesh.run_forever()


if __name__ == "__main__":
    asyncio.run(main())
```

### When to Use ListenerAgent vs CustomAgent

| Use ListenerAgent when... | Use CustomAgent when... |
|---------------------------|-------------------------|
| You want the simplest P2P agent | You need custom message loop timing |
| Request/response pattern fits your use case | You need to initiate messages proactively |
| You're integrating with FastAPI | You need fine-grained control over the loop |
| You want less boilerplate | You have complex coordination logic |

### ListenerAgent with FastAPI

ListenerAgent shines with FastAPI integration. See [FastAPI Integration](#fastapi-integration-v030) below.

---

## Distributed Mode

Distributed mode is for task pipelines where the framework orchestrates execution order and passes data between steps.

### Migration Overview

```
YOUR PROJECT STRUCTURE
──────────────────────────────────────────────────────────────────

BEFORE (standalone):              AFTER (with JarvisCore):
├── pipeline.py                   ├── agents.py   ← Modified agent code
└── (manual orchestration)        └── main.py     ← NEW entry point
                                        ▲
                                        │
                                  This is now how you
                                  start your pipeline
```

### Step 1: Install the Framework

```bash
pip install jarviscore-framework
```

### Step 2: Your Existing Code (Before)

Let's say you have a manual pipeline like this:

```python
# pipeline.py (YOUR EXISTING CODE)
class Researcher:
    def execute(self, task: str) -> dict:
        return {"output": f"Research on: {task}"}

class Writer:
    def execute(self, task: str, context: dict = None) -> dict:
        return {"output": f"Article based on: {context}"}

# Manual orchestration - you pass data between steps yourself:
if __name__ == "__main__":
    researcher = Researcher()
    writer = Writer()

    research = researcher.execute("AI trends")
    article = writer.execute("Write article", context=research)  # Manual!
    print(article)
```

**Problems with this approach:**
- You manually pass context between steps
- No dependency management
- Hard to run on multiple machines
- No automatic retries on failure

### Step 3: Modify Your Agent Code → `agents.py`

Convert your existing classes to inherit from `CustomAgent`:

```python
# agents.py (MODIFIED VERSION OF YOUR CODE)
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    """Your researcher, now framework-integrated."""

    # NEW: Required class attributes
    role = "researcher"
    capabilities = ["research"]

    async def setup(self):
        """NEW: Called once on startup."""
        await super().setup()
        # Your initialization here (DB connections, LLM clients, etc.)

    async def execute_task(self, task: dict) -> dict:
        """
        MODIFIED: Now receives a task dict, returns a result dict.

        The framework calls this method - you don't call it manually.
        """
        task_desc = task.get("task", "")

        # YOUR EXISTING LOGIC:
        result = f"Research on: {task_desc}"

        # NEW: Return format for framework
        return {
            "status": "success",
            "output": result
        }


class WriterAgent(CustomAgent):
    """Your writer, now framework-integrated."""

    role = "writer"
    capabilities = ["writing"]

    async def setup(self):
        await super().setup()

    async def execute_task(self, task: dict) -> dict:
        """
        Context from previous steps is AUTOMATICALLY injected.
        No more manual passing!
        """
        task_desc = task.get("task", "")
        context = task.get("context", {})  # ← Framework injects this!

        # YOUR EXISTING LOGIC:
        research_output = context.get("research", {}).get("output", "")
        result = f"Article based on: {research_output}"

        return {
            "status": "success",
            "output": result
        }
```

**What changed:**

| Before | After |
|--------|-------|
| `class Researcher:` | `class ResearcherAgent(CustomAgent):` |
| `def execute(self, task):` | `async def execute_task(self, task: dict):` |
| Return anything | Return `{"status": "...", "output": ...}` |
| Manual `context=research` | Framework auto-injects via `depends_on` |

### Step 4: Create New Entry Point → `main.py`

**This is your NEW main file.** Instead of running `python pipeline.py`, you'll run `python main.py`.

```python
# main.py (NEW FILE - YOUR NEW ENTRY POINT)
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent, WriterAgent


async def main():
    # Create the mesh network
    mesh = Mesh(
        mode="distributed",
        config={
            "bind_port": 7950,
            "node_name": "pipeline-node",
        }
    )

    # Register your agents
    mesh.add(ResearcherAgent)
    mesh.add(WriterAgent)

    # Start the mesh (calls setup() on all agents)
    await mesh.start()

    # Define your workflow - framework handles orchestration!
    results = await mesh.workflow("content-pipeline", [
        {
            "id": "research",            # Step identifier
            "agent": "researcher",       # Which agent handles this
            "task": "AI trends 2024"     # Task description
        },
        {
            "id": "write",
            "agent": "writer",
            "task": "Write a blog post",
            "depends_on": ["research"]   # ← Framework auto-injects research output!
        }
    ])

    # Results in workflow order
    print("Research:", results[0]["output"])
    print("Article:", results[1]["output"])

    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```

**Why a new entry file?**

| Reason | Explanation |
|--------|-------------|
| **Workflow orchestration** | `mesh.workflow()` handles dependencies, ordering, retries |
| **No manual context passing** | `depends_on` automatically injects previous step outputs |
| **Multiple agents** | Register all agents in one place |
| **Multi-node ready** | Same code works across machines with `seed_nodes` config |
| **Clean separation** | Agent logic in `agents.py`, orchestration in `main.py` |

### Step 5: Run Your Pipeline

```bash
# OLD WAY (no longer used):
# python pipeline.py

# NEW WAY:
python main.py
```

---

### Complete Example: Three-Stage Content Pipeline

This example shows a research → write → review pipeline.

```python
# agents.py
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    """Researches topics and returns findings."""

    role = "researcher"
    capabilities = ["research"]

    async def setup(self):
        await super().setup()
        # self.llm = MyLLMClient()

    async def execute_task(self, task: dict) -> dict:
        topic = task.get("task", "")

        # Your research logic
        findings = f"Research findings on: {topic}"
        # findings = self.llm.chat(f"Research: {topic}")

        return {
            "status": "success",
            "output": findings
        }


class WriterAgent(CustomAgent):
    """Writes content based on research."""

    role = "writer"
    capabilities = ["writing"]

    async def setup(self):
        await super().setup()
        # self.llm = MyLLMClient()

    async def execute_task(self, task: dict) -> dict:
        instruction = task.get("task", "")
        context = task.get("context", {})  # Output from depends_on steps

        # Combine context from previous steps
        research = context.get("research", {}).get("output", "")

        # Your writing logic
        article = f"Article based on: {research}\nTopic: {instruction}"
        # article = self.llm.chat(f"Based on: {research}\nWrite: {instruction}")

        return {
            "status": "success",
            "output": article
        }


class EditorAgent(CustomAgent):
    """Reviews and polishes content."""

    role = "editor"
    capabilities = ["editing", "review"]

    async def setup(self):
        await super().setup()

    async def execute_task(self, task: dict) -> dict:
        instruction = task.get("task", "")
        context = task.get("context", {})

        # Get output from the writing step
        draft = context.get("write", {}).get("output", "")

        # Your editing logic
        polished = f"[EDITED] {draft}"

        return {
            "status": "success",
            "output": polished
        }
```

```python
# main.py
import asyncio
from jarviscore import Mesh
from agents import ResearcherAgent, WriterAgent, EditorAgent


async def main():
    mesh = Mesh(
        mode="distributed",
        config={
            "bind_port": 7950,
            "node_name": "content-node",
        }
    )

    mesh.add(ResearcherAgent)
    mesh.add(WriterAgent)
    mesh.add(EditorAgent)

    await mesh.start()

    # Define a multi-step workflow with dependencies
    results = await mesh.workflow("content-pipeline", [
        {
            "id": "research",              # Unique step identifier
            "agent": "researcher",         # Which agent handles this
            "task": "AI trends in 2024"    # Task description
        },
        {
            "id": "write",
            "agent": "writer",
            "task": "Write a blog post about the research",
            "depends_on": ["research"]     # Wait for research, inject its output
        },
        {
            "id": "edit",
            "agent": "editor",
            "task": "Polish and improve the article",
            "depends_on": ["write"]        # Wait for writing step
        }
    ])

    # Results are in workflow order
    print("Research:", results[0]["output"])
    print("Draft:", results[1]["output"])
    print("Final:", results[2]["output"])

    await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())
```

### Key Concepts for Distributed Mode

#### The `execute_task()` Method

Called by the workflow engine when a task is assigned to your agent.

```python
async def execute_task(self, task: dict) -> dict:
    # task dict contains:
    # - "id": str - the step ID from the workflow
    # - "task": str - the task description
    # - "context": dict - outputs from depends_on steps (keyed by step ID)

    return {
        "status": "success",   # or "error"
        "output": result,      # your result data
        # "error": "message"   # if status is "error"
    }
```

#### The `task` Dictionary Structure

```python
{
    "id": "step_id",                 # Step identifier from workflow
    "task": "task description",      # What to do
    "context": {                     # Outputs from dependencies
        "previous_step_id": {
            "status": "success",
            "output": "..."          # Whatever previous step returned
        }
    }
}
```

#### Workflow Step Definition

```python
{
    "id": "unique_step_id",        # Required: unique identifier
    "agent": "agent_role",         # Required: which agent handles this
    "task": "description",         # Required: task description
    "depends_on": ["step1", ...]   # Optional: steps that must complete first
}
```

#### Parallel Execution

Steps without `depends_on` or with satisfied dependencies run in parallel:

```python
results = await mesh.workflow("parallel-example", [
    {"id": "a", "agent": "worker", "task": "Task A"},  # Runs immediately
    {"id": "b", "agent": "worker", "task": "Task B"},  # Runs in parallel with A
    {"id": "c", "agent": "worker", "task": "Task C",
     "depends_on": ["a", "b"]},                        # Waits for A and B
])
```
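
The return format documented above also allows `"status": "error"`. A minimal sketch of reporting a failure from `execute_task()` in that documented shape, rather than letting the exception escape (`self.do_the_work` is a placeholder for your own logic):

```python
async def execute_task(self, task: dict) -> dict:
    try:
        result = await self.do_the_work(task.get("task", ""))  # placeholder for your logic
        return {"status": "success", "output": result}
    except Exception as exc:
        # Report the failure in the documented result shape
        return {"status": "error", "error": str(exc)}
```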

---

## Cognitive Discovery (v0.3.0)

**Cognitive Discovery** lets your LLM dynamically learn about available peers instead of hardcoding agent names in prompts.

### The Problem: Hardcoded Peer Names

Before v0.3.0, you had to hardcode peer information in your system prompts:

```python
# BEFORE: Hardcoded peer names - breaks when peers change
system_prompt = """You are a helpful assistant.

You have access to:
- ask_peer: Ask specialist agents for help
- Use role="analyst" for data analysis
- Use role="researcher" for research tasks
- Use role="writer" for content creation

When a user needs data analysis, USE ask_peer with role="analyst"."""
```

**Problems:**
- If you add a new agent, you must update every prompt
- If an agent is offline, the LLM still tries to call it
- Prompts become stale as your system evolves
- Difficult to manage across many agents

### The Solution: `get_cognitive_context()`

```python
# AFTER: Dynamic peer awareness - always up to date
async def get_system_prompt(self) -> str:
    base_prompt = """You are a helpful assistant.

You have access to peer tools for collaborating with other agents."""

    # Generate LLM-ready peer descriptions dynamically
    if self.peers:
        peer_context = self.peers.get_cognitive_context()
        return f"{base_prompt}\n\n{peer_context}"

    return base_prompt
```

The `get_cognitive_context()` method generates text like:

```
Available Peers:
- analyst (capabilities: analysis, data_interpretation)
  Use ask_peer with role="analyst" for data analysis tasks
- researcher (capabilities: research, web_search)
  Use ask_peer with role="researcher" for research tasks
```

### Complete Example: Dynamic Peer Discovery

```python
# agents.py
from jarviscore.profiles import CustomAgent


class AssistantAgent(CustomAgent):
    """An assistant that dynamically discovers and uses peers."""

    role = "assistant"
    capabilities = ["chat", "coordination"]

    async def setup(self):
        await super().setup()
        self.llm = MyLLMClient()

    def get_system_prompt(self) -> str:
        """Build system prompt with dynamic peer context."""
        base_prompt = """You are a helpful AI assistant.

When users ask questions that require specialized knowledge:
1. Check what peers are available
2. Use ask_peer to get help from the right specialist
3. Synthesize their response for the user"""

        # DYNAMIC: Add current peer information
        if self.peers:
            peer_context = self.peers.get_cognitive_context()
            return f"{base_prompt}\n\n{peer_context}"

        return base_prompt

    def get_tools(self) -> list:
        """Get tools including peer tools."""
        tools = [
            # Your local tools...
        ]

        if self.peers:
            tools.extend(self.peers.as_tool().schema)

        return tools

    async def chat(self, user_message: str) -> str:
        """Chat with dynamic peer awareness."""
        # System prompt now includes current peer info
        system = self.get_system_prompt()
        tools = self.get_tools()

        response = self.llm.chat(
            messages=[{"role": "user", "content": user_message}],
            tools=tools,
            system=system
        )

        # Handle tool use...
        return response.get("content", "")
```
|
|
1365
|
+
|
|
1366
|
+
### Benefits of Cognitive Discovery
|
|
1367
|
+
|
|
1368
|
+
| Before (Hardcoded) | After (Dynamic) |
|
|
1369
|
+
|--------------------|-----------------|
|
|
1370
|
+
| Update prompts manually when peers change | Prompts auto-update |
|
|
1371
|
+
| LLM tries to call offline agents | Only shows available agents |
|
|
1372
|
+
| Difficult to manage at scale | Scales automatically |
|
|
1373
|
+
| Stale documentation in prompts | Always current |
|
|
1374
|
+
|
|
1375
|
+
---
|
|
1376
|
+
|
|
1377
|
+
## FastAPI Integration (v0.3.0)
|
|
1378
|
+
|
|
1379
|
+
**JarvisLifespan** reduces FastAPI integration from ~100 lines to 3 lines.
|
|
1380
|
+
|
|
1381
|
+
### The Problem: Manual Lifecycle Management
|
|
1382
|
+
|
|
1383
|
+
Before v0.3.0, integrating an agent with FastAPI required manual lifecycle management:
|
|
1384
|
+
|
|
1385
|
+
```python
|
|
1386
|
+
# BEFORE: ~100 lines of boilerplate
|
|
1387
|
+
from contextlib import asynccontextmanager
|
|
1388
|
+
from fastapi import FastAPI
|
|
1389
|
+
from jarviscore import Mesh
|
|
1390
|
+
from jarviscore.profiles import CustomAgent
|
|
1391
|
+
import asyncio
|
|
1392
|
+
|
|
1393
|
+
|
|
1394
|
+
class MyAgent(CustomAgent):
|
|
1395
|
+
role = "processor"
|
|
1396
|
+
capabilities = ["processing"]
|
|
1397
|
+
|
|
1398
|
+
async def run(self):
|
|
1399
|
+
while not self.shutdown_requested:
|
|
1400
|
+
if self.peers:
|
|
1401
|
+
msg = await self.peers.receive(timeout=0.5)
|
|
1402
|
+
if msg and msg.is_request:
|
|
1403
|
+
result = self.process(msg.data)
|
|
1404
|
+
await self.peers.respond(msg, {"response": result})
|
|
1405
|
+
await asyncio.sleep(0.1)
|
|
1406
|
+
|
|
1407
|
+
async def execute_task(self, task):
|
|
1408
|
+
return {"status": "success"}
|
|
1409
|
+
|
|
1410
|
+
|
|
1411
|
+
# Manual lifecycle management
|
|
1412
|
+
mesh = None
|
|
1413
|
+
agent = None
|
|
1414
|
+
run_task = None
|
|
1415
|
+
|
|
1416
|
+
|
|
1417
|
+
@asynccontextmanager
|
|
1418
|
+
async def lifespan(app: FastAPI):
|
|
1419
|
+
global mesh, agent, run_task
|
|
1420
|
+
|
|
1421
|
+
# Startup
|
|
1422
|
+
mesh = Mesh(mode="p2p", config={"bind_port": 7950})
|
|
1423
|
+
agent = mesh.add(MyAgent)
|
|
1424
|
+
await mesh.start()
|
|
1425
|
+
run_task = asyncio.create_task(agent.run())
|
|
1426
|
+
|
|
1427
|
+
yield
|
|
1428
|
+
|
|
1429
|
+
# Shutdown
|
|
1430
|
+
agent.request_shutdown()
|
|
1431
|
+
run_task.cancel()
|
|
1432
|
+
await mesh.stop()
|
|
1433
|
+
|
|
1434
|
+
|
|
1435
|
+
app = FastAPI(lifespan=lifespan)
|
|
1436
|
+
|
|
1437
|
+
|
|
1438
|
+
@app.post("/process")
|
|
1439
|
+
async def process(data: dict):
|
|
1440
|
+
# Your endpoint logic
|
|
1441
|
+
return {"result": "processed"}
|
|
1442
|
+
```
|
|
1443
|
+
|
|
1444
|
+
### The Solution: JarvisLifespan
|
|
1445
|
+
|
|
1446
|
+
```python
|
|
1447
|
+
# AFTER: 3 lines to integrate
|
|
1448
|
+
from fastapi import FastAPI
|
|
1449
|
+
from jarviscore.profiles import ListenerAgent
|
|
1450
|
+
from jarviscore.integrations.fastapi import JarvisLifespan
|
|
1451
|
+
|
|
1452
|
+
|
|
1453
|
+
class ProcessorAgent(ListenerAgent):
|
|
1454
|
+
role = "processor"
|
|
1455
|
+
capabilities = ["processing"]
|
|
1456
|
+
|
|
1457
|
+
async def on_peer_request(self, msg):
|
|
1458
|
+
return {"result": msg.data.get("task", "").upper()}
|
|
1459
|
+
|
|
1460
|
+
|
|
1461
|
+
# That's it - 3 lines!
|
|
1462
|
+
app = FastAPI(lifespan=JarvisLifespan(ProcessorAgent(), mode="p2p"))
|
|
1463
|
+
|
|
1464
|
+
|
|
1465
|
+
@app.post("/process")
|
|
1466
|
+
async def process(data: dict):
|
|
1467
|
+
return {"result": "processed"}
|
|
1468
|
+
```
|
|
1469
|
+
|
|
1470
|
+
### JarvisLifespan Configuration
|
|
1471
|
+
|
|
1472
|
+
```python
|
|
1473
|
+
from jarviscore.integrations.fastapi import JarvisLifespan
|
|
1474
|
+
|
|
1475
|
+
# Basic usage
|
|
1476
|
+
app = FastAPI(lifespan=JarvisLifespan(agent, mode="p2p"))
|
|
1477
|
+
|
|
1478
|
+
# With configuration
|
|
1479
|
+
app = FastAPI(
|
|
1480
|
+
lifespan=JarvisLifespan(
|
|
1481
|
+
agent,
|
|
1482
|
+
mode="p2p", # or "distributed"
|
|
1483
|
+
bind_port=7950, # P2P port
|
|
1484
|
+
seed_nodes="ip:port", # For multi-node
|
|
1485
|
+
)
|
|
1486
|
+
)
|
|
1487
|
+
```
|
|
1488
|
+
|
|
1489
|
+
### Complete FastAPI Example
|
|
1490
|
+
|
|
1491
|
+
```python
|
|
1492
|
+
# app.py
|
|
1493
|
+
from fastapi import FastAPI
|
|
1494
|
+
from pydantic import BaseModel
|
|
1495
|
+
from jarviscore.profiles import ListenerAgent
|
|
1496
|
+
from jarviscore.integrations.fastapi import JarvisLifespan
|
|
1497
|
+
|
|
1498
|
+
|
|
1499
|
+
class AnalysisRequest(BaseModel):
|
|
1500
|
+
data: str
|
|
1501
|
+
|
|
1502
|
+
|
|
1503
|
+
class AnalystAgent(ListenerAgent):
|
|
1504
|
+
"""Agent that handles both API requests and P2P messages."""
|
|
1505
|
+
|
|
1506
|
+
role = "analyst"
|
|
1507
|
+
capabilities = ["analysis"]
|
|
1508
|
+
|
|
1509
|
+
async def setup(self):
|
|
1510
|
+
await super().setup()
|
|
1511
|
+
self.llm = MyLLMClient()
|
|
1512
|
+
|
|
1513
|
+
async def on_peer_request(self, msg):
|
|
1514
|
+
"""Handle requests from other agents in the mesh."""
|
|
1515
|
+
query = msg.data.get("question", "")
|
|
1516
|
+
result = self.llm.chat(f"Analyze: {query}")
|
|
1517
|
+
return {"response": result}
|
|
1518
|
+
|
|
1519
|
+
def analyze(self, data: str) -> dict:
|
|
1520
|
+
"""Method called by API endpoint."""
|
|
1521
|
+
result = self.llm.chat(f"Analyze this data: {data}")
|
|
1522
|
+
return {"analysis": result}
|
|
1523
|
+
|
|
1524
|
+
|
|
1525
|
+
# Create agent instance
|
|
1526
|
+
analyst = AnalystAgent()
|
|
1527
|
+
|
|
1528
|
+
# Create FastAPI app with automatic lifecycle management
|
|
1529
|
+
app = FastAPI(
|
|
1530
|
+
title="Analyst Service",
|
|
1531
|
+
lifespan=JarvisLifespan(analyst, mode="p2p", bind_port=7950)
|
|
1532
|
+
)
|
|
1533
|
+
|
|
1534
|
+
|
|
1535
|
+
@app.post("/analyze")
|
|
1536
|
+
async def analyze(request: AnalysisRequest):
|
|
1537
|
+
"""API endpoint - also accessible as a peer in the mesh."""
|
|
1538
|
+
result = analyst.analyze(request.data)
|
|
1539
|
+
return result
|
|
1540
|
+
|
|
1541
|
+
|
|
1542
|
+
@app.get("/peers")
|
|
1543
|
+
async def list_peers():
|
|
1544
|
+
"""See what other agents are in the mesh."""
|
|
1545
|
+
if analyst.peers:
|
|
1546
|
+
return {"peers": analyst.peers.list()}
|
|
1547
|
+
return {"peers": []}
|
|
1548
|
+
```
|
|
1549
|
+
|
|
1550
|
+
Run with:
|
|
1551
|
+
```bash
|
|
1552
|
+
uvicorn app:app --host 0.0.0.0 --port 8000
|
|
1553
|
+
```
|
|
1554
|
+
|
|
1555
|
+
Your agent is now:
|
|
1556
|
+
- Serving HTTP API on port 8000
|
|
1557
|
+
- Participating in P2P mesh on port 7950
|
|
1558
|
+
- Discoverable by other agents
|
|
1559
|
+
- Handling startup and shutdown automatically via `JarvisLifespan`
|
|
1560
|
+
|
|
1561
|
+
### Testing the Flow
|
|
1562
|
+
|
|
1563
|
+
**Step 1: Start the FastAPI server (Terminal 1)**
|
|
1564
|
+
```bash
|
|
1565
|
+
python examples/fastapi_integration_example.py
|
|
1566
|
+
```
|
|
1567
|
+
|
|
1568
|
+
**Step 2: Join a scout agent (Terminal 2)**
|
|
1569
|
+
```bash
|
|
1570
|
+
python examples/fastapi_integration_example.py --join-as scout
|
|
1571
|
+
```
|
|
1572
|
+
|
|
1573
|
+
**Step 3: Test with curl (Terminal 3)**
|
|
1574
|
+
```bash
|
|
1575
|
+
# Chat with assistant (may delegate to analyst)
|
|
1576
|
+
curl -X POST http://localhost:8000/chat -H "Content-Type: application/json" -d '{"message": "Analyze Q4 sales trends"}'
|
|
1577
|
+
|
|
1578
|
+
# Ask analyst directly
|
|
1579
|
+
curl -X POST http://localhost:8000/ask/analyst -H "Content-Type: application/json" -d '{"message": "What are key revenue metrics?"}'
|
|
1580
|
+
|
|
1581
|
+
# See what each agent knows about peers (cognitive context)
|
|
1582
|
+
curl http://localhost:8000/agents
|
|
1583
|
+
```
|
|
1584
|
+
|
|
1585
|
+
**Expected flow for `/chat`:**
|
|
1586
|
+
1. Request goes to the **assistant** agent
|
|
1587
|
+
2. Assistant's LLM sees peers via `get_cognitive_context()`
|
|
1588
|
+
3. LLM decides to delegate to **analyst** (data analysis request)
|
|
1589
|
+
4. Assistant uses the `ask_peer` tool → P2P message to the analyst (sketched below)
|
|
1590
|
+
5. Analyst processes and responds via P2P
|
|
1591
|
+
6. Response includes `"delegated_to": "analyst"` and `"peer_data"`
|
|
1592
|
+
|
|
1593
|
+
**Example response:**
|
|
1594
|
+
```json
|
|
1595
|
+
{
|
|
1596
|
+
"message": "Analyze Q4 sales trends",
|
|
1597
|
+
"response": "Based on the analyst's findings...",
|
|
1598
|
+
"delegated_to": "analyst",
|
|
1599
|
+
"peer_data": {"analysis": "...", "confidence": 0.9}
|
|
1600
|
+
}
|
|
1601
|
+
```
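
Under the hood, the delegation in step 4 is a single `ask_peer` call made by the assistant. A minimal sketch (the question text and variable names are illustrative):

```python
# Inside the assistant agent, once its LLM decides to delegate:
if self.peers:
    peer_data = await self.peers.as_tool().execute(
        "ask_peer",
        {"role": "analyst", "question": "Analyze Q4 sales trends"}
    )
    # peer_data is what the endpoint surfaces as "peer_data" in the response above
```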
|
|
1602
|
+
|
|
1603
|
+
---
|
|
1604
|
+
|
|
1605
|
+
## Cloud Deployment (v0.3.0)
|
|
1606
|
+
|
|
1607
|
+
**Self-registration** lets agents join existing meshes without a central orchestrator - perfect for Docker, Kubernetes, and auto-scaling.
|
|
1608
|
+
|
|
1609
|
+
### The Problem: Central Orchestrator Required
|
|
1610
|
+
|
|
1611
|
+
Before v0.3.0, all agents had to be registered with a central Mesh:
|
|
1612
|
+
|
|
1613
|
+
```python
|
|
1614
|
+
# BEFORE: Central orchestrator pattern
|
|
1615
|
+
# You needed one "main" node that registered all agents
|
|
1616
|
+
|
|
1617
|
+
# main_node.py (central orchestrator)
|
|
1618
|
+
mesh = Mesh(mode="distributed", config={"bind_port": 7950})
|
|
1619
|
+
mesh.add(ResearcherAgent) # Must be on this node
|
|
1620
|
+
mesh.add(WriterAgent) # Must be on this node
|
|
1621
|
+
await mesh.start()
|
|
1622
|
+
```
|
|
1623
|
+
|
|
1624
|
+
**Problems with this approach:**
|
|
1625
|
+
- Single point of failure
|
|
1626
|
+
- Can't easily scale agent instances
|
|
1627
|
+
- Doesn't work well with Kubernetes/Docker
|
|
1628
|
+
- All agents must be on the same node or manually configured
|
|
1629
|
+
|
|
1630
|
+
### The Solution: `join_mesh()` and `leave_mesh()`
|
|
1631
|
+
|
|
1632
|
+
```python
|
|
1633
|
+
# AFTER: Self-registering agents
|
|
1634
|
+
# Each agent can join any mesh independently
|
|
1635
|
+
|
|
1636
|
+
# agent_container.py (runs in Docker/K8s)
|
|
1637
|
+
from jarviscore.profiles import ListenerAgent
|
|
1638
|
+
import os
|
|
1639
|
+
|
|
1640
|
+
|
|
1641
|
+
class WorkerAgent(ListenerAgent):
|
|
1642
|
+
role = "worker"
|
|
1643
|
+
capabilities = ["processing"]
|
|
1644
|
+
|
|
1645
|
+
async def on_peer_request(self, msg):
|
|
1646
|
+
return {"result": "processed"}
|
|
1647
|
+
|
|
1648
|
+
|
|
1649
|
+
async def main():
|
|
1650
|
+
agent = WorkerAgent()
|
|
1651
|
+
await agent.setup()
|
|
1652
|
+
|
|
1653
|
+
# Join existing mesh via environment variable
|
|
1654
|
+
seed_nodes = os.environ.get("JARVISCORE_SEED_NODES", "mesh-service:7950")
|
|
1655
|
+
await agent.join_mesh(seed_nodes=seed_nodes)
|
|
1656
|
+
|
|
1657
|
+
# Agent is now part of the mesh, discoverable by others
|
|
1658
|
+
await agent.serve_forever()
|
|
1659
|
+
|
|
1660
|
+
# Clean shutdown
|
|
1661
|
+
await agent.leave_mesh()
|
|
1662
|
+
```
|
|
1663
|
+
|
|
1664
|
+
### Environment Variables for Cloud
|
|
1665
|
+
|
|
1666
|
+
| Variable | Description | Example |
|
|
1667
|
+
|----------|-------------|---------|
|
|
1668
|
+
| `JARVISCORE_SEED_NODES` | Comma-separated list of mesh nodes | `"10.0.0.1:7950,10.0.0.2:7950"` |
|
|
1669
|
+
| `JARVISCORE_MESH_ENDPOINT` | This agent's reachable address | `"worker-pod-abc:7950"` |
|
|
1670
|
+
| `JARVISCORE_BIND_PORT` | Port to listen on | `"7950"` |
|
|
1671
|
+
|
|
1672
|
+
### Docker Deployment Example
|
|
1673
|
+
|
|
1674
|
+
```dockerfile
|
|
1675
|
+
# Dockerfile
|
|
1676
|
+
FROM python:3.11-slim
|
|
1677
|
+
WORKDIR /app
|
|
1678
|
+
COPY requirements.txt .
|
|
1679
|
+
RUN pip install -r requirements.txt
|
|
1680
|
+
COPY . .
|
|
1681
|
+
CMD ["python", "agent.py"]
|
|
1682
|
+
```
|
|
1683
|
+
|
|
1684
|
+
```python
|
|
1685
|
+
# agent.py
|
|
1686
|
+
import asyncio
|
|
1687
|
+
import os
|
|
1688
|
+
from jarviscore.profiles import ListenerAgent
|
|
1689
|
+
|
|
1690
|
+
|
|
1691
|
+
class WorkerAgent(ListenerAgent):
|
|
1692
|
+
role = "worker"
|
|
1693
|
+
capabilities = ["processing"]
|
|
1694
|
+
|
|
1695
|
+
async def on_peer_request(self, msg):
|
|
1696
|
+
task = msg.data.get("task", "")
|
|
1697
|
+
return {"result": f"Processed: {task}"}
|
|
1698
|
+
|
|
1699
|
+
|
|
1700
|
+
async def main():
|
|
1701
|
+
agent = WorkerAgent()
|
|
1702
|
+
await agent.setup()
|
|
1703
|
+
|
|
1704
|
+
# Configuration from environment
|
|
1705
|
+
seed_nodes = os.environ.get("JARVISCORE_SEED_NODES")
|
|
1706
|
+
mesh_endpoint = os.environ.get("JARVISCORE_MESH_ENDPOINT")
|
|
1707
|
+
|
|
1708
|
+
if seed_nodes:
|
|
1709
|
+
await agent.join_mesh(
|
|
1710
|
+
seed_nodes=seed_nodes,
|
|
1711
|
+
advertise_endpoint=mesh_endpoint
|
|
1712
|
+
)
|
|
1713
|
+
print(f"Joined mesh via {seed_nodes}")
|
|
1714
|
+
else:
|
|
1715
|
+
print("Running standalone (no JARVISCORE_SEED_NODES)")
|
|
1716
|
+
|
|
1717
|
+
await agent.serve_forever()
|
|
1718
|
+
|
|
1719
|
+
|
|
1720
|
+
if __name__ == "__main__":
|
|
1721
|
+
asyncio.run(main())
|
|
1722
|
+
```
|
|
1723
|
+
|
|
1724
|
+
```yaml
|
|
1725
|
+
# docker-compose.yml
|
|
1726
|
+
version: '3.8'
|
|
1727
|
+
services:
|
|
1728
|
+
mesh-seed:
|
|
1729
|
+
build: .
|
|
1730
|
+
environment:
|
|
1731
|
+
- JARVISCORE_BIND_PORT=7950
|
|
1732
|
+
ports:
|
|
1733
|
+
- "7950:7950"
|
|
1734
|
+
|
|
1735
|
+
worker-1:
|
|
1736
|
+
build: .
|
|
1737
|
+
environment:
|
|
1738
|
+
- JARVISCORE_SEED_NODES=mesh-seed:7950
|
|
1739
|
+
- JARVISCORE_MESH_ENDPOINT=worker-1:7950
|
|
1740
|
+
depends_on:
|
|
1741
|
+
- mesh-seed
|
|
1742
|
+
|
|
1743
|
+
worker-2:
|
|
1744
|
+
build: .
|
|
1745
|
+
environment:
|
|
1746
|
+
- JARVISCORE_SEED_NODES=mesh-seed:7950
|
|
1747
|
+
- JARVISCORE_MESH_ENDPOINT=worker-2:7950
|
|
1748
|
+
depends_on:
|
|
1749
|
+
- mesh-seed
|
|
1750
|
+
```
|
|
1751
|
+
|
|
1752
|
+
### Kubernetes Deployment Example
|
|
1753
|
+
|
|
1754
|
+
```yaml
|
|
1755
|
+
# k8s-deployment.yaml
|
|
1756
|
+
apiVersion: apps/v1
|
|
1757
|
+
kind: Deployment
|
|
1758
|
+
metadata:
|
|
1759
|
+
name: jarvis-worker
|
|
1760
|
+
spec:
|
|
1761
|
+
replicas: 3 # Scale as needed
|
|
1762
|
+
selector:
|
|
1763
|
+
matchLabels:
|
|
1764
|
+
app: jarvis-worker
|
|
1765
|
+
template:
|
|
1766
|
+
metadata:
|
|
1767
|
+
labels:
|
|
1768
|
+
app: jarvis-worker
|
|
1769
|
+
spec:
|
|
1770
|
+
containers:
|
|
1771
|
+
- name: worker
|
|
1772
|
+
image: myregistry/jarvis-worker:latest
|
|
1773
|
+
env:
|
|
1774
|
+
- name: JARVISCORE_SEED_NODES
|
|
1775
|
+
value: "jarvis-mesh-service:7950"
|
|
1776
|
+
- name: JARVISCORE_MESH_ENDPOINT
|
|
1777
|
+
valueFrom:
|
|
1778
|
+
fieldRef:
|
|
1779
|
+
fieldPath: status.podIP
|
|
1780
|
+
ports:
|
|
1781
|
+
- containerPort: 7950
|
|
1782
|
+
---
|
|
1783
|
+
apiVersion: v1
|
|
1784
|
+
kind: Service
|
|
1785
|
+
metadata:
|
|
1786
|
+
name: jarvis-mesh-service
|
|
1787
|
+
spec:
|
|
1788
|
+
selector:
|
|
1789
|
+
app: jarvis-mesh-seed
|
|
1790
|
+
ports:
|
|
1791
|
+
- port: 7950
|
|
1792
|
+
targetPort: 7950
|
|
1793
|
+
```
|
|
1794
|
+
|
|
1795
|
+
### How Self-Registration Works
|
|
1796
|
+
|
|
1797
|
+
```
|
|
1798
|
+
┌─────────────────────────────────────────────────────────────┐
|
|
1799
|
+
│ SELF-REGISTRATION FLOW │
|
|
1800
|
+
├─────────────────────────────────────────────────────────────┤
|
|
1801
|
+
│ │
|
|
1802
|
+
│ 1. New container starts │
|
|
1803
|
+
│ │ │
|
|
1804
|
+
│ ▼ │
|
|
1805
|
+
│ 2. agent.join_mesh(seed_nodes="mesh:7950") │
|
|
1806
|
+
│ │ │
|
|
1807
|
+
│ ▼ │
|
|
1808
|
+
│ 3. Agent connects to seed node │
|
|
1809
|
+
│ │ │
|
|
1810
|
+
│ ▼ │
|
|
1811
|
+
│ 4. SWIM protocol discovers all peers │
|
|
1812
|
+
│ │ │
|
|
1813
|
+
│ ▼ │
|
|
1814
|
+
│ 5. Agent registers its role/capabilities │
|
|
1815
|
+
│ │ │
|
|
1816
|
+
│ ▼ │
|
|
1817
|
+
│ 6. Other agents can now discover and call this agent │
|
|
1818
|
+
│ │
|
|
1819
|
+
└─────────────────────────────────────────────────────────────┘
|
|
1820
|
+
```
|
|
1821
|
+
|
|
1822
|
+
### RemoteAgentProxy (Automatic)
|
|
1823
|
+
|
|
1824
|
+
When agents join from different nodes, the framework automatically creates `RemoteAgentProxy` objects. You don't need to do anything special - the mesh handles it:
|
|
1825
|
+
|
|
1826
|
+
```python
|
|
1827
|
+
# On any node, you can discover and call remote agents
|
|
1828
|
+
if agent.peers:
|
|
1829
|
+
# This works whether the peer is local or remote
|
|
1830
|
+
response = await agent.peers.as_tool().execute(
|
|
1831
|
+
"ask_peer",
|
|
1832
|
+
{"role": "worker", "question": "Process this data"}
|
|
1833
|
+
)
|
|
1834
|
+
```
|
|
1835
|
+
|
|
1836
|
+
---
|
|
1837
|
+
|
|
1838
|
+
## API Reference
|
|
1839
|
+
|
|
1840
|
+
### CustomAgent Class Attributes
|
|
1841
|
+
|
|
1842
|
+
| Attribute | Type | Required | Description |
|
|
1843
|
+
|-----------|------|----------|-------------|
|
|
1844
|
+
| `role` | `str` | Yes | Unique identifier for this agent type (e.g., `"researcher"`) |
|
|
1845
|
+
| `capabilities` | `list[str]` | Yes | List of capabilities for discovery (e.g., `["research", "analysis"]`) |
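
A minimal declaration of these attributes (the class name and `execute_task` body are placeholders):

```python
from jarviscore.profiles import CustomAgent


class ResearcherAgent(CustomAgent):
    role = "researcher"                      # unique role used for discovery and routing
    capabilities = ["research", "analysis"]  # advertised to peers and workflows

    async def execute_task(self, task: dict) -> dict:
        # minimal body so the abstract base class can be instantiated
        return {"status": "success", "output": task.get("task", "")}
```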
|
|
1846
|
+
|
|
1847
|
+
### CustomAgent Methods
|
|
1848
|
+
|
|
1849
|
+
| Method | Mode | Description |
|
|
1850
|
+
|--------|------|-------------|
|
|
1851
|
+
| `setup()` | Both | Called once on startup. Initialize resources here. Always call `await super().setup()` |
|
|
1852
|
+
| `run()` | P2P | Main loop for continuous operation. Required for P2P mode |
|
|
1853
|
+
| `execute_task(task)` | Distributed | Handle a workflow task. Required for Distributed mode |
|
|
1854
|
+
| `join_mesh(seed_nodes, ...)` | Both | **(v0.3.0)** Self-register with an existing mesh |
|
|
1855
|
+
| `leave_mesh()` | Both | **(v0.3.0)** Gracefully leave the mesh |
|
|
1856
|
+
| `serve_forever()` | Both | **(v0.3.0)** Block until shutdown signal |
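
Putting the lifecycle methods together, a skeleton that touches each hook might look like this (all bodies are placeholders):

```python
import asyncio
from jarviscore.profiles import CustomAgent


class SkeletonAgent(CustomAgent):
    role = "skeleton"
    capabilities = ["demo"]

    async def setup(self):
        await super().setup()      # always call the base setup first
        self.state = {}            # initialize your own resources here

    async def run(self):
        # P2P mode: poll for peer messages until shutdown is requested
        while not self.shutdown_requested:
            if self.peers:
                msg = await self.peers.receive(timeout=0.5)
                if msg and msg.is_request:
                    await self.peers.respond(msg, {"response": "ok"})
            await asyncio.sleep(0.1)

    async def execute_task(self, task: dict) -> dict:
        # Distributed mode: handle a single workflow step
        return {"status": "success", "output": task.get("task", "")}
```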
|
|
1857
|
+
|
|
1858
|
+
### ListenerAgent Class (v0.3.0)
|
|
1859
|
+
|
|
1860
|
+
ListenerAgent extends CustomAgent with handler-based P2P communication.
|
|
1861
|
+
|
|
1862
|
+
| Attribute/Method | Type | Description |
|
|
1863
|
+
|------------------|------|-------------|
|
|
1864
|
+
| `role` | `str` | Required. Unique identifier for this agent type |
|
|
1865
|
+
| `capabilities` | `list[str]` | Required. List of capabilities for discovery |
|
|
1866
|
+
| `on_peer_request(msg)` | async method | Handle incoming requests. Return dict to respond |
|
|
1867
|
+
| `on_peer_notify(msg)` | async method | Handle broadcast notifications. No return needed |
|
|
1868
|
+
|
|
1869
|
+
**Note:** ListenerAgent does not require `run()` or `execute_task()` implementations.
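
For comparison, a minimal ListenerAgent that implements both handlers (the handler bodies are illustrative):

```python
from jarviscore.profiles import ListenerAgent


class EchoAgent(ListenerAgent):
    role = "echo"
    capabilities = ["echo"]

    async def on_peer_request(self, msg):
        # Requests expect a reply: return a dict to respond
        return {"response": msg.data.get("question", "")}

    async def on_peer_notify(self, msg):
        # Broadcasts are fire-and-forget: nothing needs to be returned
        print(f"Notification received: {msg.data}")
```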
|
|
1870
|
+
|
|
1871
|
+
### Why `execute_task()` is Required in P2P Mode
|
|
1872
|
+
|
|
1873
|
+
You may notice that P2P agents must implement `execute_task()` even though they primarily use `run()`. Here's why:
|
|
1874
|
+
|
|
1875
|
+
```
|
|
1876
|
+
Agent (base class)
|
|
1877
|
+
│
|
|
1878
|
+
├── @abstractmethod execute_task() ← Python REQUIRES this to be implemented
|
|
1879
|
+
│
|
|
1880
|
+
└── run() ← Optional, default does nothing
|
|
1881
|
+
```
|
|
1882
|
+
|
|
1883
|
+
**The technical reason:**
|
|
1884
|
+
|
|
1885
|
+
1. `Agent.execute_task()` is declared as `@abstractmethod` in `core/agent.py`
|
|
1886
|
+
2. Python's ABC (Abstract Base Class) requires ALL abstract methods to be implemented
|
|
1887
|
+
3. If you don't implement it, Python raises:
|
|
1888
|
+
```
|
|
1889
|
+
TypeError: Can't instantiate abstract class MyAgent with abstract method execute_task
|
|
1890
|
+
```
|
|
1891
|
+
|
|
1892
|
+
**The design reason:**
|
|
1893
|
+
|
|
1894
|
+
- **Unified interface**: All agents can be called via `execute_task()`, regardless of mode
|
|
1895
|
+
- **Flexibility**: A P2P agent can still participate in workflows if needed
|
|
1896
|
+
- **Testing**: You can test any agent by calling `execute_task()` directly
|
|
1897
|
+
|
|
1898
|
+
**What to put in it for P2P mode:**
|
|
1899
|
+
|
|
1900
|
+
```python
|
|
1901
|
+
async def execute_task(self, task: dict) -> dict:
|
|
1902
|
+
"""Minimal implementation - main logic is in run()."""
|
|
1903
|
+
return {"status": "success", "note": "This agent uses run() for P2P mode"}
|
|
1904
|
+
```
|
|
1905
|
+
|
|
1906
|
+
### Peer Tools (P2P Mode)
|
|
1907
|
+
|
|
1908
|
+
Access via `self.peers.as_tool().execute(tool_name, params)`:
|
|
1909
|
+
|
|
1910
|
+
| Tool | Parameters | Description |
|
|
1911
|
+
|------|------------|-------------|
|
|
1912
|
+
| `ask_peer` | `{"role": str, "question": str}` | Send a request to a peer by role and wait for response |
|
|
1913
|
+
| `broadcast` | `{"message": str}` | Send a message to all connected peers |
|
|
1914
|
+
| `list_peers` | `{}` | Get list of available peers and their capabilities |
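
A sketch of calling each tool directly, assuming every tool call is awaitable like `ask_peer` (parameter values are illustrative):

```python
tool = self.peers.as_tool()

# Ask a specific peer by role and wait for its answer
answer = await tool.execute(
    "ask_peer", {"role": "researcher", "question": "Latest AI trends?"}
)

# Notify every connected peer
await tool.execute("broadcast", {"message": "pipeline starting"})

# Inspect which peers are currently reachable
peers = await tool.execute("list_peers", {})
```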
|
|
1915
|
+
|
|
1916
|
+
### PeerClient Methods (v0.3.0)
|
|
1917
|
+
|
|
1918
|
+
Access via `self.peers`:
|
|
1919
|
+
|
|
1920
|
+
| Method | Returns | Description |
|
|
1921
|
+
|--------|---------|-------------|
|
|
1922
|
+
| `get_cognitive_context()` | `str` | Generate LLM-ready text describing available peers |
|
|
1923
|
+
| `list()` | `list[PeerInfo]` | Get list of connected peers |
|
|
1924
|
+
| `as_tool()` | `PeerTool` | Get peer tools for LLM tool use |
|
|
1925
|
+
| `receive(timeout)` | `IncomingMessage` | Receive next message (for CustomAgent run loops) |
|
|
1926
|
+
| `respond(msg, data)` | `None` | Respond to a request message |
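
A short sketch combining the discovery helpers (assumes `agent` has already completed `setup()`):

```python
if agent.peers:
    # LLM-ready description of the current peers
    print(agent.peers.get_cognitive_context())

    # Raw PeerInfo entries for programmatic inspection
    for peer in agent.peers.list():
        print(peer)
```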
|
|
1927
|
+
|
|
1928
|
+
### JarvisLifespan (v0.3.0)
|
|
1929
|
+
|
|
1930
|
+
FastAPI integration helper:
|
|
1931
|
+
|
|
1932
|
+
```python
|
|
1933
|
+
from jarviscore.integrations.fastapi import JarvisLifespan
|
|
1934
|
+
|
|
1935
|
+
JarvisLifespan(
|
|
1936
|
+
agent, # Agent instance
|
|
1937
|
+
mode="p2p", # "p2p" or "distributed"
|
|
1938
|
+
bind_port=7950, # Optional: P2P port
|
|
1939
|
+
seed_nodes="ip:port", # Optional: for multi-node
|
|
1940
|
+
)
|
|
1941
|
+
```
|
|
1942
|
+
|
|
1943
|
+
### Mesh Configuration
|
|
1944
|
+
|
|
1945
|
+
```python
|
|
1946
|
+
mesh = Mesh(
|
|
1947
|
+
mode="p2p" | "distributed",
|
|
1948
|
+
config={
|
|
1949
|
+
"bind_host": "0.0.0.0", # IP to bind to (default: "127.0.0.1")
|
|
1950
|
+
"bind_port": 7950, # Port to listen on
|
|
1951
|
+
"node_name": "my-node", # Human-readable node name
|
|
1952
|
+
"seed_nodes": "ip:port,ip:port", # Comma-separated list of known nodes
|
|
1953
|
+
}
|
|
1954
|
+
)
|
|
1955
|
+
```
|
|
1956
|
+
|
|
1957
|
+
### Mesh Methods
|
|
1958
|
+
|
|
1959
|
+
| Method | Description |
|
|
1960
|
+
|--------|-------------|
|
|
1961
|
+
| `mesh.add(AgentClass)` | Register an agent class |
|
|
1962
|
+
| `mesh.start()` | Initialize and start all agents |
|
|
1963
|
+
| `mesh.stop()` | Gracefully shut down all agents |
|
|
1964
|
+
| `mesh.run_forever()` | Block until shutdown signal |
|
|
1965
|
+
| `mesh.serve_forever()` | Same as `run_forever()` |
|
|
1966
|
+
| `mesh.get_agent(role)` | Get agent instance by role |
|
|
1967
|
+
| `mesh.workflow(name, steps)` | Run a workflow (Distributed mode) |
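
A compact sketch exercising these methods in order (`MyAgent` and the workflow step are placeholders):

```python
import asyncio
from jarviscore import Mesh


async def main():
    mesh = Mesh(mode="distributed", config={"bind_port": 7950})
    mesh.add(MyAgent)                   # register the agent class
    await mesh.start()                  # initialize and start all agents

    worker = mesh.get_agent("worker")   # look up an agent instance by role
    print("worker registered:", worker is not None)

    results = await mesh.workflow("demo", [
        {"id": "step1", "agent": "worker", "task": "Do something"},
    ])
    print(results)

    await mesh.stop()                   # graceful shutdown


asyncio.run(main())
```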
|
|
1968
|
+
|
|
1969
|
+
---
|
|
1970
|
+
|
|
1971
|
+
## Multi-Node Deployment
|
|
1972
|
+
|
|
1973
|
+
Run agents across multiple machines. Nodes discover each other via seed nodes.
|
|
1974
|
+
|
|
1975
|
+
### Machine 1: Research Node
|
|
1976
|
+
|
|
1977
|
+
```python
|
|
1978
|
+
# research_node.py
|
|
1979
|
+
import asyncio
|
|
1980
|
+
from jarviscore import Mesh
|
|
1981
|
+
from agents import ResearcherAgent
|
|
1982
|
+
|
|
1983
|
+
|
|
1984
|
+
async def main():
|
|
1985
|
+
mesh = Mesh(
|
|
1986
|
+
mode="distributed",
|
|
1987
|
+
config={
|
|
1988
|
+
"bind_host": "0.0.0.0", # Accept connections from any IP
|
|
1989
|
+
"bind_port": 7950,
|
|
1990
|
+
"node_name": "research-node",
|
|
1991
|
+
}
|
|
1992
|
+
)
|
|
1993
|
+
|
|
1994
|
+
mesh.add(ResearcherAgent)
|
|
1995
|
+
await mesh.start()
|
|
1996
|
+
|
|
1997
|
+
print("Research node running on port 7950...")
|
|
1998
|
+
await mesh.serve_forever()
|
|
1999
|
+
|
|
2000
|
+
|
|
2001
|
+
if __name__ == "__main__":
|
|
2002
|
+
asyncio.run(main())
|
|
2003
|
+
```
|
|
2004
|
+
|
|
2005
|
+
### Machine 2: Writer Node + Orchestrator
|
|
2006
|
+
|
|
2007
|
+
```python
|
|
2008
|
+
# writer_node.py
|
|
2009
|
+
import asyncio
|
|
2010
|
+
from jarviscore import Mesh
|
|
2011
|
+
from agents import WriterAgent
|
|
2012
|
+
|
|
2013
|
+
|
|
2014
|
+
async def main():
|
|
2015
|
+
mesh = Mesh(
|
|
2016
|
+
mode="distributed",
|
|
2017
|
+
config={
|
|
2018
|
+
"bind_host": "0.0.0.0",
|
|
2019
|
+
"bind_port": 7950,
|
|
2020
|
+
"node_name": "writer-node",
|
|
2021
|
+
"seed_nodes": "192.168.1.10:7950", # IP of research node
|
|
2022
|
+
}
|
|
2023
|
+
)
|
|
2024
|
+
|
|
2025
|
+
mesh.add(WriterAgent)
|
|
2026
|
+
await mesh.start()
|
|
2027
|
+
|
|
2028
|
+
# Wait for nodes to discover each other
|
|
2029
|
+
await asyncio.sleep(2)
|
|
2030
|
+
|
|
2031
|
+
# Run workflow - tasks automatically route to correct nodes
|
|
2032
|
+
results = await mesh.workflow("cross-node-pipeline", [
|
|
2033
|
+
{"id": "research", "agent": "researcher", "task": "AI trends"},
|
|
2034
|
+
{"id": "write", "agent": "writer", "task": "Write article",
|
|
2035
|
+
"depends_on": ["research"]},
|
|
2036
|
+
])
|
|
2037
|
+
|
|
2038
|
+
print(results)
|
|
2039
|
+
await mesh.stop()
|
|
2040
|
+
|
|
2041
|
+
|
|
2042
|
+
if __name__ == "__main__":
|
|
2043
|
+
asyncio.run(main())
|
|
2044
|
+
```
|
|
2045
|
+
|
|
2046
|
+
### How Node Discovery Works
|
|
2047
|
+
|
|
2048
|
+
1. On startup, nodes connect to seed nodes
|
|
2049
|
+
2. Seed nodes share their known peers
|
|
2050
|
+
3. Nodes exchange agent capability information
|
|
2051
|
+
4. Workflows automatically route tasks to nodes with matching agents
|
|
2052
|
+
|
|
2053
|
+
---
|
|
2054
|
+
|
|
2055
|
+
## Error Handling
|
|
2056
|
+
|
|
2057
|
+
### In P2P Mode
|
|
2058
|
+
|
|
2059
|
+
```python
|
|
2060
|
+
async def run(self):
|
|
2061
|
+
while not self.shutdown_requested:
|
|
2062
|
+
try:
|
|
2063
|
+
if self.peers:
|
|
2064
|
+
msg = await self.peers.receive(timeout=0.5)
|
|
2065
|
+
if msg and msg.is_request:
|
|
2066
|
+
try:
|
|
2067
|
+
result = await self.process(msg.data)
|
|
2068
|
+
await self.peers.respond(msg, {"response": result})
|
|
2069
|
+
except Exception as e:
|
|
2070
|
+
await self.peers.respond(msg, {
|
|
2071
|
+
"error": str(e),
|
|
2072
|
+
"status": "failed"
|
|
2073
|
+
})
|
|
2074
|
+
except Exception as e:
|
|
2075
|
+
print(f"Error in run loop: {e}")
|
|
2076
|
+
|
|
2077
|
+
await asyncio.sleep(0.1)
|
|
2078
|
+
```
|
|
2079
|
+
|
|
2080
|
+
### In Distributed Mode
|
|
2081
|
+
|
|
2082
|
+
```python
|
|
2083
|
+
async def execute_task(self, task: dict) -> dict:
|
|
2084
|
+
try:
|
|
2085
|
+
result = await self.do_work(task)
|
|
2086
|
+
return {
|
|
2087
|
+
"status": "success",
|
|
2088
|
+
"output": result
|
|
2089
|
+
}
|
|
2090
|
+
except ValueError as e:
|
|
2091
|
+
return {
|
|
2092
|
+
"status": "error",
|
|
2093
|
+
"error": f"Invalid input: {e}"
|
|
2094
|
+
}
|
|
2095
|
+
except Exception as e:
|
|
2096
|
+
return {
|
|
2097
|
+
"status": "error",
|
|
2098
|
+
"error": f"Unexpected error: {e}"
|
|
2099
|
+
}
|
|
2100
|
+
```
|
|
2101
|
+
|
|
2102
|
+
### Handling Missing Peers
|
|
2103
|
+
|
|
2104
|
+
```python
|
|
2105
|
+
async def ask_researcher(self, question: str) -> str:
|
|
2106
|
+
if not self.peers:
|
|
2107
|
+
raise RuntimeError("Peer system not initialized")
|
|
2108
|
+
|
|
2109
|
+
try:
|
|
2110
|
+
response = await asyncio.wait_for(
|
|
2111
|
+
self.peers.as_tool().execute(
|
|
2112
|
+
"ask_peer",
|
|
2113
|
+
{"role": "researcher", "question": question}
|
|
2114
|
+
),
|
|
2115
|
+
timeout=30.0 # 30 second timeout
|
|
2116
|
+
)
|
|
2117
|
+
return response.get("response", "")
|
|
2118
|
+
except asyncio.TimeoutError:
|
|
2119
|
+
raise RuntimeError("Researcher did not respond in time")
|
|
2120
|
+
except Exception as e:
|
|
2121
|
+
raise RuntimeError(f"Failed to contact researcher: {e}")
|
|
2122
|
+
```
|
|
2123
|
+
|
|
2124
|
+
---
|
|
2125
|
+
|
|
2126
|
+
## Troubleshooting
|
|
2127
|
+
|
|
2128
|
+
### Agent not receiving messages
|
|
2129
|
+
|
|
2130
|
+
**Problem**: `self.peers.receive()` always returns `None`
|
|
2131
|
+
|
|
2132
|
+
**Solutions**:
|
|
2133
|
+
1. Ensure the sending agent is using the correct `role` in `ask_peer`
|
|
2134
|
+
2. Check that both agents are registered with the mesh
|
|
2135
|
+
3. Verify `await super().setup()` is called in your `setup()` method
|
|
2136
|
+
4. Add logging to confirm your `run()` loop is executing (see the sketch below)
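
For point 4, a lightweight way to confirm the loop is alive (the logger name is illustrative):

```python
import asyncio
import logging

log = logging.getLogger("my_agent")

# Inside your CustomAgent subclass:
async def run(self):
    log.info("run() loop started (role=%s)", self.role)
    while not self.shutdown_requested:
        if self.peers:
            msg = await self.peers.receive(timeout=0.5)
            if msg:
                log.info("received message: %r", msg.data)
        await asyncio.sleep(0.1)
```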
|
|
2137
|
+
|
|
2138
|
+
### Workflow tasks not executing
|
|
2139
|
+
|
|
2140
|
+
**Problem**: `mesh.workflow()` hangs or returns empty results
|
|
2141
|
+
|
|
2142
|
+
**Solutions**:
|
|
2143
|
+
1. Verify the agent's `role` matches the `agent` field in workflow steps (see the sketch below)
|
|
2144
|
+
2. Check `execute_task()` returns a dict with `status` key
|
|
2145
|
+
3. Ensure all `depends_on` step IDs exist in the workflow
|
|
2146
|
+
4. Check for circular dependencies
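
For points 1 and 2, a minimal sanity check: the step's `agent` field must match a registered role exactly, and `execute_task()` must return a dict with a `status` key (class and step names are illustrative, and a started mesh with this agent registered is assumed):

```python
from jarviscore.profiles import CustomAgent


class WorkerAgent(CustomAgent):
    role = "worker"                   # must match the "agent" field in the step below exactly
    capabilities = ["processing"]

    async def execute_task(self, task: dict) -> dict:
        return {"status": "success", "output": task.get("task", "")}


results = await mesh.workflow("sanity-check", [
    {"id": "only-step", "agent": "worker", "task": "ping"},
])
```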
|
|
2147
|
+
|
|
2148
|
+
### Nodes not discovering each other
|
|
2149
|
+
|
|
2150
|
+
**Problem**: In a multi-node setup, workflows fail to find agents on other nodes
|
|
2151
|
+
|
|
2152
|
+
**Solutions**:
|
|
2153
|
+
1. Verify `seed_nodes` IP and port are correct
|
|
2154
|
+
2. Check firewall allows connections on the bind port
|
|
2155
|
+
3. Ensure `bind_host` is `"0.0.0.0"` (not `"127.0.0.1"`) for remote connections
|
|
2156
|
+
4. Wait a few seconds after `mesh.start()` for discovery to complete
|
|
2157
|
+
|
|
2158
|
+
### "Peer system not available" errors
|
|
2159
|
+
|
|
2160
|
+
**Problem**: `self.peers` is `None`
|
|
2161
|
+
|
|
2162
|
+
**Solutions**:
|
|
2163
|
+
1. Only access `self.peers` after `setup()` completes
|
|
2164
|
+
2. Check that mesh is started with `await mesh.start()`
|
|
2165
|
+
3. Verify the agent was added with `mesh.add(AgentClass)` before `mesh.start()` (see the ordering sketch below)
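
A minimal ordering that avoids this, inside an async function (`MyAgent` is a placeholder):

```python
mesh = Mesh(mode="p2p", config={"bind_port": 7950})
agent = mesh.add(MyAgent)        # 1. register before starting
await mesh.start()               # 2. start the mesh - setup() runs and self.peers is wired
assert agent.peers is not None   # 3. peers is only available after start()
```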
|
|
2166
|
+
|
|
2167
|
+
---
|
|
2168
|
+
|
|
2169
|
+
## Examples
|
|
2170
|
+
|
|
2171
|
+
For complete, runnable examples, see:
|
|
2172
|
+
|
|
2173
|
+
- `examples/customagent_p2p_example.py` - P2P mode with LLM-driven peer communication
|
|
2174
|
+
- `examples/customagent_distributed_example.py` - Distributed mode with workflows
|
|
2175
|
+
- `examples/listeneragent_cognitive_discovery_example.py` - ListenerAgent + cognitive discovery (v0.3.0)
|
|
2176
|
+
- `examples/fastapi_integration_example.py` - FastAPI + JarvisLifespan (v0.3.0)
|
|
2177
|
+
- `examples/cloud_deployment_example.py` - Self-registration with join_mesh (v0.3.0)
|
|
2178
|
+
|
|
2179
|
+
---
|
|
2180
|
+
|
|
2181
|
+
*CustomAgent Guide - JarvisCore Framework v0.3.0*
|