jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/autoagent_distributed_example.py +211 -0
- examples/custom_profile_decorator.py +134 -0
- examples/custom_profile_wrap.py +168 -0
- examples/customagent_distributed_example.py +362 -0
- examples/customagent_p2p_example.py +347 -0
- jarviscore/__init__.py +49 -36
- jarviscore/adapter/__init__.py +15 -9
- jarviscore/adapter/decorator.py +23 -19
- jarviscore/adapter/wrapper.py +303 -0
- jarviscore/cli/scaffold.py +1 -1
- jarviscore/cli/smoketest.py +3 -2
- jarviscore/core/agent.py +44 -1
- jarviscore/core/mesh.py +196 -35
- jarviscore/data/examples/autoagent_distributed_example.py +211 -0
- jarviscore/data/examples/customagent_distributed_example.py +362 -0
- jarviscore/data/examples/customagent_p2p_example.py +347 -0
- jarviscore/docs/API_REFERENCE.md +264 -51
- jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
- jarviscore/docs/CONFIGURATION.md +35 -21
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
- jarviscore/docs/GETTING_STARTED.md +106 -13
- jarviscore/docs/TROUBLESHOOTING.md +144 -6
- jarviscore/docs/USER_GUIDE.md +138 -361
- jarviscore/orchestration/engine.py +20 -8
- jarviscore/p2p/__init__.py +10 -0
- jarviscore/p2p/coordinator.py +129 -0
- jarviscore/p2p/messages.py +87 -0
- jarviscore/p2p/peer_client.py +576 -0
- jarviscore/p2p/peer_tool.py +268 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/METADATA +60 -54
- jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
- test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
- test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
- test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
- test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
- test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
- test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
- test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
- test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
- test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
- test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
- test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
- test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
- test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
- test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
- test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
- test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
- tests/test_01_analyst_standalone.py +124 -0
- tests/test_02_assistant_standalone.py +164 -0
- tests/test_03_analyst_with_framework.py +945 -0
- tests/test_04_assistant_with_framework.py +1002 -0
- tests/test_05_integration.py +1301 -0
- tests/test_06_real_llm_integration.py +760 -0
- tests/test_07_distributed_single_node.py +578 -0
- tests/test_08_distributed_multi_node.py +454 -0
- tests/test_09_distributed_autoagent.py +509 -0
- tests/test_10_distributed_customagent.py +787 -0
- tests/test_mesh.py +35 -4
- jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0

jarviscore/docs/CUSTOMAGENT_GUIDE.md (new file)

@@ -0,0 +1,415 @@
+# CustomAgent Guide
+
+CustomAgent is for users who **already have working code** and want to integrate with JarvisCore.
+
+You keep your execution logic. Framework provides:
+- Agent discovery and communication
+- Workflow orchestration (distributed mode)
+- P2P peer tools (ask_peer, broadcast, etc.)
+
+---
+
+## Quick Reference
+
+| Mode | Use Case | Agent Method |
+|------|----------|--------------|
+| **P2P** | Direct agent communication | `run()` loop |
+| **Distributed** | Multi-node workflows | `execute_task()` |
+
+---
+
+## P2P Mode: Standalone → Framework
+
+### Your Standalone Agents (Before)
+
+You have two agents that communicate directly:
+
+```python
+# standalone_researcher.py
+class StandaloneResearcher:
+    """Your existing researcher agent."""
+
+    def __init__(self):
+        self.llm = MyLLMClient()
+
+    def research(self, query: str) -> str:
+        """Your existing research logic."""
+        return self.llm.chat(f"Research: {query}")
+
+
+# standalone_assistant.py
+class StandaloneAssistant:
+    """Your existing assistant that needs researcher help."""
+
+    def __init__(self, researcher: StandaloneResearcher):
+        self.researcher = researcher  # Direct reference
+        self.llm = MyLLMClient()
+
+    def help(self, question: str) -> str:
+        # Directly calls researcher
+        research = self.researcher.research(question)
+        return self.llm.chat(f"Based on: {research}\nAnswer: {question}")
+```
+
+**Problem**: Agents are tightly coupled. Can't run on different machines.
+
+---
+
+### With JarvisCore P2P Mode (After)
+
+**Step 1: Create `agents.py`** - Convert to CustomAgent
+
+```python
+# agents.py
+from jarviscore.profiles import CustomAgent
+
+class ResearcherAgent(CustomAgent):
+    """Same logic, now framework-integrated."""
+    role = "researcher"
+    capabilities = ["research", "analysis"]
+
+    async def setup(self):
+        await super().setup()
+        self.llm = MyLLMClient()  # Your existing LLM
+
+    async def run(self):
+        """REQUIRED for P2P: Listen for peer requests."""
+        while not self.shutdown_requested:
+            if self.peers:
+                msg = await self.peers.receive(timeout=0.5)
+                if msg and msg.is_request:
+                    query = msg.data.get("question", "")
+                    # Your existing logic
+                    result = self.llm.chat(f"Research: {query}")
+                    await self.peers.respond(msg, {"response": result})
+            else:
+                await asyncio.sleep(0.1)
+
+    async def execute_task(self, task):
+        return {"status": "success"}  # Required but unused in P2P
+
+
+class AssistantAgent(CustomAgent):
+    """Same logic, now uses peer tools instead of direct reference."""
+    role = "assistant"
+    capabilities = ["help", "coordination"]
+
+    async def setup(self):
+        await super().setup()
+        self.llm = MyLLMClient()
+
+    async def ask_researcher(self, question: str) -> str:
+        """Replaces direct self.researcher reference."""
+        if self.peers:
+            return await self.peers.as_tool().execute(
+                "ask_peer",
+                {"role": "researcher", "question": question}
+            )
+        return "No researcher available"
+
+    async def help(self, question: str) -> str:
+        """Your existing logic, now uses peer communication."""
+        research = await self.ask_researcher(question)
+        return self.llm.chat(f"Based on: {research}\nAnswer: {question}")
+
+    async def run(self):
+        """Listen for requests or external triggers."""
+        while not self.shutdown_requested:
+            # Your run loop - could listen for HTTP, websocket, etc.
+            await asyncio.sleep(0.1)
+
+    async def execute_task(self, task):
+        return {"status": "success"}
+```
+
+**Step 2: Create `main.py`** - Run with mesh
+
+```python
+# main.py
+import asyncio
+from jarviscore import Mesh
+from agents import ResearcherAgent, AssistantAgent

+async def main():
+    mesh = Mesh(
+        mode="p2p",
+        config={
+            'bind_port': 7950,
+            'node_name': 'my-agents',
+        }
+    )
+
+    mesh.add(ResearcherAgent)
+    mesh.add(AssistantAgent)
+
+    await mesh.start()
+
+    # Option 1: Run forever (agents handle their own work)
+    # await mesh.run_forever()
+
+    # Option 2: Manual interaction
+    assistant = mesh.get_agent("assistant")
+    result = await assistant.help("What is quantum computing?")
+    print(result)
+
+    await mesh.stop()
+
+asyncio.run(main())
+```
+
+### What Changed
+
+| Before | After |
+|--------|-------|
+| Direct object reference | `self.peers.as_tool().execute("ask_peer", ...)` |
+| Tightly coupled | Loosely coupled via peer discovery |
+| Same process only | Can run on different machines |
+| No discovery | Automatic agent discovery |
+
+### Key Additions
+
+1. **Inherit from `CustomAgent`** instead of plain class
+2. **Add `role` and `capabilities`** class attributes
+3. **Implement `run()`** method for continuous listening
+4. **Use `self.peers`** for communication instead of direct references
+
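The `AssistantAgent.run()` loop above is left as a stub. If the assistant should also answer `help` requests from other peers, a fuller loop can mirror the researcher's receive/respond pattern. A sketch, using only the calls already shown in the hunk above (`MyLLMClient` and the message shape remain the guide's placeholders; this is not part of the shipped guide):

```python
# Sketch: drop-in replacement for the AssistantAgent.run() stub above.
# Mirrors the researcher's receive/respond pattern from Step 1.
async def run(self):
    while not self.shutdown_requested:
        if self.peers:
            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                question = msg.data.get("question", "")
                answer = await self.help(question)  # help() as defined above
                await self.peers.respond(msg, {"response": answer})
        else:
            await asyncio.sleep(0.1)
```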
+---
+
+## Distributed Mode: Standalone → Framework
+
+### Your Standalone Pipeline (Before)
+
+You have agents that execute in a pipeline:
+
+```python
+# standalone_pipeline.py
+class StandaloneResearcher:
+    def __init__(self):
+        self.llm = MyLLMClient()
+
+    def execute(self, task: str) -> dict:
+        result = self.llm.chat(f"Research: {task}")
+        return {"output": result}
+
+
+class StandaloneWriter:
+    def __init__(self):
+        self.llm = MyLLMClient()
+
+    def execute(self, task: str, context: dict = None) -> dict:
+        prompt = task
+        if context:
+            prompt = f"Based on: {context}\n\n{task}"
+        result = self.llm.chat(prompt)
+        return {"output": result}
+
+
+# Manual orchestration
+def run_pipeline():
+    researcher = StandaloneResearcher()
+    writer = StandaloneWriter()
+
+    # Step 1
+    research = researcher.execute("Research AI trends")
+
+    # Step 2 - manually pass context
+    article = writer.execute("Write article", context=research["output"])
+
+    return article
+```
+
+**Problem**: Manual orchestration. No dependency management. Single machine.
+
+---
+
+### With JarvisCore Distributed Mode (After)
+
+**Step 1: Create `agents.py`** - Convert to CustomAgent
+
+```python
+# agents.py
+from jarviscore.profiles import CustomAgent
+
+class ResearcherAgent(CustomAgent):
+    role = "researcher"
+    capabilities = ["research"]
+
+    async def setup(self):
+        await super().setup()
+        self.llm = MyLLMClient()
+
+    async def execute_task(self, task):
+        """REQUIRED for Distributed: Called by workflow engine."""
+        task_desc = task.get("task", "")
+        # Your existing logic
+        result = self.llm.chat(f"Research: {task_desc}")
+        return {
+            "status": "success",
+            "output": result
+        }
+
+
+class WriterAgent(CustomAgent):
+    role = "writer"
+    capabilities = ["writing"]
+
+    async def setup(self):
+        await super().setup()
+        self.llm = MyLLMClient()
+
+    async def execute_task(self, task):
+        """Context from previous steps is automatically passed."""
+        task_desc = task.get("task", "")
+        context = task.get("context", {})  # From depends_on steps
+
+        prompt = task_desc
+        if context:
+            prompt = f"Based on: {context}\n\n{task_desc}"
+
+        result = self.llm.chat(prompt)
+        return {
+            "status": "success",
+            "output": result
+        }
+```
+
+**Step 2: Create `main.py`** - Run with mesh
+
+```python
+# main.py
+import asyncio
+from jarviscore import Mesh
+from agents import ResearcherAgent, WriterAgent
+
+async def main():
+    mesh = Mesh(
+        mode="distributed",
+        config={
+            'bind_port': 7950,
+            'node_name': 'content-node',
+        }
+    )
+
+    mesh.add(ResearcherAgent)
+    mesh.add(WriterAgent)
+
+    await mesh.start()
+
+    # Workflow engine handles orchestration
+    results = await mesh.workflow("content-pipeline", [
+        {
+            "id": "research",
+            "agent": "researcher",
+            "task": "Research AI trends"
+        },
+        {
+            "id": "write",
+            "agent": "writer",
+            "task": "Write article about the research",
+            "depends_on": ["research"]  # Auto-injects context
+        }
+    ])
+
+    print(results[0]["output"])  # Research
+    print(results[1]["output"])  # Article
+
+    await mesh.stop()
+
+asyncio.run(main())
+```
+
+### What Changed
+
+| Before | After |
+|--------|-------|
+| Manual `context` passing | `depends_on` + automatic injection |
+| Manual orchestration | `mesh.workflow()` handles it |
+| Same process only | Can span multiple machines |
+| No retries | Framework handles failures |
+
+### Key Additions
+
+1. **Inherit from `CustomAgent`**
+2. **Add `role` and `capabilities`**
+3. **Implement `execute_task(task)`** - receives `task` dict with `context`
+4. **Use `mesh.workflow()`** with `depends_on` for dependencies
+
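Since `depends_on` is a list, a step can presumably declare several upstream steps, which would allow fan-in pipelines. A sketch built from the same step schema as above (multi-dependency behavior is an assumption, not shown in this guide):

```python
# Sketch: fan-in workflow, assuming multiple depends_on entries merge
# their outputs into the "write" step's context.
results = await mesh.workflow("fan-in-pipeline", [
    {"id": "trends", "agent": "researcher", "task": "Research AI trends"},
    {"id": "market", "agent": "researcher", "task": "Research the AI market"},
    {
        "id": "write",
        "agent": "writer",
        "task": "Write a combined report",
        "depends_on": ["trends", "market"],
    },
])
```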
+---
+
+## Multi-Node Distributed
+
+Same agents, different machines:
+
+**Machine 1:**
+```python
+mesh = Mesh(mode="distributed", config={
+    'bind_host': '0.0.0.0',
+    'bind_port': 7950,
+    'node_name': 'research-node',
+})
+mesh.add(ResearcherAgent)
+await mesh.start()
+await mesh.serve_forever()
+```
+
+**Machine 2:**
+```python
+mesh = Mesh(mode="distributed", config={
+    'bind_host': '0.0.0.0',
+    'bind_port': 7950,
+    'node_name': 'writer-node',
+    'seed_nodes': '192.168.1.10:7950',  # Machine 1
+})
+mesh.add(WriterAgent)
+await mesh.start()
+await mesh.serve_forever()
+```
+
+Workflows automatically route to the right machine.
+
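The workflow call itself is unchanged in the multi-node case; the submitting node only needs each step's `agent` role to be registered somewhere in the mesh. A sketch of machine 1 submitting the content pipeline (assuming it runs the workflow instead of, or before, calling `serve_forever()`):

```python
# Sketch: submitted from machine 1; the "write" step is routed to writer-node.
results = await mesh.workflow("content-pipeline", [
    {"id": "research", "agent": "researcher", "task": "Research AI trends"},
    {
        "id": "write",
        "agent": "writer",
        "task": "Write article about the research",
        "depends_on": ["research"],
    },
])
print(results[1]["output"])  # produced on the other machine
```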
+---
+
+## P2P vs Distributed: Which to Use?
+
+| Scenario | Mode |
+|----------|------|
+| Agents run continuously, self-coordinate | **P2P** |
+| Chatbot with specialist agents | **P2P** |
+| Task pipelines with dependencies | **Distributed** |
+| Need workflow orchestration | **Distributed** |
+| Both continuous + workflows | **Distributed** (supports both) |
+
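Per the last row, distributed mode runs the workflow engine and P2P together, so a single agent can expose both entry points. A minimal sketch with placeholder logic (agent name and capabilities are illustrative):

```python
import asyncio
from jarviscore.profiles import CustomAgent

class HybridAgent(CustomAgent):
    role = "hybrid"
    capabilities = ["tasks", "chat"]

    async def execute_task(self, task):
        # Entry point for mesh.workflow() steps.
        return {"status": "success", "output": task.get("task", "")}

    async def run(self):
        # Continuous loop for peer traffic between workflow steps.
        while not self.shutdown_requested:
            await asyncio.sleep(0.1)
```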
+---
+
+## Summary
+
+### P2P Mode
+```python
+class MyAgent(CustomAgent):
+    role = "my_role"
+    capabilities = ["my_cap"]
+
+    async def run(self):  # Required
+        while not self.shutdown_requested:
+            msg = await self.peers.receive(timeout=0.5)
+            # Handle messages
+
+mesh = Mesh(mode="p2p", config={'bind_port': 7950})
+await mesh.run_forever()
+```
+
+### Distributed Mode
+```python
+class MyAgent(CustomAgent):
+    role = "my_role"
+    capabilities = ["my_cap"]
+
+    async def execute_task(self, task):  # Required
+        # Your logic
+        return {"status": "success", "output": result}
+
+mesh = Mesh(mode="distributed", config={'bind_port': 7950})
+results = await mesh.workflow("my-workflow", [...])
+```
+
+See `examples/customagent_p2p_example.py` and `examples/customagent_distributed_example.py` for complete examples.

jarviscore/docs/GETTING_STARTED.md

@@ -4,11 +4,32 @@ Build your first AI agent in 5 minutes!
 
 ---
 
+## Choose Your Path
+
+### Profiles (How agents execute)
+
+| Profile | Best For | LLM Required |
+|---------|----------|--------------|
+| **AutoAgent** | Rapid prototyping, LLM generates code from prompts | Yes |
+| **CustomAgent** | Existing code, full control (LangChain, CrewAI, etc.) | Optional |
+
+### Execution Modes (How agents are orchestrated)
+
+| Mode | Use Case | Start Here |
+|------|----------|------------|
+| **Autonomous** | Single machine, simple pipelines | ✅ This guide |
+| **P2P** | Direct agent communication, swarms | [CustomAgent Guide](CUSTOMAGENT_GUIDE.md) |
+| **Distributed** | Multi-node production systems | [AutoAgent Guide](AUTOAGENT_GUIDE.md) |
+
+**Recommendation:** Start with **AutoAgent + Autonomous mode** below, then explore other modes.
+
+---
+
 ## What You'll Build
 
 An **AutoAgent** that takes natural language prompts and automatically:
 1. Generates Python code using an LLM
-2. Executes the code securely
+2. Executes the code securely in a sandbox
 3. Returns the result
 
 **No manual coding required** - just describe what you want!
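The recommended AutoAgent + Autonomous path compresses to a few lines. A sketch of the whole flow: the `Mesh` calls match the hunks further down, but the `AutoAgent` import path and its `role`/`capabilities` attributes are assumptions made for illustration (only `system_prompt` is documented in this diff):

```python
import asyncio
from jarviscore import Mesh
from jarviscore.profiles import AutoAgent  # import path assumed

class HelperAgent(AutoAgent):
    role = "helper"             # assumed to parallel CustomAgent
    capabilities = ["general"]  # assumed to parallel CustomAgent
    system_prompt = "Write small, safe Python functions."  # documented attribute

async def main():
    mesh = Mesh(mode="autonomous")
    mesh.add(HelperAgent)
    await mesh.start()
    results = await mesh.workflow("demo", [
        {"agent": "helper", "task": "Sum the integers from 1 to 100"},
    ])
    print(results[0]["output"])
    await mesh.stop()

asyncio.run(main())
```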
@@ -178,6 +199,56 @@ Execution time: 4.23s
 
 ---
 
+## Step 5: Try CustomAgent (Alternative Path)
+
+If you have existing agents or don't need LLM code generation, use **CustomAgent**:
+
+```python
+import asyncio
+from jarviscore import Mesh
+from jarviscore.profiles import CustomAgent
+
+
+class MyAgent(CustomAgent):
+    role = "processor"
+    capabilities = ["data_processing"]
+
+    async def execute_task(self, task):
+        """Your existing logic goes here."""
+        data = task.get("params", {}).get("data", [])
+        result = [x * 2 for x in data]
+        return {"status": "success", "output": result}
+
+
+async def main():
+    # CustomAgent uses "distributed" (workflow + P2P) or "p2p" (P2P only)
+    mesh = Mesh(mode="distributed", config={
+        'bind_port': 7950,
+        'node_name': 'custom-node',
+    })
+    mesh.add(MyAgent)
+    await mesh.start()
+
+    results = await mesh.workflow("custom-demo", [
+        {"agent": "processor", "task": "Process data", "params": {"data": [1, 2, 3]}}
+    ])
+
+    print(results[0]["output"])  # [2, 4, 6]
+    await mesh.stop()
+
+
+asyncio.run(main())
+```
+
+**Key Benefits:**
+- No LLM API required (no costs!)
+- Keep your existing logic
+- Works with any framework (LangChain, CrewAI, etc.)
+
+**For more:** See [CustomAgent Guide](CUSTOMAGENT_GUIDE.md) for P2P mode and multi-node examples.
+
+---
+
 ## What Just Happened?
 
 Behind the scenes, JarvisCore:
@@ -199,7 +270,7 @@ All from a single natural language prompt!
 
 ---
 
-## Step 5: Try More Complex Examples
+## Step 5: Try More Complex AutoAgent Profile Examples
 
 ### Example 1: Data Processing
 
@@ -305,19 +376,42 @@ class MyAgent(AutoAgent):
     system_prompt = "Instructions for the LLM"  # How to generate code
 ```
 
-### 2.
+### 2. CustomAgent Profile
+
+The `CustomAgent` profile lets you bring your own execution logic:
+
+```python
+class MyAgent(CustomAgent):
+    role = "unique_name"
+    capabilities = ["skill1", "skill2"]
+
+    async def execute_task(self, task):  # For workflow steps (distributed)
+        return {"status": "success", "output": ...}
+
+    async def run(self):  # For continuous loop (p2p)
+        while not self.shutdown_requested:
+            msg = await self.peers.receive(timeout=0.5)
+            ...
+```
+
+### 3. Mesh
 
 The `Mesh` is the orchestrator that manages agents and workflows:
 
 ```python
-mesh = Mesh(mode="autonomous")  #
+mesh = Mesh(mode="autonomous")  # Or "p2p", "distributed"
 mesh.add(MyAgent)  # Register your agent
 await mesh.start()  # Initialize
 results = await mesh.workflow(...)  # Execute tasks
 await mesh.stop()  # Cleanup
 ```
 
-
+**Modes:**
+- `autonomous`: Workflow engine only (AutoAgent)
+- `p2p`: P2P coordinator for agent-to-agent communication (CustomAgent)
+- `distributed`: Both workflow engine AND P2P (CustomAgent)
+
+### 4. Workflow
 
 A workflow is a list of tasks to execute:
 
@@ -331,7 +425,7 @@ results = await mesh.workflow("workflow-id", [
 ])
 ```
 
-###
+### 5. Results
 
 Each task returns a result dict:
 
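The result-dict example itself falls outside the changed lines, but the fields referenced across these hunks (`status` and `output` throughout, plus the `repairs` counter mentioned in the next hunk's context) suggest a shape along these lines, illustrative only:

```python
# Illustrative only: keys attested elsewhere in these docs; others may exist.
result = {
    "status": "success",  # task outcome
    "output": "...",      # the value your agent returned
    "repairs": 0,         # auto-fix count (AutoAgent code generation)
}
```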
@@ -497,11 +591,12 @@ Check `repairs` in the result to see how many fixes were needed.
 
 ## Next Steps
 
-1. **
-2. **
-3. **
-4. **
-5. **
+1. **AutoAgent Guide**: Multi-node distributed mode → [AUTOAGENT_GUIDE.md](AUTOAGENT_GUIDE.md)
+2. **CustomAgent Guide**: P2P and distributed with your code → [CUSTOMAGENT_GUIDE.md](CUSTOMAGENT_GUIDE.md)
+3. **User Guide**: Complete documentation → [USER_GUIDE.md](USER_GUIDE.md)
+4. **API Reference**: [API_REFERENCE.md](API_REFERENCE.md)
+5. **Configuration**: [CONFIGURATION.md](CONFIGURATION.md)
+6. **Examples**: Check out `examples/` directory
 
 ---
 
@@ -599,5 +694,3 @@ Need help?
 ---
 
 **🚀 Happy building with JarvisCore!**
-
-*Built for the AutoAgent/Prompt-Dev generation - where AI writes the code, you write the prompts.*