jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/autoagent_distributed_example.py +211 -0
- examples/custom_profile_decorator.py +134 -0
- examples/custom_profile_wrap.py +168 -0
- examples/customagent_distributed_example.py +362 -0
- examples/customagent_p2p_example.py +347 -0
- jarviscore/__init__.py +49 -36
- jarviscore/adapter/__init__.py +15 -9
- jarviscore/adapter/decorator.py +23 -19
- jarviscore/adapter/wrapper.py +303 -0
- jarviscore/cli/scaffold.py +1 -1
- jarviscore/cli/smoketest.py +3 -2
- jarviscore/core/agent.py +44 -1
- jarviscore/core/mesh.py +196 -35
- jarviscore/data/examples/autoagent_distributed_example.py +211 -0
- jarviscore/data/examples/customagent_distributed_example.py +362 -0
- jarviscore/data/examples/customagent_p2p_example.py +347 -0
- jarviscore/docs/API_REFERENCE.md +264 -51
- jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
- jarviscore/docs/CONFIGURATION.md +35 -21
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
- jarviscore/docs/GETTING_STARTED.md +106 -13
- jarviscore/docs/TROUBLESHOOTING.md +144 -6
- jarviscore/docs/USER_GUIDE.md +138 -361
- jarviscore/orchestration/engine.py +20 -8
- jarviscore/p2p/__init__.py +10 -0
- jarviscore/p2p/coordinator.py +129 -0
- jarviscore/p2p/messages.py +87 -0
- jarviscore/p2p/peer_client.py +576 -0
- jarviscore/p2p/peer_tool.py +268 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/METADATA +60 -54
- jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
- test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
- test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
- test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
- test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
- test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
- test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
- test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
- test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
- test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
- test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
- test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
- test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
- test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
- test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
- test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
- test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
- tests/test_01_analyst_standalone.py +124 -0
- tests/test_02_assistant_standalone.py +164 -0
- tests/test_03_analyst_with_framework.py +945 -0
- tests/test_04_assistant_with_framework.py +1002 -0
- tests/test_05_integration.py +1301 -0
- tests/test_06_real_llm_integration.py +760 -0
- tests/test_07_distributed_single_node.py +578 -0
- tests/test_08_distributed_multi_node.py +454 -0
- tests/test_09_distributed_autoagent.py +509 -0
- tests/test_10_distributed_customagent.py +787 -0
- tests/test_mesh.py +35 -4
- jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
jarviscore/core/mesh.py
CHANGED
@@ -1,9 +1,10 @@
 """
 Mesh - Central orchestrator for JarvisCore framework.
 
-The Mesh coordinates agent execution and provides
+The Mesh coordinates agent execution and provides three operational modes:
 - Autonomous: Execute multi-step workflows with dependency resolution
 - Distributed: Run as P2P service responding to task requests
+- P2P: Agents run their own loops with direct peer-to-peer communication
 
 Day 1: Foundation with agent registration and setup
 Day 2: P2P integration for agent discovery and coordination
@@ -11,6 +12,7 @@ Day 3: Full workflow orchestration with state management
 """
 from typing import List, Dict, Any, Optional, Type
 from enum import Enum
+import asyncio
 import logging
 
 from .agent import Agent
@@ -21,7 +23,8 @@ logger = logging.getLogger(__name__)
 class MeshMode(Enum):
     """Operational modes for Mesh."""
     AUTONOMOUS = "autonomous"    # Execute workflows locally
-    DISTRIBUTED = "distributed"  # Run as P2P service
+    DISTRIBUTED = "distributed"  # Run as P2P service (workflow-driven)
+    P2P = "p2p"                  # Agents run own loops with direct peer communication
 
 
 class Mesh:
@@ -29,18 +32,23 @@ class Mesh:
     Central orchestrator for JarvisCore agent framework.
 
     The Mesh manages agent lifecycle, coordinates execution, and provides
-
+    three operational modes:
 
     1. **Autonomous Mode**: Execute multi-step workflows locally
        - User defines workflow steps with dependencies
        - Mesh routes tasks to capable agents
        - Handles crash recovery and checkpointing
 
-    2. **Distributed Mode**: Run as P2P service
+    2. **Distributed Mode**: Run as P2P service (workflow-driven)
       - Agents join P2P network and announce capabilities
       - Receive and execute tasks from other nodes
       - Coordinate with remote agents for complex workflows
 
+    3. **P2P Mode**: Direct agent-to-agent communication
+       - Agents run their own execution loops via run() method
+       - Agents communicate directly via self.peers client
+       - No workflow engine - agents control their own flow
+
     Example (Autonomous):
         mesh = Mesh(mode="autonomous")
         mesh.add(ScraperAgent)
@@ -58,7 +66,15 @@ class Mesh:
         mesh.add(DatabaseAgent)
 
         await mesh.start()
-        await mesh.serve_forever()  # Run as service
+        await mesh.serve_forever()  # Run as workflow service
+
+    Example (P2P):
+        mesh = Mesh(mode="p2p")
+        mesh.add(ScoutAgent)    # Has run() method
+        mesh.add(AnalystAgent)  # Has run() method
+
+        await mesh.start()
+        await mesh.run_forever()  # Agents run their own loops
     """
 
     def __init__(
@@ -70,9 +86,9 @@ class Mesh:
         Initialize Mesh orchestrator.
 
         Args:
-            mode: Operational mode ("autonomous" or "
+            mode: Operational mode ("autonomous", "distributed", or "p2p")
             config: Optional configuration dictionary:
-                - p2p_enabled: Enable P2P networking (default: True for distributed)
+                - p2p_enabled: Enable P2P networking (default: True for distributed/p2p)
                 - state_backend: "file", "redis", "mongodb" (default: "file")
                 - event_store: Path or connection string for event storage
                 - checkpoint_interval: Save checkpoints every N steps (default: 1)
@@ -86,7 +102,7 @@ class Mesh:
             self.mode = MeshMode(mode)
         except ValueError:
             raise ValueError(
-                f"Invalid mode '{mode}'. Must be 'autonomous' or '
+                f"Invalid mode '{mode}'. Must be 'autonomous', 'distributed', or 'p2p'"
             )
 
         self.config = config or {}
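For orientation, here is a minimal construction sketch using only the options documented in the hunk above. The concrete values (backend choice, event-store path, interval) are illustrative assumptions for this note, not defaults taken from the framework.

# Construct a Mesh with the documented config keys (values are illustrative only).
from jarviscore import Mesh

mesh = Mesh(
    mode="distributed",
    config={
        "p2p_enabled": True,          # default is True for distributed/p2p modes
        "state_backend": "file",      # or "redis" / "mongodb"
        "event_store": "./events",    # path or connection string (assumed path here)
        "checkpoint_interval": 1,     # save a checkpoint every step
    },
)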
@@ -107,7 +123,7 @@ class Mesh:
 
     def add(
         self,
-
+        agent_class_or_instance,
         agent_id: Optional[str] = None,
         **kwargs
     ) -> Agent:
@@ -115,30 +131,39 @@ class Mesh:
         Register an agent with the mesh.
 
         Args:
-
-
-
+            agent_class_or_instance: Agent class to instantiate, or pre-instantiated
+                agent (from wrap() function). Must inherit from Agent.
+            agent_id: Optional unique identifier for the agent (ignored if instance)
+            **kwargs: Additional arguments passed to agent constructor (ignored if instance)
 
         Returns:
-
+            Agent instance
 
         Raises:
             ValueError: If agent with same role already registered
-            TypeError: If
+            TypeError: If agent doesn't inherit from Agent
 
         Example:
             mesh = Mesh()
+
+            # Add a class (will be instantiated)
             scraper = mesh.add(ScraperAgent, agent_id="scraper-1")
-            processor = mesh.add(ProcessorAgent)
-        """
-        # Validate agent class
-        if not issubclass(agent_class, Agent):
-            raise TypeError(
-                f"{agent_class.__name__} must inherit from Agent base class"
-            )
 
-
-
+            # Add a pre-instantiated agent (from wrap())
+            wrapped = wrap(my_instance, role="processor", capabilities=["processing"])
+            mesh.add(wrapped)
+        """
+        # Check if it's already an instance (from wrap() function)
+        if isinstance(agent_class_or_instance, Agent):
+            agent = agent_class_or_instance
+        else:
+            # It's a class - validate and instantiate
+            agent_class = agent_class_or_instance
+            if not issubclass(agent_class, Agent):
+                raise TypeError(
+                    f"{agent_class.__name__} must inherit from Agent base class"
+                )
+            agent = agent_class(agent_id=agent_id, **kwargs)
 
         # Check for duplicate agent_ids
         if agent.agent_id in self._agent_ids:
@@ -149,7 +174,9 @@ class Mesh:
 
         # If agent_id was NOT explicitly provided (auto-generated),
        # prevent duplicate roles to avoid accidents
-
+        # For instances (from wrap()), check if it's a new role
+        is_instance = isinstance(agent_class_or_instance, Agent)
+        if not is_instance and agent_id is None and agent.role in self._agent_registry:
            raise ValueError(
                f"Agent with role '{agent.role}' already registered. "
                f"Use agent_id parameter to create multiple agents with same role."
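The add() change above accepts either an Agent subclass or a pre-built instance from wrap(). A short sketch of both call forms follows. The wrap() import path is an assumption (the adapter package gained wrapper.py in this release, but its exports are not shown in this diff), MyExistingBot is a hypothetical class, and whether wrap() accepts an arbitrary object like this or expects a specific method set is covered by CUSTOMAGENT_GUIDE.md and the custom_profile_wrap.py example rather than by this diff.

# Sketch of the two call forms accepted by the new add() signature.
from jarviscore import Mesh
from jarviscore.adapter import wrap   # assumed export location (not shown in this diff)

class MyExistingBot:                   # hypothetical non-Agent class
    def answer(self, question: str) -> str:
        return f"echo: {question}"

mesh = Mesh(mode="autonomous")

# Form 1: pass an Agent subclass; the mesh instantiates it.
# scraper = mesh.add(ScraperAgent, agent_id="scraper-1")

# Form 2: pass a pre-built instance produced by wrap(); agent_id/kwargs are ignored.
wrapped = wrap(MyExistingBot(), role="processor", capabilities=["processing"])
mesh.add(wrapped)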
@@ -215,8 +242,8 @@ class Mesh:
                 self._logger.error(f"Failed to setup agent {agent.agent_id}: {e}")
                 raise
 
-        # Initialize P2P coordinator
-        if self.mode
+        # Initialize P2P coordinator for distributed and p2p modes
+        if self.mode in (MeshMode.DISTRIBUTED, MeshMode.P2P) or self.config.get("p2p_enabled", False):
             self._logger.info("Initializing P2P coordinator...")
             from jarviscore.p2p import P2PCoordinator
             from jarviscore.config import get_config_from_dict
@@ -233,8 +260,13 @@ class Mesh:
             await self._p2p_coordinator.announce_capabilities()
             self._logger.info("✓ Capabilities announced to mesh")
 
-        #
-        if self.mode == MeshMode.
+        # Inject PeerClients for p2p mode
+        if self.mode == MeshMode.P2P:
+            self._inject_peer_clients()
+            self._logger.info("✓ PeerClients injected into agents")
+
+        # Initialize workflow engine (for autonomous and distributed modes)
+        if self.mode in (MeshMode.AUTONOMOUS, MeshMode.DISTRIBUTED):
             self._logger.info("Initializing workflow engine...")
             from jarviscore.orchestration import WorkflowEngine
 
@@ -300,10 +332,11 @@ class Mesh:
         if not self._started:
             raise RuntimeError("Mesh not started. Call await mesh.start() first.")
 
-        if self.mode
+        if self.mode == MeshMode.P2P:
             raise RuntimeError(
-                f"workflow()
-                f"
+                f"workflow() not available in p2p mode. "
+                f"P2P mode uses agent.run() loops with direct peer communication. "
+                f"Use autonomous or distributed mode for workflow orchestration."
             )
 
         self._logger.info(f"Executing workflow: {workflow_id} with {len(steps)} step(s)")
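Taken together with serve_forever() and the new run_forever() further down, the guard above fixes one entry point per mode. A small dispatch sketch for illustration only; drive() and steps are hypothetical names, not framework code.

# One entry point per mode, as enforced by the guards in this diff.
async def drive(mesh, steps):
    if mesh.mode.value == "autonomous":
        return await mesh.workflow("example", steps)   # workflow engine available
    elif mesh.mode.value == "distributed":
        await mesh.serve_forever()                     # workflow-driven P2P service
    else:  # "p2p"
        await mesh.run_forever()                       # agents drive their own run() loops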
@@ -366,10 +399,11 @@ class Mesh:
         Stop mesh and cleanup resources.
 
         This method:
-        1.
-        2.
-        3.
-        4.
+        1. Requests shutdown for all agents (p2p mode)
+        2. Calls teardown() on all agents
+        3. Disconnects from P2P network
+        4. Saves state and checkpoints
+        5. Closes all connections
 
         Example:
             await mesh.stop()
@@ -379,6 +413,15 @@ class Mesh:
 
         self._logger.info("Stopping mesh...")
 
+        # Request shutdown for all agents (for p2p mode loops)
+        for agent in self.agents:
+            agent.request_shutdown()
+
+        # Unregister peer clients
+        if self._p2p_coordinator:
+            for agent in self.agents:
+                self._p2p_coordinator.unregister_peer_client(agent.agent_id)
+
         # Teardown agents
         for agent in self.agents:
             try:
@@ -400,6 +443,124 @@ class Mesh:
         self._started = False
         self._logger.info("Mesh stopped successfully")
 
+    def _inject_peer_clients(self):
+        """
+        Inject PeerClient instances into all agents.
+
+        Called during start() in p2p mode. Gives each agent a self.peers
+        client for direct peer-to-peer communication.
+        """
+        from jarviscore.p2p import PeerClient
+
+        node_id = ""
+        if self._p2p_coordinator and self._p2p_coordinator.swim_manager:
+            addr = self._p2p_coordinator.swim_manager.bind_addr
+            if addr:
+                node_id = f"{addr[0]}:{addr[1]}"
+
+        for agent in self.agents:
+            peer_client = PeerClient(
+                coordinator=self._p2p_coordinator,
+                agent_id=agent.agent_id,
+                agent_role=agent.role,
+                agent_registry=self._agent_registry,
+                node_id=node_id
+            )
+            agent.peers = peer_client
+
+            # Register with coordinator for remote message routing
+            if self._p2p_coordinator:
+                self._p2p_coordinator.register_peer_client(agent.agent_id, peer_client)
+
+            self._logger.debug(f"Injected PeerClient into agent: {agent.agent_id}")
+
+    async def run_forever(self):
+        """
+        Run all agent loops concurrently (p2p mode only).
+
+        In p2p mode, agents run their own execution loops via their run() method.
+        This method starts all agent loops and waits until shutdown is requested.
+
+        Raises:
+            RuntimeError: If mesh not started or not in p2p mode
+
+        Example:
+            mesh = Mesh(mode="p2p")
+            mesh.add(ScoutAgent)    # Has async def run(self)
+            mesh.add(AnalystAgent)  # Has async def run(self)
+
+            await mesh.start()
+            await mesh.run_forever()  # Blocks until Ctrl+C
+        """
+        if not self._started:
+            raise RuntimeError("Mesh not started. Call await mesh.start() first.")
+
+        if self.mode != MeshMode.P2P:
+            raise RuntimeError(
+                f"run_forever() only available in p2p mode. "
+                f"Current mode: {self.mode.value}. "
+                f"Use workflow() for autonomous mode or serve_forever() for distributed mode."
+            )
+
+        self._logger.info("Starting agent loops in p2p mode...")
+
+        # Collect all agent run() coroutines
+        agent_tasks = []
+        for agent in self.agents:
+            if hasattr(agent, 'run') and asyncio.iscoroutinefunction(agent.run):
+                task = asyncio.create_task(
+                    self._run_agent_loop(agent),
+                    name=f"agent-{agent.agent_id}"
+                )
+                agent_tasks.append(task)
+                self._logger.info(f"Started loop for agent: {agent.agent_id}")
+            else:
+                self._logger.warning(
+                    f"Agent {agent.agent_id} has no async run() method, skipping"
+                )
+
+        if not agent_tasks:
+            raise RuntimeError(
+                "No agents with run() method found. "
+                "p2p mode requires agents that implement async def run(self)."
+            )
+
+        self._logger.info(f"Running {len(agent_tasks)} agent loop(s). Press Ctrl+C to stop.")
+
+        # Run until shutdown
+        try:
+            await asyncio.gather(*agent_tasks)
+        except asyncio.CancelledError:
+            self._logger.info("Agent loops cancelled")
+        except KeyboardInterrupt:
+            self._logger.info("Keyboard interrupt received")
+        finally:
+            # Request shutdown for all agents
+            for agent in self.agents:
+                agent.request_shutdown()
+
+            # Cancel any remaining tasks
+            for task in agent_tasks:
+                if not task.done():
+                    task.cancel()
+
+            await self.stop()
+
+    async def _run_agent_loop(self, agent: Agent):
+        """
+        Run a single agent's loop with error handling.
+
+        Wraps the agent's run() method to catch and log errors.
+        """
+        try:
+            await agent.run()
+        except asyncio.CancelledError:
+            self._logger.debug(f"Agent {agent.agent_id} loop cancelled")
+            raise
+        except Exception as e:
+            self._logger.error(f"Agent {agent.agent_id} loop error: {e}")
+            raise
+
     def _find_agent_for_step(self, step: Dict[str, Any]) -> Optional[Agent]:
         """
         Find agent capable of executing a step.
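To show how the pieces added above fit together, here is a minimal p2p-mode wiring sketch based only on the Mesh API visible in this diff. The Agent import path and the role/capabilities class attributes follow the bundled examples and are assumptions here; the PeerClient messaging API lives in jarviscore/p2p/peer_client.py and is not shown in this diff, so the loop body only idles.

# Minimal p2p-mode wiring sketch (assumptions noted above).
import asyncio
from jarviscore import Mesh
from jarviscore.core.agent import Agent   # assumed import path (mesh.py imports ".agent")

class ScoutAgent(Agent):
    role = "scout"                         # attribute style follows the AutoAgent example
    capabilities = ["scouting"]

    async def run(self):
        # In p2p mode the mesh injects a PeerClient as self.peers before this loop
        # starts; its messaging methods are not shown in this diff, so we just idle.
        try:
            while True:
                await asyncio.sleep(1)     # placeholder work
        except asyncio.CancelledError:
            raise                          # run_forever() cancels loops on shutdown

async def main():
    mesh = Mesh(mode="p2p")
    mesh.add(ScoutAgent)
    await mesh.start()
    await mesh.run_forever()               # blocks until Ctrl+C, then calls mesh.stop()

if __name__ == "__main__":
    asyncio.run(main())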
examples/autoagent_distributed_example.py
ADDED
@@ -0,0 +1,211 @@
+"""
+AutoAgent Distributed Mode Example
+
+Demonstrates AutoAgent in distributed mode, which combines:
+- P2P network layer (SWIM protocol, ZMQ messaging)
+- Workflow orchestration (step execution, dependencies)
+
+This is ideal for multi-node deployments where agents can:
+- Execute on different machines
+- Discover each other via SWIM
+- Run orchestrated workflows across the network
+
+Usage:
+    python examples/autoagent_distributed_example.py
+
+Prerequisites:
+    - .env file with LLM API key (CLAUDE_API_KEY, etc.)
+"""
+import asyncio
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from jarviscore import Mesh
+from jarviscore.profiles import AutoAgent
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# AUTOAGENT DEFINITIONS
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class DataCollectorAgent(AutoAgent):
+    """Collects and generates data."""
+    role = "collector"
+    capabilities = ["data_collection", "sampling"]
+    system_prompt = """
+    You are a data collection specialist. Generate sample datasets
+    based on specifications. Use Python's standard library only.
+    Store results in a variable named 'result' as a dictionary.
+    """
+
+
+class DataProcessorAgent(AutoAgent):
+    """Processes and transforms data."""
+    role = "processor"
+    capabilities = ["data_processing", "transformation"]
+    system_prompt = """
+    You are a data processing expert. Transform and clean datasets.
+    Apply filters, aggregations, and transformations as needed.
+    Use Python's standard library only. Store results in 'result'.
+    """
+
+
+class ReportWriterAgent(AutoAgent):
+    """Generates reports from processed data."""
+    role = "reporter"
+    capabilities = ["reporting", "documentation"]
+    system_prompt = """
+    You are a technical writer. Create clear, well-formatted reports
+    from data. Use markdown formatting. Store the report in 'result'.
+    """
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# MAIN EXAMPLE
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def main():
+    """Run AutoAgent distributed mode example."""
+    print("\n" + "="*70)
+    print("JarvisCore: AutoAgent in Distributed Mode")
+    print("="*70)
+
+    # ─────────────────────────────────────────────────────────────────────────
+    # KEY DIFFERENCE: mode="distributed" with P2P configuration
+    # ─────────────────────────────────────────────────────────────────────────
+    mesh = Mesh(
+        mode="distributed",  # Enables P2P + Workflow Engine
+        config={
+            # P2P Network Configuration
+            'bind_host': '127.0.0.1',  # Interface to bind to
+            'bind_port': 7950,         # SWIM protocol port (ZMQ uses +1000)
+            'node_name': 'autoagent-node',
+
+            # For multi-node: uncomment to join existing cluster
+            # 'seed_nodes': '192.168.1.10:7950,192.168.1.11:7950',
+
+            # AutoAgent Configuration
+            'execution_timeout': 60,    # Max seconds per task
+            'max_repair_attempts': 2,   # Auto-repair on failure
+            'log_directory': './logs',  # Result storage
+        }
+    )
+
+    # Add agents - same as autonomous mode
+    mesh.add(DataCollectorAgent)
+    mesh.add(DataProcessorAgent)
+    mesh.add(ReportWriterAgent)
+
+    try:
+        await mesh.start()
+
+        print("\n[INFO] Mesh started in DISTRIBUTED mode")
+        print(f"  - P2P Coordinator: Active (port {mesh.config.get('bind_port', 7950)})")
+        print(f"  - Workflow Engine: Active")
+        print(f"  - Agents: {len(mesh.agents)}")
+
+        # ─────────────────────────────────────────────────────────────────────
+        # WORKFLOW EXECUTION - Same API as autonomous mode
+        # ─────────────────────────────────────────────────────────────────────
+        print("\n" + "-"*70)
+        print("Executing Pipeline: Collect → Process → Report")
+        print("-"*70)
+
+        results = await mesh.workflow("distributed-pipeline", [
+            {
+                "id": "collect",
+                "agent": "collector",
+                "task": "Generate a dataset of 10 products with name, price, and category"
+            },
+            {
+                "id": "process",
+                "agent": "processor",
+                "task": "Calculate total value, average price, and count by category",
+                "depends_on": ["collect"]
+            },
+            {
+                "id": "report",
+                "agent": "reporter",
+                "task": "Create a summary report with the statistics",
+                "depends_on": ["process"]
+            }
+        ])
+
+        # Display results
+        print("\n" + "="*70)
+        print("RESULTS")
+        print("="*70)
+
+        for i, result in enumerate(results):
+            step_names = ["Data Collection", "Data Processing", "Report Generation"]
+            print(f"\n{step_names[i]}:")
+            print(f"  Status: {result['status']}")
+            if result['status'] == 'success':
+                output = str(result.get('output', ''))[:200]
+                print(f"  Output: {output}...")
+            else:
+                print(f"  Error: {result.get('error')}")
+
+        # Summary
+        successes = sum(1 for r in results if r['status'] == 'success')
+        print(f"\n{'='*70}")
+        print(f"Pipeline Complete: {successes}/{len(results)} steps successful")
+        print(f"{'='*70}")
+
+        await mesh.stop()
+
+    except Exception as e:
+        print(f"\nError: {e}")
+        import traceback
+        traceback.print_exc()
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# MULTI-NODE EXAMPLE (Reference)
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def multi_node_example():
+    """
+    Example: Running agents across multiple machines.
+
+    Node 1 (seed node):
+        mesh = Mesh(mode="distributed", config={
+            'bind_host': '0.0.0.0',
+            'bind_port': 7950,
+            'node_name': 'node-1',
+        })
+        mesh.add(DataCollectorAgent)
+        await mesh.start()
+        await mesh.serve_forever()  # Keep running
+
+    Node 2 (joins cluster):
+        mesh = Mesh(mode="distributed", config={
+            'bind_host': '0.0.0.0',
+            'bind_port': 7950,
+            'node_name': 'node-2',
+            'seed_nodes': '192.168.1.10:7950',  # Node 1's address
+        })
+        mesh.add(DataProcessorAgent)
+        await mesh.start()
+        await mesh.serve_forever()
+
+    Node 3 (joins cluster):
+        mesh = Mesh(mode="distributed", config={
+            'bind_host': '0.0.0.0',
+            'bind_port': 7950,
+            'node_name': 'node-3',
+            'seed_nodes': '192.168.1.10:7950',
+        })
+        mesh.add(ReportWriterAgent)
+        await mesh.start()
+        await mesh.serve_forever()
+
+    Any node can now execute workflows that span all three!
+    """
+    pass
+
+
+if __name__ == "__main__":
+    asyncio.run(main())