jarviscore-framework 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/autoagent_distributed_example.py +211 -0
- examples/custom_profile_decorator.py +134 -0
- examples/custom_profile_wrap.py +168 -0
- examples/customagent_distributed_example.py +362 -0
- examples/customagent_p2p_example.py +347 -0
- jarviscore/__init__.py +60 -15
- jarviscore/adapter/__init__.py +40 -0
- jarviscore/adapter/decorator.py +336 -0
- jarviscore/adapter/wrapper.py +303 -0
- jarviscore/cli/check.py +18 -13
- jarviscore/cli/scaffold.py +178 -0
- jarviscore/cli/smoketest.py +3 -2
- jarviscore/context/__init__.py +40 -0
- jarviscore/context/dependency.py +160 -0
- jarviscore/context/jarvis_context.py +207 -0
- jarviscore/context/memory.py +155 -0
- jarviscore/core/agent.py +44 -1
- jarviscore/core/mesh.py +196 -35
- jarviscore/data/.env.example +146 -0
- jarviscore/data/__init__.py +7 -0
- jarviscore/data/examples/autoagent_distributed_example.py +211 -0
- jarviscore/data/examples/calculator_agent_example.py +77 -0
- jarviscore/data/examples/customagent_distributed_example.py +362 -0
- jarviscore/data/examples/customagent_p2p_example.py +347 -0
- jarviscore/data/examples/multi_agent_workflow.py +132 -0
- jarviscore/data/examples/research_agent_example.py +76 -0
- jarviscore/docs/API_REFERENCE.md +264 -51
- jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
- jarviscore/docs/CONFIGURATION.md +41 -23
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
- jarviscore/docs/GETTING_STARTED.md +113 -17
- jarviscore/docs/TROUBLESHOOTING.md +155 -13
- jarviscore/docs/USER_GUIDE.md +144 -363
- jarviscore/execution/llm.py +23 -16
- jarviscore/orchestration/engine.py +20 -8
- jarviscore/p2p/__init__.py +10 -0
- jarviscore/p2p/coordinator.py +129 -0
- jarviscore/p2p/messages.py +87 -0
- jarviscore/p2p/peer_client.py +576 -0
- jarviscore/p2p/peer_tool.py +268 -0
- jarviscore_framework-0.2.0.dist-info/METADATA +143 -0
- jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
- test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
- test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
- test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
- test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
- test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
- test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
- test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
- test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
- test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
- test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
- test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
- test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
- test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
- test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
- test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
- test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
- tests/test_01_analyst_standalone.py +124 -0
- tests/test_02_assistant_standalone.py +164 -0
- tests/test_03_analyst_with_framework.py +945 -0
- tests/test_04_assistant_with_framework.py +1002 -0
- tests/test_05_integration.py +1301 -0
- tests/test_06_real_llm_integration.py +760 -0
- tests/test_07_distributed_single_node.py +578 -0
- tests/test_08_distributed_multi_node.py +454 -0
- tests/test_09_distributed_autoagent.py +509 -0
- tests/test_10_distributed_customagent.py +787 -0
- tests/test_context.py +467 -0
- tests/test_decorator.py +622 -0
- tests/test_mesh.py +35 -4
- jarviscore_framework-0.1.0.dist-info/METADATA +0 -136
- jarviscore_framework-0.1.0.dist-info/RECORD +0 -55
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,454 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Test 8: Distributed Mode - Multi-Node Tests
|
|
3
|
+
|
|
4
|
+
Tests the P2P network layer between multiple mesh instances:
|
|
5
|
+
- SWIM protocol node discovery
|
|
6
|
+
- Capability announcements across nodes
|
|
7
|
+
- Keepalive messaging between nodes
|
|
8
|
+
- Cross-node message routing
|
|
9
|
+
|
|
10
|
+
This file uses MOCKED agents (no real LLM) to test the P2P infrastructure.
|
|
11
|
+
|
|
12
|
+
Run with: pytest tests/test_08_distributed_multi_node.py -v
|
|
13
|
+
"""
|
|
14
|
+
import asyncio
|
|
15
|
+
import sys
|
|
16
|
+
import pytest
|
|
17
|
+
|
|
18
|
+
sys.path.insert(0, '.')
|
|
19
|
+
|
|
20
|
+
from jarviscore import Mesh, Agent
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
24
|
+
# TEST AGENTS
|
|
25
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
26
|
+
|
|
27
|
+
class Node1Agent(Agent):
    """Mock worker hosted on Node 1; records every task it is given."""

    role = "node1_worker"
    capabilities = ["processing", "node1_specific"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Tasks handed to this agent, in arrival order (inspected by tests).
        self.messages_received = []

    async def execute_task(self, task):
        """Log the incoming task and reply with a canned success payload."""
        self.messages_received.append(task)
        response = {
            "status": "success",
            "output": f"Processed by {self.role} on Node 1",
            "agent": self.agent_id,
        }
        return response
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class Node2Agent(Agent):
    """Mock worker hosted on Node 2; records every task it is given."""

    role = "node2_worker"
    capabilities = ["analysis", "node2_specific"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Tasks handed to this agent, in arrival order (inspected by tests).
        self.messages_received = []

    async def execute_task(self, task):
        """Log the incoming task and reply with a canned success payload."""
        self.messages_received.append(task)
        response = {
            "status": "success",
            "output": f"Analyzed by {self.role} on Node 2",
            "agent": self.agent_id,
        }
        return response
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class SharedCapabilityAgent(Agent):
    """Mock agent advertising capabilities that exist on more than one node."""

    role = "shared_worker"
    capabilities = ["shared_capability", "common_task"]

    def __init__(self, agent_id=None, node_name="unknown"):
        super().__init__(agent_id)
        # Name of the node this instance claims to run on (result attribution).
        self.node_name = node_name
        # Every task routed to this agent, in execution order.
        self.tasks_executed = []

    async def execute_task(self, task):
        """Record the task and answer with a node-tagged success payload."""
        self.tasks_executed.append(task)
        response = {
            "status": "success",
            "output": f"Executed on {self.node_name}",
            "agent": self.agent_id,
            "node": self.node_name,
        }
        return response
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
84
|
+
# FIXTURES
|
|
85
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
86
|
+
|
|
87
|
+
@pytest.fixture
async def two_node_mesh():
    """
    Create two distributed mesh instances.

    Note: Multi-node discovery requires proper SWIM seed configuration.
    For simplicity, these tests focus on two independent nodes.

    Yields:
        tuple: (mesh1, mesh2, agent1, agent2)
    """
    # NOTE(review): under pytest-asyncio "strict" mode an async generator
    # fixture must be declared with @pytest_asyncio.fixture — this assumes
    # "auto" mode; confirm against the project's pytest configuration.

    # Node 1
    mesh1 = Mesh(mode="distributed", config={
        'bind_host': '127.0.0.1',
        'bind_port': 7970,
        'node_name': 'node1',
    })
    agent1 = mesh1.add(Node1Agent)

    # Node 2 (independent - no seed connection for simplicity)
    mesh2 = Mesh(mode="distributed", config={
        'bind_host': '127.0.0.1',
        'bind_port': 7971,
        'node_name': 'node2',
    })
    agent2 = mesh2.add(Node2Agent)

    # Start both meshes sequentially with delay. The nested try/finally
    # guarantees every started node is stopped even when a later start (or
    # the test body itself) raises — previously a failed mesh2.start()
    # leaked node 1's bound port into the rest of the test session.
    await mesh1.start()
    try:
        await asyncio.sleep(1.0)  # Wait for Node 1 to fully initialize
        await mesh2.start()
        try:
            await asyncio.sleep(0.5)  # Wait for Node 2

            yield mesh1, mesh2, agent1, agent2
        finally:
            # Cleanup in reverse start order, with a settling pause
            await mesh2.stop()
            await asyncio.sleep(0.5)
    finally:
        await mesh1.stop()
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
@pytest.fixture
async def single_distributed_mesh():
    """Spin up one distributed mesh instance for basic P2P tests."""
    config = {
        'bind_host': '127.0.0.1',
        'bind_port': 7972,
        'node_name': 'test-node',
        'keepalive_interval': 5,  # Fast keepalive for testing
    }
    mesh = Mesh(mode="distributed", config=config)
    mesh.add(Node1Agent)

    await mesh.start()
    yield mesh
    await mesh.stop()
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
144
|
+
# TEST CLASS: Multi-Node Discovery
|
|
145
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
146
|
+
|
|
147
|
+
class TestMultiNodeDiscovery:
    """Exercises SWIM-protocol node discovery across mesh instances."""

    @pytest.mark.asyncio
    async def test_two_nodes_start_successfully(self, two_node_mesh):
        """Both nodes should start without errors."""
        mesh_a, mesh_b, _, _ = two_node_mesh

        for mesh in (mesh_a, mesh_b):
            assert mesh._started is True
            assert mesh._p2p_coordinator is not None

    @pytest.mark.asyncio
    async def test_swim_nodes_initialized(self, two_node_mesh):
        """SWIM nodes should be initialized on both meshes."""
        mesh_a, mesh_b, _, _ = two_node_mesh

        for mesh in (mesh_a, mesh_b):
            manager = mesh._p2p_coordinator.swim_manager
            assert manager is not None
            assert manager.swim_node is not None

    @pytest.mark.asyncio
    async def test_nodes_discover_each_other(self, two_node_mesh):
        """Nodes should discover each other via SWIM."""
        mesh_a, mesh_b, _, _ = two_node_mesh

        # Allow extra settling time for discovery to happen.
        await asyncio.sleep(2.0)

        # Pull the membership view from each node's SWIM layer.
        swim_a = mesh_a._p2p_coordinator.swim_manager.swim_node
        swim_b = mesh_b._p2p_coordinator.swim_manager.swim_node

        members_a = swim_a.get_members() if hasattr(swim_a, 'get_members') else []
        members_b = swim_b.get_members() if hasattr(swim_b, 'get_members') else []

        # Each node should at least see itself; with discovery working,
        # it should see the other node as well.
        print(f"Node 1 members: {members_a}")
        print(f"Node 2 members: {members_b}")

        # Both coordinators must still be running at this point.
        assert mesh_a._p2p_coordinator._started
        assert mesh_b._p2p_coordinator._started

    @pytest.mark.asyncio
    async def test_zmq_agents_initialized(self, two_node_mesh):
        """ZMQ agents should be initialized for messaging."""
        mesh_a, mesh_b, _, _ = two_node_mesh

        for mesh in (mesh_a, mesh_b):
            assert mesh._p2p_coordinator.swim_manager.zmq_agent is not None
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
204
|
+
# TEST CLASS: Capability Announcements
|
|
205
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
206
|
+
|
|
207
|
+
class TestCapabilityAnnouncements:
    """Checks that agent capabilities are published into the capability map."""

    @pytest.mark.asyncio
    async def test_node1_announces_capabilities(self, two_node_mesh):
        """Node 1 should announce its capabilities."""
        mesh_a, _, _, _ = two_node_mesh

        cap_map = mesh_a._p2p_coordinator._capability_map

        # Node1Agent registers "processing" and "node1_specific".
        assert "processing" in cap_map or len(cap_map) > 0

    @pytest.mark.asyncio
    async def test_node2_announces_capabilities(self, two_node_mesh):
        """Node 2 should announce its capabilities."""
        _, mesh_b, _, _ = two_node_mesh

        cap_map = mesh_b._p2p_coordinator._capability_map

        # Node2Agent registers "analysis" and "node2_specific".
        assert "analysis" in cap_map or len(cap_map) > 0

    @pytest.mark.asyncio
    async def test_capability_map_populated(self, single_distributed_mesh):
        """Capability map should be populated after start."""
        cap_map = single_distributed_mesh._p2p_coordinator._capability_map

        # Node1Agent contributes at least one capability entry.
        assert len(cap_map) > 0
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
242
|
+
# TEST CLASS: Keepalive
|
|
243
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
244
|
+
|
|
245
|
+
class TestMultiNodeKeepalive:
    """Checks keepalive wiring on a running distributed mesh."""

    @pytest.mark.asyncio
    async def test_keepalive_manager_initialized(self, single_distributed_mesh):
        """Keepalive manager should be initialized."""
        coordinator = single_distributed_mesh._p2p_coordinator
        assert coordinator.keepalive_manager is not None

    @pytest.mark.asyncio
    async def test_keepalive_manager_started(self, single_distributed_mesh):
        """Keepalive manager should be started."""
        manager = single_distributed_mesh._p2p_coordinator.keepalive_manager
        # KeepaliveManager exposes its run state via the _running flag.
        assert manager._running is True

    @pytest.mark.asyncio
    async def test_keepalive_config_applied(self, single_distributed_mesh):
        """Keepalive config should be applied."""
        manager = single_distributed_mesh._p2p_coordinator.keepalive_manager
        # The fixture configures keepalive_interval=5.
        assert manager.interval == 5
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
275
|
+
# TEST CLASS: Multi-Node Messaging
|
|
276
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
277
|
+
|
|
278
|
+
class TestMultiNodeMessaging:
    """Verifies each node can run workflows through its own local agents."""

    @pytest.mark.asyncio
    async def test_broadcaster_initialized_on_both(self, two_node_mesh):
        """Broadcaster should be initialized on both nodes."""
        mesh_a, mesh_b, _, _ = two_node_mesh

        for mesh in (mesh_a, mesh_b):
            assert mesh._p2p_coordinator.broadcaster is not None

    @pytest.mark.asyncio
    async def test_workflow_on_node1(self, two_node_mesh):
        """Node 1 should execute workflow with its local agent."""
        mesh_a, _, _, _ = two_node_mesh

        steps = [{"agent": "node1_worker", "task": "Process data on Node 1"}]
        results = await mesh_a.workflow("node1-workflow", steps)

        assert len(results) == 1
        outcome = results[0]
        assert outcome["status"] == "success"
        assert "Node 1" in outcome["output"]

    @pytest.mark.asyncio
    async def test_workflow_on_node2(self, two_node_mesh):
        """Node 2 should execute workflow with its local agent."""
        _, mesh_b, _, _ = two_node_mesh

        steps = [{"agent": "node2_worker", "task": "Analyze data on Node 2"}]
        results = await mesh_b.workflow("node2-workflow", steps)

        assert len(results) == 1
        outcome = results[0]
        assert outcome["status"] == "success"
        assert "Node 2" in outcome["output"]

    @pytest.mark.asyncio
    async def test_both_nodes_execute_independently(self, two_node_mesh):
        """Both nodes should execute workflows independently."""
        mesh_a, mesh_b, _, _ = two_node_mesh

        # Run one workflow per node concurrently.
        results_a, results_b = await asyncio.gather(
            mesh_a.workflow("parallel-1", [{"agent": "node1_worker", "task": "Task 1"}]),
            mesh_b.workflow("parallel-2", [{"agent": "node2_worker", "task": "Task 2"}])
        )

        assert results_a[0]["status"] == "success"
        assert results_b[0]["status"] == "success"
        assert "Node 1" in results_a[0]["output"]
        assert "Node 2" in results_b[0]["output"]
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
333
|
+
# TEST CLASS: P2P Coordinator State
|
|
334
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
335
|
+
|
|
336
|
+
class TestP2PCoordinatorState:
    """Tests for P2P coordinator internal state and lifecycle cleanup."""

    @pytest.mark.asyncio
    async def test_coordinator_stores_agent_peer_clients(self, single_distributed_mesh):
        """Coordinator should track registered peer clients."""
        mesh = single_distributed_mesh

        # In distributed mode, peer clients are registered
        # (even though they're mainly used in p2p mode)
        assert mesh._p2p_coordinator is not None

    @pytest.mark.asyncio
    async def test_coordinator_stop_cleans_up(self):
        """Stopping coordinator should clean up resources."""
        mesh = Mesh(mode="distributed", config={'bind_port': 7973})
        mesh.add(Node1Agent)

        await mesh.start()
        try:
            assert mesh._p2p_coordinator._started is True
        finally:
            # Stop even if the started-state assertion above fails, so the
            # bound port does not leak into later tests.
            await mesh.stop()
        assert mesh._p2p_coordinator._started is False

    @pytest.mark.asyncio
    async def test_multiple_starts_same_port_fails(self):
        """Starting two meshes on same port should fail."""
        mesh1 = Mesh(mode="distributed", config={'bind_port': 7974})
        mesh1.add(Node1Agent)
        await mesh1.start()

        try:
            mesh2 = Mesh(mode="distributed", config={'bind_port': 7974})
            mesh2.add(Node2Agent)

            # Should fail because port is already in use
            with pytest.raises(Exception):
                await mesh2.start()
        finally:
            # Previously mesh1 leaked whenever the raises-check failed
            # (mesh1.stop() was only reached on the success path); always
            # release the port.
            await mesh1.stop()
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
378
|
+
# MANUAL DEMONSTRATION
|
|
379
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
380
|
+
|
|
381
|
+
async def run_multi_node_demo():
    """Demonstrate multi-node distributed mode.

    Starts a seed node and a joiner node, waits for SWIM discovery, runs
    one workflow on each node (then both in parallel), and shuts the nodes
    down. Shutdown is wrapped in try/finally so a failure mid-demo no
    longer leaves ports 7980/7981 bound.
    """
    print("\n" + "="*70)
    print("DISTRIBUTED MODE - MULTI-NODE DEMONSTRATION")
    print("="*70)

    # Create Node 1 (seed)
    print("\n[NODE 1] Creating seed node on port 7980...")
    mesh1 = Mesh(mode="distributed", config={
        'bind_host': '127.0.0.1',
        'bind_port': 7980,
        'node_name': 'node1-seed',
    })
    agent1 = mesh1.add(Node1Agent)
    await mesh1.start()
    try:
        print(f" - Agent: {agent1.role}")
        print(f" - P2P Coordinator: {mesh1._p2p_coordinator is not None}")

        # Create Node 2 (joins via seed)
        print("\n[NODE 2] Creating node on port 7981, joining via seed...")
        mesh2 = Mesh(mode="distributed", config={
            'bind_host': '127.0.0.1',
            'bind_port': 7981,
            'node_name': 'node2-joiner',
            'seed_nodes': '127.0.0.1:7980',
        })
        agent2 = mesh2.add(Node2Agent)
        await mesh2.start()
        try:
            print(f" - Agent: {agent2.role}")
            print(f" - P2P Coordinator: {mesh2._p2p_coordinator is not None}")

            # Give time for discovery
            print("\n[DISCOVERY] Waiting for nodes to discover each other...")
            await asyncio.sleep(2.0)

            # Show capabilities
            print("\n[CAPABILITIES]")
            print(f" Node 1 capabilities: {list(mesh1._p2p_coordinator._capability_map.keys())}")
            print(f" Node 2 capabilities: {list(mesh2._p2p_coordinator._capability_map.keys())}")

            # Execute workflows on each node
            print("\n[WORKFLOW] Executing on each node...")

            results1 = await mesh1.workflow("demo-node1", [
                {"agent": "node1_worker", "task": "Process data"}
            ])
            print(f" Node 1 result: {results1[0]['output']}")

            results2 = await mesh2.workflow("demo-node2", [
                {"agent": "node2_worker", "task": "Analyze data"}
            ])
            print(f" Node 2 result: {results2[0]['output']}")

            # Parallel execution
            print("\n[PARALLEL] Executing on both nodes simultaneously...")
            r1, r2 = await asyncio.gather(
                mesh1.workflow("parallel-demo-1", [{"agent": "node1_worker", "task": "Parallel 1"}]),
                mesh2.workflow("parallel-demo-2", [{"agent": "node2_worker", "task": "Parallel 2"}])
            )
            print(f" Node 1: {r1[0]['status']}")
            print(f" Node 2: {r2[0]['status']}")
        finally:
            # Cleanup: stop the joiner first, then the seed.
            print("\n[CLEANUP] Stopping nodes...")
            await mesh2.stop()
    finally:
        await mesh1.stop()

    print("\n" + "="*70)
    print("MULTI-NODE DEMONSTRATION COMPLETE")
    print("="*70)
|
|
451
|
+
|
|
452
|
+
|
|
453
|
+
# Allow running this file directly (outside pytest) as a live demo.
if __name__ == "__main__":
    asyncio.run(run_multi_node_demo())
|