jarviscore-framework 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/calculator_agent_example.py +77 -0
- examples/multi_agent_workflow.py +132 -0
- examples/research_agent_example.py +76 -0
- jarviscore/__init__.py +54 -0
- jarviscore/cli/__init__.py +7 -0
- jarviscore/cli/__main__.py +33 -0
- jarviscore/cli/check.py +404 -0
- jarviscore/cli/smoketest.py +371 -0
- jarviscore/config/__init__.py +7 -0
- jarviscore/config/settings.py +128 -0
- jarviscore/core/__init__.py +7 -0
- jarviscore/core/agent.py +163 -0
- jarviscore/core/mesh.py +463 -0
- jarviscore/core/profile.py +64 -0
- jarviscore/docs/API_REFERENCE.md +932 -0
- jarviscore/docs/CONFIGURATION.md +753 -0
- jarviscore/docs/GETTING_STARTED.md +600 -0
- jarviscore/docs/TROUBLESHOOTING.md +424 -0
- jarviscore/docs/USER_GUIDE.md +983 -0
- jarviscore/execution/__init__.py +94 -0
- jarviscore/execution/code_registry.py +298 -0
- jarviscore/execution/generator.py +268 -0
- jarviscore/execution/llm.py +430 -0
- jarviscore/execution/repair.py +283 -0
- jarviscore/execution/result_handler.py +332 -0
- jarviscore/execution/sandbox.py +555 -0
- jarviscore/execution/search.py +281 -0
- jarviscore/orchestration/__init__.py +18 -0
- jarviscore/orchestration/claimer.py +101 -0
- jarviscore/orchestration/dependency.py +143 -0
- jarviscore/orchestration/engine.py +292 -0
- jarviscore/orchestration/status.py +96 -0
- jarviscore/p2p/__init__.py +23 -0
- jarviscore/p2p/broadcaster.py +353 -0
- jarviscore/p2p/coordinator.py +364 -0
- jarviscore/p2p/keepalive.py +361 -0
- jarviscore/p2p/swim_manager.py +290 -0
- jarviscore/profiles/__init__.py +6 -0
- jarviscore/profiles/autoagent.py +264 -0
- jarviscore/profiles/customagent.py +137 -0
- jarviscore_framework-0.1.0.dist-info/METADATA +136 -0
- jarviscore_framework-0.1.0.dist-info/RECORD +55 -0
- jarviscore_framework-0.1.0.dist-info/WHEEL +5 -0
- jarviscore_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
- jarviscore_framework-0.1.0.dist-info/top_level.txt +3 -0
- tests/conftest.py +44 -0
- tests/test_agent.py +165 -0
- tests/test_autoagent.py +140 -0
- tests/test_autoagent_day4.py +186 -0
- tests/test_customagent.py +248 -0
- tests/test_integration.py +293 -0
- tests/test_llm_fallback.py +185 -0
- tests/test_mesh.py +356 -0
- tests/test_p2p_integration.py +375 -0
- tests/test_remote_sandbox.py +116 -0
jarviscore/p2p/coordinator.py
@@ -0,0 +1,364 @@
+"""
+P2P Coordinator for JarvisCore Framework
+
+Unified P2P coordination layer wrapping swim_p2p library.
+Provides agent discovery, capability announcement, and message routing.
+
+Adapted from integration-agent P2P infrastructure
+"""
+import asyncio
+import logging
+from typing import List, Dict, Any, Optional
+
+from .swim_manager import SWIMThreadManager
+from .keepalive import P2PKeepaliveManager
+from .broadcaster import StepOutputBroadcaster
+
+logger = logging.getLogger(__name__)
+
+
+class P2PCoordinator:
+    """
+    Simplified P2P coordination layer wrapping swim_p2p library.
+
+    Provides:
+    - SWIM protocol membership management
+    - Agent discovery and capability announcement
+    - Message routing and broadcasting
+    - Smart keepalive with traffic suppression
+    - Step output broadcasting
+
+    Example:
+        coordinator = P2PCoordinator(agents, config)
+        await coordinator.start()
+        await coordinator.announce_capabilities()
+    """
+
+    def __init__(self, agents: List, config: Dict):
+        """
+        Initialize P2P Coordinator.
+
+        Args:
+            agents: List of Agent instances to coordinate
+            config: Configuration dictionary containing:
+                - bind_host: Host to bind SWIM (default: 127.0.0.1)
+                - bind_port: Port to bind SWIM (default: 7946)
+                - node_name: Node identifier (default: jarviscore-node)
+                - seed_nodes: Comma-separated seed nodes (default: "")
+                - transport_type: udp, tcp, or hybrid (default: hybrid)
+                - zmq_port_offset: Offset for ZMQ port (default: 1000)
+                - keepalive_enabled: Enable keepalive (default: True)
+                - keepalive_interval: Keepalive interval in seconds (default: 90)
+        """
+        self.agents = agents
+        self.config = config
+
+        # Core components (from integration-agent)
+        self.swim_manager: Optional[SWIMThreadManager] = None
+        self.keepalive_manager: Optional[P2PKeepaliveManager] = None
+        self.broadcaster: Optional[StepOutputBroadcaster] = None
+
+        # State
+        self._started = False
+        self._capability_map: Dict[str, List[str]] = {}  # capability -> [agent_ids]
+
+    async def start(self):
+        """
+        Start P2P mesh.
+
+        Steps:
+        1. Start SWIM protocol in dedicated thread
+        2. Setup keepalive manager
+        3. Setup step output broadcaster
+        4. Register message handlers
+        """
+        if self._started:
+            logger.warning("P2P Coordinator already started")
+            return
+
+        logger.info("Starting P2P coordinator...")
+
+        # 1. Start SWIM protocol (in dedicated thread)
+        logger.info("Initializing SWIM protocol...")
+        self.swim_manager = SWIMThreadManager(self.config)
+        self.swim_manager.start_swim_in_thread_simple()
+
+        if not self.swim_manager.wait_for_init(timeout=20):
+            raise RuntimeError("SWIM initialization failed")
+        logger.info("✓ SWIM protocol started")
+
+        # 2. Setup keepalive manager
+        logger.info("Starting P2P keepalive...")
+        # Map jarviscore config keys to P2P_KEEPALIVE_* keys
+        keepalive_config = {
+            'P2P_KEEPALIVE_ENABLED': self.config.get('keepalive_enabled', True),
+            'P2P_KEEPALIVE_INTERVAL': self.config.get('keepalive_interval', 90),
+            'P2P_KEEPALIVE_TIMEOUT': self.config.get('keepalive_timeout', 10),
+            'P2P_ACTIVITY_SUPPRESS_WINDOW': self.config.get('activity_suppress_window', 60),
+        }
+        self.keepalive_manager = P2PKeepaliveManager(
+            agent_id=self._get_node_id(),
+            send_p2p_callback=self._send_p2p_message,
+            broadcast_p2p_callback=self._broadcast_p2p_message,
+            config=keepalive_config
+        )
+        await self.keepalive_manager.start()
+        logger.info("✓ Keepalive manager started")
+
+        # 3. Setup broadcaster
+        logger.info("Starting step output broadcaster...")
+        self.broadcaster = StepOutputBroadcaster(
+            agent_id=self._get_node_id(),
+            zmq_agent=self.swim_manager.zmq_agent,
+            swim_node=self.swim_manager.swim_node
+        )
+        logger.info("✓ Broadcaster started")
+
+        # 4. Register message handlers
+        self._register_handlers()
+        logger.info("✓ Message handlers registered")
+
+        self._started = True
+        logger.info("P2P coordinator started successfully")
+
+    def _register_handlers(self):
+        """Register framework message handlers with ZMQ router."""
+        if not self.swim_manager or not self.swim_manager.zmq_agent:
+            logger.error("Cannot register handlers: ZMQ agent not available")
+            return
+
+        zmq = self.swim_manager.zmq_agent
+
+        # Register message type handlers
+        message_types = {
+            "STEP_OUTPUT_BROADCAST": self._handle_step_broadcast,
+            "STEP_OUTPUT_ACK": self._handle_step_ack,
+            "STEP_COMPLETION_NUDGE": self._handle_nudge,
+            "STEP_COMPLETION_NUDGE_RESPONSE": self._handle_nudge_response,
+            "STEP_DATA_REQUEST": self._handle_data_request,
+            "CAPABILITY_ANNOUNCEMENT": self._handle_capability_announcement,
+            "CAPABILITY_QUERY": self._handle_capability_query,
+            "P2P_KEEPALIVE": self.keepalive_manager.handle_keepalive_received,
+            "P2P_KEEPALIVE_ACK": self.keepalive_manager.handle_keepalive_ack,
+        }
+
+        for msg_type, handler in message_types.items():
+            try:
+                zmq.router_manager.register_handler(msg_type, handler)
+                logger.debug(f"Registered handler for {msg_type}")
+            except Exception as e:
+                logger.error(f"Failed to register handler for {msg_type}: {e}")
+
+        logger.info(f"Registered {len(message_types)} message handlers")
+
+    async def announce_capabilities(self):
+        """Broadcast agent capabilities to mesh."""
+        if not self._started:
+            raise RuntimeError("P2P Coordinator not started")
+
+        capabilities = {}
+        for agent in self.agents:
+            for cap in agent.capabilities:
+                if cap not in capabilities:
+                    capabilities[cap] = []
+                capabilities[cap].append(agent.agent_id)
+
+        self._capability_map = capabilities
+
+        payload = {
+            'node_id': self._get_node_id(),
+            'capabilities': capabilities
+        }
+
+        # Broadcast using the broadcaster
+        await self.broadcaster.broadcast_step_result(
+            step_id='capability_announcement',
+            workflow_id='system',
+            output_data=payload,
+            status='success'
+        )
+
+        logger.info(f"Announced capabilities: {list(capabilities.keys())}")
+
+    async def query_mesh(self, capability: str) -> List[str]:
+        """
+        Find agents with specific capability across mesh.
+
+        Args:
+            capability: Required capability
+
+        Returns:
+            List of agent IDs that have the capability
+        """
+        # First check local cache
+        if capability in self._capability_map:
+            return self._capability_map[capability]
+
+        # TODO Day 3: Implement distributed capability query via P2P
+        logger.debug(f"No cached agents found for capability: {capability}")
+        return []
+
+    async def serve(self):
+        """
+        Run as service, handling P2P requests indefinitely.
+
+        This keeps the coordinator running and responding to P2P messages.
+        """
+        logger.info("P2P service running (press Ctrl+C to stop)...")
+
+        try:
+            while True:
+                await asyncio.sleep(10)
+                # Service is event-driven via message handlers
+                # Just keep the event loop alive
+        except KeyboardInterrupt:
+            logger.info("Service interrupted")
+
+    async def stop(self):
+        """Stop P2P coordinator and cleanup resources."""
+        if not self._started:
+            return
+
+        logger.info("Stopping P2P coordinator...")
+
+        # Stop keepalive manager
+        if self.keepalive_manager:
+            await self.keepalive_manager.stop()
+            logger.info("✓ Keepalive manager stopped")
+
+        # Stop SWIM manager
+        if self.swim_manager:
+            self.swim_manager.shutdown()
+            logger.info("✓ SWIM manager stopped")
+
+        self._started = False
+        logger.info("P2P coordinator stopped")
+
+    # Internal helpers
+
+    def _get_node_id(self) -> str:
+        """Get node identifier from SWIM."""
+        if self.swim_manager and self.swim_manager.bind_addr:
+            addr = self.swim_manager.bind_addr
+            return f"{addr[0]}:{addr[1]}"
+        return "unknown"
+
+    async def _send_p2p_message(self, target: str, msg_type: str, payload: Dict) -> bool:
+        """
+        Send message to specific peer.
+
+        Args:
+            target: Target node ID (host:port)
+            msg_type: Message type
+            payload: Message payload
+
+        Returns:
+            True if sent successfully
+        """
+        try:
+            if not self.swim_manager or not self.swim_manager.zmq_agent:
+                logger.error("Cannot send P2P message: ZMQ agent not available")
+                return False
+
+            await self.swim_manager.zmq_agent.send_message(target, msg_type, payload)
+
+            # Record activity for keepalive suppression
+            if self.keepalive_manager:
+                self.keepalive_manager.record_p2p_activity()
+
+            return True
+        except Exception as e:
+            logger.error(f"Failed to send P2P message to {target}: {e}")
+            return False
+
+    async def _broadcast_p2p_message(self, msg_type: str, payload: Dict) -> int:
+        """
+        Broadcast message to all alive members.
+
+        Args:
+            msg_type: Message type
+            payload: Message payload
+
+        Returns:
+            Number of successful sends
+        """
+        if not self.swim_manager or not self.swim_manager.swim_node:
+            logger.error("Cannot broadcast: SWIM node not available")
+            return 0
+
+        count = 0
+        try:
+            alive_members = self.swim_manager.swim_node.members.get_alive_members(exclude_self=True)
+
+            for member in alive_members:
+                target = f"{member.addr[0]}:{member.addr[1]}"
+                if await self._send_p2p_message(target, msg_type, payload):
+                    count += 1
+
+            logger.debug(f"Broadcasted {msg_type} to {count} peers")
+        except Exception as e:
+            logger.error(f"Error broadcasting message: {e}")
+
+        return count
+
+    # Message handlers (stubs for Day 3 implementation)
+
+    async def _handle_step_broadcast(self, sender, message):
+        """Handle step output broadcast."""
+        logger.debug(f"Received step broadcast from {sender}")
+        if self.broadcaster:
+            await self.broadcaster.handle_step_output_broadcast(sender, message)
+
+    async def _handle_step_ack(self, sender, message):
+        """Handle step output acknowledgment."""
+        logger.debug(f"Received step ACK from {sender}")
+        if self.broadcaster:
+            await self.broadcaster.handle_step_output_ack(sender, message)
+
+    async def _handle_nudge(self, sender, message):
+        """Handle step completion nudge."""
+        logger.debug(f"Received nudge from {sender}")
+        # TODO Day 3: Forward to nudging system
+
+    async def _handle_nudge_response(self, sender, message):
+        """Handle nudge response."""
+        logger.debug(f"Received nudge response from {sender}")
+        # TODO Day 3: Forward to nudging system
+
+    async def _handle_data_request(self, sender, message):
+        """Handle step data request."""
+        logger.debug(f"Received data request from {sender}")
+        # TODO Day 3: Forward to dependency manager
+
+    async def _handle_capability_announcement(self, sender, message):
+        """Handle capability announcement from peer."""
+        try:
+            payload = message.get('payload', {})
+            caps = payload.get('capabilities', {})
+            node_id = payload.get('node_id')
+
+            # Update local capability map
+            for cap, agents in caps.items():
+                if cap not in self._capability_map:
+                    self._capability_map[cap] = []
+                # Add remote agents (avoid duplicates)
+                for agent_id in agents:
+                    if agent_id not in self._capability_map[cap]:
+                        self._capability_map[cap].append(agent_id)
+
+            logger.info(f"Updated capabilities from {node_id}: {list(caps.keys())}")
+        except Exception as e:
+            logger.error(f"Error handling capability announcement: {e}")
+
+    async def _handle_capability_query(self, sender, message):
+        """Handle capability query from peer."""
+        try:
+            capability = message.get('capability')
+            response = {
+                'capability': capability,
+                'agents': self._capability_map.get(capability, [])
+            }
+            await self._send_p2p_message(sender, 'CAPABILITY_QUERY_RESPONSE', response)
+            logger.debug(f"Responded to capability query from {sender} for {capability}")
+        except Exception as e:
+            logger.error(f"Error handling capability query: {e}")
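
For reference, a minimal usage sketch of the coordinator added in this version. StubAgent is a hypothetical stand-in, not part of the package: announce_capabilities() only reads agent_id and capabilities from each agent, so any object with those two attributes works here. The config keys are the ones documented in P2PCoordinator.__init__.

    import asyncio
    from jarviscore.p2p.coordinator import P2PCoordinator

    class StubAgent:
        """Hypothetical stand-in for a framework Agent; only these
        two attributes are read by announce_capabilities()."""
        def __init__(self, agent_id, capabilities):
            self.agent_id = agent_id
            self.capabilities = capabilities

    async def main():
        config = {
            'bind_host': '127.0.0.1',
            'bind_port': 7946,
            'node_name': 'jarviscore-node',
            'seed_nodes': '',            # comma-separated "host:port" peers
            'transport_type': 'hybrid',  # udp, tcp, or hybrid
            'keepalive_enabled': True,
            'keepalive_interval': 90,
        }
        agents = [StubAgent('agent-1', ['research']),
                  StubAgent('agent-2', ['calculate'])]

        coordinator = P2PCoordinator(agents, config)
        await coordinator.start()                    # SWIM thread, keepalive, broadcaster
        await coordinator.announce_capabilities()    # builds local map, broadcasts to peers
        print(await coordinator.query_mesh('research'))  # ['agent-1'] from the local cache
        try:
            await coordinator.serve()                # blocks; handlers react to incoming messages
        finally:
            await coordinator.stop()

    asyncio.run(main())

Note that in this release query_mesh() answers only from the locally cached capability map (populated by announce_capabilities() and by incoming CAPABILITY_ANNOUNCEMENT messages); the distributed query path is still marked TODO in the diff above.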