jarviscore_framework-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. examples/calculator_agent_example.py +77 -0
  2. examples/multi_agent_workflow.py +132 -0
  3. examples/research_agent_example.py +76 -0
  4. jarviscore/__init__.py +54 -0
  5. jarviscore/cli/__init__.py +7 -0
  6. jarviscore/cli/__main__.py +33 -0
  7. jarviscore/cli/check.py +404 -0
  8. jarviscore/cli/smoketest.py +371 -0
  9. jarviscore/config/__init__.py +7 -0
  10. jarviscore/config/settings.py +128 -0
  11. jarviscore/core/__init__.py +7 -0
  12. jarviscore/core/agent.py +163 -0
  13. jarviscore/core/mesh.py +463 -0
  14. jarviscore/core/profile.py +64 -0
  15. jarviscore/docs/API_REFERENCE.md +932 -0
  16. jarviscore/docs/CONFIGURATION.md +753 -0
  17. jarviscore/docs/GETTING_STARTED.md +600 -0
  18. jarviscore/docs/TROUBLESHOOTING.md +424 -0
  19. jarviscore/docs/USER_GUIDE.md +983 -0
  20. jarviscore/execution/__init__.py +94 -0
  21. jarviscore/execution/code_registry.py +298 -0
  22. jarviscore/execution/generator.py +268 -0
  23. jarviscore/execution/llm.py +430 -0
  24. jarviscore/execution/repair.py +283 -0
  25. jarviscore/execution/result_handler.py +332 -0
  26. jarviscore/execution/sandbox.py +555 -0
  27. jarviscore/execution/search.py +281 -0
  28. jarviscore/orchestration/__init__.py +18 -0
  29. jarviscore/orchestration/claimer.py +101 -0
  30. jarviscore/orchestration/dependency.py +143 -0
  31. jarviscore/orchestration/engine.py +292 -0
  32. jarviscore/orchestration/status.py +96 -0
  33. jarviscore/p2p/__init__.py +23 -0
  34. jarviscore/p2p/broadcaster.py +353 -0
  35. jarviscore/p2p/coordinator.py +364 -0
  36. jarviscore/p2p/keepalive.py +361 -0
  37. jarviscore/p2p/swim_manager.py +290 -0
  38. jarviscore/profiles/__init__.py +6 -0
  39. jarviscore/profiles/autoagent.py +264 -0
  40. jarviscore/profiles/customagent.py +137 -0
  41. jarviscore_framework-0.1.0.dist-info/METADATA +136 -0
  42. jarviscore_framework-0.1.0.dist-info/RECORD +55 -0
  43. jarviscore_framework-0.1.0.dist-info/WHEEL +5 -0
  44. jarviscore_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
  45. jarviscore_framework-0.1.0.dist-info/top_level.txt +3 -0
  46. tests/conftest.py +44 -0
  47. tests/test_agent.py +165 -0
  48. tests/test_autoagent.py +140 -0
  49. tests/test_autoagent_day4.py +186 -0
  50. tests/test_customagent.py +248 -0
  51. tests/test_integration.py +293 -0
  52. tests/test_llm_fallback.py +185 -0
  53. tests/test_mesh.py +356 -0
  54. tests/test_p2p_integration.py +375 -0
  55. tests/test_remote_sandbox.py +116 -0
@@ -0,0 +1,353 @@
+ """
+ Step Output Broadcaster for P2P Step Output Sharing
+ Handles broadcasting and caching of step outputs between agents in the P2P network.
+ """
+ import asyncio
+ import json
+ import logging
+ import time
+ import os
+ import uuid
+ from typing import Dict, Any, Optional, List, Set, Tuple, Callable
+ from dataclasses import dataclass, asdict
+
+ logger = logging.getLogger(__name__)
+
+ @dataclass
+ class StepExecutionResult:
+     """Data class for step execution results"""
+     step_id: str
+     workflow_id: str
+     status: str  # "success", "failed", "in_progress"
+     output_data: Dict[str, Any]
+     agent_id: str
+     timestamp: float
+     execution_time: Optional[float] = None
+     retry_count: Optional[int] = None
+     result_status: Optional[str] = None
+     error_message: Optional[str] = None
+
+ class StepOutputBroadcaster:
+     """
+     Service class for managing step output broadcasting and caching in a P2P network.
+
+     This class manages:
+     - Broadcasting step execution results to peers
+     - Caching step outputs from all agents
+     - Processing step output broadcasts
+     - Persistent storage of step results
+
+     Note: Request/response functionality has been replaced by the nudging system.
+     """
+
+     def __init__(self, agent_id: str, zmq_agent=None, swim_node=None):
+         self.agent_id = agent_id
+         self.zmq_agent = zmq_agent
+         self.swim_node = swim_node
+
+         # Cache for step outputs from all agents (including self)
+         self.step_outputs: Dict[str, StepExecutionResult] = {}
+
+         # Track message acknowledgments for broadcasts
+         self.pending_acks: Dict[str, Dict[str, Any]] = {}
+
+         # Create directory for persisted step results
+         os.makedirs("StepOutputs", exist_ok=True)
+
+         logger.info(f"Step Output Broadcaster service initialized for agent {agent_id}")
+
+     async def broadcast_step_result(
+         self,
+         step_id: str,
+         workflow_id: str,
+         output_data: Dict[str, Any],
+         status: str,
+         execution_metadata: Optional[Dict[str, Any]] = None
+     ) -> bool:
+         """
+         Broadcast a step execution result to all peers.
+
+         Args:
+             step_id: The step ID
+             workflow_id: The workflow ID
+             output_data: The step output data
+             status: The step status ("success", "failed", "in_progress")
+             execution_metadata: Optional execution metadata
+
+         Returns:
+             bool: True if the broadcast was successful
+         """
+         try:
+             # Extract execution details from metadata
+             execution_time = execution_metadata.get('execution_time') if execution_metadata else None
+             retry_count = execution_metadata.get('retry_count') if execution_metadata else None
+             result_status = execution_metadata.get('result_status') if execution_metadata else None
+             error_message = execution_metadata.get('error') if execution_metadata else None
+
+             # Create step execution result
+             result = StepExecutionResult(
+                 step_id=step_id,
+                 workflow_id=workflow_id,
+                 status=status,
+                 output_data=output_data,
+                 agent_id=self.agent_id,
+                 timestamp=time.time(),
+                 execution_time=execution_time,
+                 retry_count=retry_count,
+                 result_status=result_status,
+                 error_message=error_message
+             )
+
+             # Store in local cache
+             cache_key = f"{workflow_id}:{step_id}"
+             self.step_outputs[cache_key] = result
+
+             # Persist to file
+             await self._persist_step_result(result)
+
+             # Broadcast to peers
+             if self.zmq_agent:
+                 success = await self._broadcast_to_peers(result)
+                 if success:
+                     logger.info(f"Successfully broadcast step result for {step_id}")
+                     return True
+                 else:
+                     logger.warning(f"Failed to broadcast step result for {step_id}")
+                     return False
+             else:
+                 logger.debug("No ZMQ agent available for broadcasting")
+                 return True  # Still successful locally
+
+         except Exception as e:
+             logger.error(f"Error broadcasting step result for {step_id}: {e}")
+             return False
+
+     # Public methods for IntegrationAgent to call for message processing
+
+     async def handle_step_output_broadcast(
+         self,
+         sender_swim_id: str,
+         message_data: Dict[str, Any],
+         send_ack_callback: Optional[Callable] = None
+     ) -> Dict[str, Any]:
+         """
+         Handle an incoming step output broadcast from a peer (called by IntegrationAgent).
+
+         Args:
+             sender_swim_id: The SWIM ID of the sender
+             message_data: The message data containing the step result
+             send_ack_callback: Optional callback to send an acknowledgment
+
+         Returns:
+             Dict with the processing result
+         """
+         try:
+             message_id = message_data.get('id', str(uuid.uuid4()))
+             step_result_data = message_data.get('step_result', {})
+
+             # Send acknowledgment if a callback was provided
+             if send_ack_callback:
+                 await send_ack_callback(sender_swim_id, message_id, "STEP_OUTPUT_BROADCAST")
+
+             # Create StepExecutionResult from the received data
+             result = StepExecutionResult(**step_result_data)
+
+             logger.info(f"Processing step output broadcast for {result.step_id} from {result.agent_id}")
+
+             # Store in local cache
+             cache_key = f"{result.workflow_id}:{result.step_id}"
+             self.step_outputs[cache_key] = result
+
+             # Persist to file
+             await self._persist_step_result(result)
+
+             logger.debug(f"Stored broadcast step output for {result.step_id}")
+
+             return {"status": "success", "message": "Broadcast processed successfully"}
+
+         except Exception as e:
+             logger.error(f"Error handling step output broadcast: {e}")
+             return {"status": "error", "message": str(e)}
+
+     async def handle_step_output_ack(
+         self,
+         sender_swim_id: str,
+         message_data: Dict[str, Any]
+     ) -> Dict[str, Any]:
+         """
+         Handle an acknowledgment for step output messages (called by IntegrationAgent).
+
+         Args:
+             sender_swim_id: The SWIM ID of the sender
+             message_data: The message data containing ACK details
+
+         Returns:
+             Dict with the processing result
+         """
+         try:
+             # Extract ACK details
+             ack_for_message_id = message_data.get('ack_for')
+             ack_type = message_data.get('ack_type', 'delivery')
+             success = message_data.get('success', True)
+
+             if ack_for_message_id in self.pending_acks:
+                 ack_data = self.pending_acks[ack_for_message_id]
+                 step_id = ack_data.get('step_id')
+
+                 logger.debug(f"Processing {ack_type} ACK from {sender_swim_id} for step {step_id} (message {ack_for_message_id})")
+
+                 # Remove from pending ACKs if it's a processing ACK or if delivery failed
+                 if ack_type == 'processing' or (ack_type == 'delivery' and not success):
+                     del self.pending_acks[ack_for_message_id]
+
+                 return {"status": "success", "message": "ACK processed"}
+             else:
+                 logger.debug(f"Received ACK for unknown message {ack_for_message_id} from {sender_swim_id}")
+                 return {"status": "unknown", "message": "ACK for unknown message"}
+
+         except Exception as e:
+             logger.error(f"Error handling step output ACK: {e}")
+             return {"status": "error", "message": str(e)}
+
+     # Private helper methods
+
+     async def _broadcast_to_peers(self, result: StepExecutionResult) -> bool:
+         """Broadcast a step result to all peers using reliable messaging"""
+         try:
+             # Get all alive peers
+             peers = self._get_alive_peers()
+
+             if not peers:
+                 logger.debug("No peers available for broadcasting")
+                 return True  # Not an error if no peers
+
+             # Send to each peer using the reliability manager
+             success_count = 0
+             for peer_id in peers:
+                 try:
+                     # Generate a unique message ID per peer so each pending ACK entry is tracked separately
+                     message_id = str(uuid.uuid4())
+
+                     # Use the send_message_base method, which leverages the reliability manager
+                     success = await self.zmq_agent.send_message_base(
+                         peer_id,
+                         "STEP_OUTPUT_BROADCAST",
+                         "step_output_data",
+                         json.dumps(asdict(result)),
+                         f"StepBroadcast_{result.step_id}"
+                     )
+
+                     if success:
+                         success_count += 1
+                         logger.debug(f"Broadcast step result to peer {peer_id}")
+
+                         # Track for acknowledgment
+                         self.pending_acks[message_id] = {
+                             "peer_id": peer_id,
+                             "step_id": result.step_id,
+                             "workflow_id": result.workflow_id,
+                             "timestamp": time.time()
+                         }
+                     else:
+                         logger.warning(f"Failed to broadcast to peer {peer_id}")
+
+                 except Exception as e:
+                     logger.error(f"Error broadcasting to peer {peer_id}: {e}")
+
+             logger.info(f"Broadcast step result to {success_count}/{len(peers)} peers")
+             return success_count > 0
+
+         except Exception as e:
+             logger.error(f"Error broadcasting to peers: {e}")
+             return False
+
+     def _get_alive_peers(self) -> List[str]:
+         """Get list of alive peers from SWIM node"""
+         peers = []
+
+         if self.swim_node and hasattr(self.swim_node, 'members'):
+             try:
+                 alive_members = self.swim_node.members.get_alive_members(exclude_self=True)
+                 peers = [f"{member.addr[0]}:{member.addr[1]}" for member in alive_members]
+             except Exception as e:
+                 logger.debug(f"Error getting alive peers: {e}")
+
+         return peers
+
+     async def _persist_step_result(self, result: StepExecutionResult):
+         """Persist step result to file"""
+         try:
+             filename = f"step_{result.step_id}_{result.workflow_id}_result.json"
+             filepath = os.path.join("StepOutputs", filename)
+
+             with open(filepath, 'w') as f:
+                 json.dump(asdict(result), f, indent=2)
+
+             logger.debug(f"Persisted step result for {result.step_id}")
+
+         except Exception as e:
+             logger.error(f"Error persisting step result: {e}")
+
+     def get_cached_output(self, step_id: str, workflow_id: str) -> Optional[Dict[str, Any]]:
+         """Get cached step output"""
+         cache_key = f"{workflow_id}:{step_id}"
+         if cache_key in self.step_outputs:
+             result = self.step_outputs[cache_key]
+             if result.status == "success":
+                 return {
+                     "status": "success",
+                     "data": result.output_data,
+                     "agent_id": result.agent_id,
+                     "timestamp": result.timestamp,
+                     "execution_time": result.execution_time,
+                     "retry_count": result.retry_count
+                 }
+         return None
+
+     def cleanup_old_outputs(self, max_age_hours: int = 24):
+         """Clean up old cached outputs"""
+         try:
+             current_time = time.time()
+             max_age_seconds = max_age_hours * 3600
+
+             # Clean up memory cache
+             keys_to_remove = []
+             for key, result in self.step_outputs.items():
+                 if current_time - result.timestamp > max_age_seconds:
+                     keys_to_remove.append(key)
+
+             for key in keys_to_remove:
+                 del self.step_outputs[key]
+
+             if keys_to_remove:
+                 logger.info(f"Cleaned up {len(keys_to_remove)} old step outputs from cache")
+
+             # Clean up files
+             output_dir = "StepOutputs"
+             if os.path.exists(output_dir):
+                 files_removed = 0
+                 for filename in os.listdir(output_dir):
+                     filepath = os.path.join(output_dir, filename)
+                     if os.path.isfile(filepath):
+                         file_age = current_time - os.path.getmtime(filepath)
+                         if file_age > max_age_seconds:
+                             os.remove(filepath)
+                             files_removed += 1
+
+                 if files_removed > 0:
+                     logger.info(f"Cleaned up {files_removed} old step output files")
+
+             # Clean up pending ACKs
+             ack_ids_to_remove = []
+             for ack_id, ack_data in self.pending_acks.items():
+                 if current_time - ack_data.get('timestamp', 0) > max_age_seconds:
+                     ack_ids_to_remove.append(ack_id)
+
+             for ack_id in ack_ids_to_remove:
+                 del self.pending_acks[ack_id]
+
+             if ack_ids_to_remove:
+                 logger.info(f"Cleaned up {len(ack_ids_to_remove)} old pending ACKs")
+
+         except Exception as e:
+             logger.error(f"Error cleaning up old outputs: {e}")
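For orientation, here is a minimal producer-side sketch built only from the signatures shown above. The agent, step, and workflow identifiers are hypothetical; no zmq_agent or swim_node is wired in, so broadcast_step_result takes the local-only path (it caches the result, persists it, and returns True without contacting peers).

    import asyncio

    from jarviscore.p2p.broadcaster import StepOutputBroadcaster

    async def main():
        # Hypothetical agent ID; without zmq_agent/swim_node the result is
        # cached and persisted locally, and no peer broadcast is attempted.
        broadcaster = StepOutputBroadcaster(agent_id="agent-1")

        ok = await broadcaster.broadcast_step_result(
            step_id="step-42",
            workflow_id="wf-7",
            output_data={"answer": 4},
            status="success",
            execution_metadata={"execution_time": 0.8, "retry_count": 0},
        )
        print("broadcast ok:", ok)

        # Any agent that has cached the result can read it back; only
        # results with status "success" are returned.
        print("cached:", broadcaster.get_cached_output("step-42", "wf-7"))

    asyncio.run(main())

Running this also writes StepOutputs/step_step-42_wf-7_result.json, the asdict() form of the StepExecutionResult produced by _persist_step_result.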
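On the consumer side, an IntegrationAgent is expected to route incoming messages into the handler methods. The sketch below assumes a message shaped like {"id": ..., "step_result": {...}}, which is what handle_step_output_broadcast reads; the send_ack signature mirrors the call the handler makes, and its body (here just a print) stands in for a real transport send.

    import asyncio

    from jarviscore.p2p.broadcaster import StepOutputBroadcaster

    async def on_peer_message(broadcaster: StepOutputBroadcaster,
                              sender_swim_id: str, message: dict) -> dict:
        async def send_ack(peer_swim_id: str, message_id: str, message_type: str):
            # Hypothetical: forward a delivery ACK over your transport here.
            print(f"ACK {message_id} ({message_type}) -> {peer_swim_id}")

        # Caches the peer's result and persists it under StepOutputs/.
        return await broadcaster.handle_step_output_broadcast(
            sender_swim_id, message, send_ack_callback=send_ack
        )

    async def demo():
        broadcaster = StepOutputBroadcaster(agent_id="agent-2")
        message = {
            "id": "msg-1",
            "step_result": {
                # Required StepExecutionResult fields; optionals default to None.
                "step_id": "step-42",
                "workflow_id": "wf-7",
                "status": "success",
                "output_data": {"answer": 4},
                "agent_id": "agent-1",
                "timestamp": 1700000000.0,
            },
        }
        print(await on_peer_message(broadcaster, "10.0.0.5:7946", message))

    asyncio.run(demo())

The ACK path works the same way: the IntegrationAgent would pass {"ack_for": ..., "ack_type": ..., "success": ...} messages to handle_step_output_ack, which clears matching pending_acks entries.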