jarviscore-framework 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. examples/calculator_agent_example.py +77 -0
  2. examples/multi_agent_workflow.py +132 -0
  3. examples/research_agent_example.py +76 -0
  4. jarviscore/__init__.py +54 -0
  5. jarviscore/cli/__init__.py +7 -0
  6. jarviscore/cli/__main__.py +33 -0
  7. jarviscore/cli/check.py +404 -0
  8. jarviscore/cli/smoketest.py +371 -0
  9. jarviscore/config/__init__.py +7 -0
  10. jarviscore/config/settings.py +128 -0
  11. jarviscore/core/__init__.py +7 -0
  12. jarviscore/core/agent.py +163 -0
  13. jarviscore/core/mesh.py +463 -0
  14. jarviscore/core/profile.py +64 -0
  15. jarviscore/docs/API_REFERENCE.md +932 -0
  16. jarviscore/docs/CONFIGURATION.md +753 -0
  17. jarviscore/docs/GETTING_STARTED.md +600 -0
  18. jarviscore/docs/TROUBLESHOOTING.md +424 -0
  19. jarviscore/docs/USER_GUIDE.md +983 -0
  20. jarviscore/execution/__init__.py +94 -0
  21. jarviscore/execution/code_registry.py +298 -0
  22. jarviscore/execution/generator.py +268 -0
  23. jarviscore/execution/llm.py +430 -0
  24. jarviscore/execution/repair.py +283 -0
  25. jarviscore/execution/result_handler.py +332 -0
  26. jarviscore/execution/sandbox.py +555 -0
  27. jarviscore/execution/search.py +281 -0
  28. jarviscore/orchestration/__init__.py +18 -0
  29. jarviscore/orchestration/claimer.py +101 -0
  30. jarviscore/orchestration/dependency.py +143 -0
  31. jarviscore/orchestration/engine.py +292 -0
  32. jarviscore/orchestration/status.py +96 -0
  33. jarviscore/p2p/__init__.py +23 -0
  34. jarviscore/p2p/broadcaster.py +353 -0
  35. jarviscore/p2p/coordinator.py +364 -0
  36. jarviscore/p2p/keepalive.py +361 -0
  37. jarviscore/p2p/swim_manager.py +290 -0
  38. jarviscore/profiles/__init__.py +6 -0
  39. jarviscore/profiles/autoagent.py +264 -0
  40. jarviscore/profiles/customagent.py +137 -0
  41. jarviscore_framework-0.1.0.dist-info/METADATA +136 -0
  42. jarviscore_framework-0.1.0.dist-info/RECORD +55 -0
  43. jarviscore_framework-0.1.0.dist-info/WHEEL +5 -0
  44. jarviscore_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
  45. jarviscore_framework-0.1.0.dist-info/top_level.txt +3 -0
  46. tests/conftest.py +44 -0
  47. tests/test_agent.py +165 -0
  48. tests/test_autoagent.py +140 -0
  49. tests/test_autoagent_day4.py +186 -0
  50. tests/test_customagent.py +248 -0
  51. tests/test_integration.py +293 -0
  52. tests/test_llm_fallback.py +185 -0
  53. tests/test_mesh.py +356 -0
  54. tests/test_p2p_integration.py +375 -0
  55. tests/test_remote_sandbox.py +116 -0
@@ -0,0 +1,292 @@
1
+ """
2
+ Workflow Engine - Orchestrates multi-step workflow execution
3
+
4
+ Core orchestration logic adapted from integration-agent.
5
+ Simplified for jarviscore MVP.
6
+ """
7
+ import logging
8
+ import asyncio
9
+ from typing import List, Dict, Any, Optional
10
+
11
+ from .claimer import StepClaimer
12
+ from .dependency import DependencyManager
13
+ from .status import StatusManager
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
class WorkflowEngine:
    """
    Executes multi-step workflows with dependency management.

    Simplified from integration-agent's workflow_processor.py (2600 lines → 200 lines)
    Removes: Kafka integration, repair system, LLM nudging
    Keeps: Core execution loop, dependency resolution, status tracking
    """

    def __init__(
        self,
        mesh,
        p2p_coordinator=None,
        config: Optional[Dict] = None
    ):
        """
        Initialize workflow engine.

        Args:
            mesh: Mesh instance containing agents
            p2p_coordinator: Optional P2P coordinator for distributed execution
            config: Optional configuration dictionary. Recognized key:
                'execution_timeout' — dependency wait timeout in seconds
                (default 300).
        """
        self.mesh = mesh
        self.p2p = p2p_coordinator
        self.config = config or {}

        # Core components
        self.claimer = StepClaimer(mesh.agents)
        self.status_manager = StatusManager()

        # Working memory (step_id -> result). Shared with the dependency
        # manager so stored step outputs become visible to waiters.
        self.memory: Dict[str, Any] = {}
        self.dependency_manager = DependencyManager(self.memory)

        self._started = False
        logger.info("Workflow engine initialized")

    async def start(self):
        """Start the workflow engine (idempotent; warns if already started)."""
        if self._started:
            logger.warning("Workflow engine already started")
            return

        self._started = True
        logger.info("Workflow engine started")

    async def stop(self):
        """Stop the engine and discard all step results and statuses."""
        if not self._started:
            return

        self._started = False
        self.memory.clear()
        self.status_manager.clear()
        logger.info("Workflow engine stopped")

    async def execute(
        self,
        workflow_id: str,
        steps: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Execute a multi-step workflow.

        Steps run sequentially in list order. A failing step is recorded as
        a failure result and the workflow continues with the next step.

        Args:
            workflow_id: Unique workflow identifier
            steps: List of step specifications:
                [
                    {
                        "id": "step1",  # Optional, auto-generated if missing
                        "agent": "role_or_capability",
                        "task": "Task description",
                        "depends_on": []  # Optional list of step IDs or indices
                    }
                ]

        Returns:
            List of result dictionaries (one per step). A failed step yields
            {'status': 'failure', 'error': <message>, 'step': <index>}.

        Raises:
            RuntimeError: If the engine has not been started.

        Example:
            results = await engine.execute("pipeline-1", [
                {"agent": "scraper", "task": "Scrape data"},
                {"agent": "processor", "task": "Process data", "depends_on": [0]},
                {"agent": "storage", "task": "Save data", "depends_on": [1]}
            ])
        """
        if not self._started:
            raise RuntimeError("Workflow engine not started. Call start() first.")

        logger.info(
            f"Executing workflow {workflow_id} with {len(steps)} step(s)"
        )

        # Normalize steps (ensure each has an ID)
        normalized_steps = self._normalize_steps(steps)

        results = []

        for i, step in enumerate(normalized_steps):
            step_id = step['id']  # guaranteed present by _normalize_steps
            logger.info(f"Processing step {i+1}/{len(steps)}: {step_id}")

            try:
                # 1. Update status to pending
                self.status_manager.update(step_id, 'pending')

                # 2. Claim step (find capable agent)
                agent = self.claimer.find_agent(step)
                if not agent:
                    results.append(
                        self._fail_step(step_id, i, f"No agent found for step: {step}")
                    )
                    continue

                logger.info(f"Step {step_id} claimed by: {agent.agent_id}")

                # 3. Resolve dependencies once; the IDs are reused for both
                # waiting and context injection (previously resolved twice).
                depends_on = step.get('depends_on')
                dep_ids = (
                    self._resolve_dependency_ids(depends_on, normalized_steps)
                    if depends_on else []
                )

                if depends_on:
                    logger.info(f"Step {step_id} has {len(depends_on)} dependencies")
                    try:
                        await self.dependency_manager.wait_for(
                            dep_ids,
                            self.memory,
                            timeout=self.config.get('execution_timeout', 300)
                        )
                        logger.info(f"Dependencies satisfied for step {step_id}")
                    except TimeoutError as e:
                        results.append(
                            self._fail_step(step_id, i, f"Dependency timeout: {e}")
                        )
                        continue

                # 4. Update status to in_progress
                self.status_manager.update(step_id, 'in_progress')

                # 5. Prepare task with context from dependencies
                task = step.copy()
                if depends_on:
                    task['context'] = self._build_context(workflow_id, step_id, dep_ids)

                # 6. Execute step with context
                logger.info(f"Executing step {step_id} with agent {agent.agent_id}")
                result = await agent.execute_task(task)

                # Ensure result includes agent_id
                if isinstance(result, dict) and 'agent' not in result:
                    result['agent'] = agent.agent_id

                # 7. Store result in memory
                self.memory[step_id] = result
                logger.debug(f"Stored result for step {step_id} in memory")

                # 8. Broadcast result to P2P mesh (best effort, if available)
                await self._broadcast_result(workflow_id, step_id, result)

                # 9. Update status to completed and record the result
                self.status_manager.update(step_id, 'completed', output=result)
                results.append(result)
                logger.info(f"Step {step_id} completed successfully")

            except Exception as e:
                logger.error(f"Step {step_id} failed: {e}", exc_info=True)
                self.status_manager.update(step_id, 'failed', error=str(e))
                results.append({
                    'status': 'failure',
                    'error': str(e),
                    'step': i
                })
                # Don't stop workflow on single step failure;
                # continue to next step.

        logger.info(
            f"Workflow {workflow_id} completed: "
            f"{len(results)}/{len(steps)} steps finished"
        )
        return results

    def _fail_step(self, step_id: str, index: int, error_msg: str) -> Dict[str, Any]:
        """Log a step failure, mark it failed, and return its result entry."""
        logger.error(error_msg)
        self.status_manager.update(step_id, 'failed', error=error_msg)
        return {
            'status': 'failure',
            'error': error_msg,
            'step': index
        }

    def _build_context(
        self,
        workflow_id: str,
        step_id: str,
        dep_ids: List[str]
    ) -> Dict[str, Any]:
        """Collect outputs of already-stored dependencies into a task context."""
        dep_outputs = {}
        for dep_id in dep_ids:
            if dep_id in self.memory:
                dep_result = self.memory[dep_id]
                # Extract the 'output' field from dict results; pass other
                # result shapes through unchanged.
                output = dep_result.get('output') if isinstance(dep_result, dict) else dep_result
                dep_outputs[dep_id] = output
        logger.debug(f"Injected context with {len(dep_outputs)} dependencies")
        return {
            'previous_step_results': dep_outputs,
            'workflow_id': workflow_id,
            'step_id': step_id
        }

    async def _broadcast_result(self, workflow_id: str, step_id: str, result: Any):
        """Best-effort broadcast of a step result; never fails the step."""
        if not (self.p2p and hasattr(self.p2p, 'broadcaster')):
            return
        try:
            await self.p2p.broadcaster.broadcast_step_result(
                step_id=step_id,
                workflow_id=workflow_id,
                output_data=result,
                status='success'
            )
            logger.debug(f"Broadcasted result for step {step_id}")
        except Exception as broadcast_error:
            # Don't fail the step if broadcast fails
            logger.warning(
                f"Failed to broadcast step {step_id}: {broadcast_error}"
            )

    def _normalize_steps(self, steps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Normalize step specifications (ensure each has an ID).

        Input dicts are not mutated; a step missing 'id' is shallow-copied
        and assigned 'step<index>'.

        Args:
            steps: Raw step list

        Returns:
            Normalized step list with IDs
        """
        normalized = []
        for i, step in enumerate(steps):
            if 'id' not in step:
                step = step.copy()
                step['id'] = f'step{i}'
            normalized.append(step)
        return normalized

    def _resolve_dependency_ids(
        self,
        depends_on: List,
        steps: List[Dict[str, Any]]
    ) -> List[str]:
        """
        Convert dependency references to step IDs.

        Integer references index into `steps`; out-of-range indices are
        silently dropped. Anything else is treated as a literal step ID.

        Args:
            depends_on: List of step indices or IDs
            steps: All workflow steps

        Returns:
            List of step IDs
        """
        dep_ids = []
        for dep in depends_on:
            if isinstance(dep, int):
                # Index reference
                if 0 <= dep < len(steps):
                    dep_ids.append(steps[dep]['id'])
            else:
                # Direct ID reference
                dep_ids.append(str(dep))
        return dep_ids

    def get_status(self, step_id: str) -> Optional[Dict[str, Any]]:
        """Get status for a specific step."""
        return self.status_manager.get(step_id)

    def get_memory(self) -> Dict[str, Any]:
        """Get current workflow memory (a shallow copy of all step outputs)."""
        return self.memory.copy()
@@ -0,0 +1,96 @@
1
+ """
2
+ Status Manager - Tracks workflow and step execution status
3
+
4
+ Simplified from integration-agent
5
+ """
6
+ import logging
7
+ import time
8
+ from typing import Dict, Any, Optional
9
+ from enum import Enum
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
class StepStatus(Enum):
    """Step execution status.

    Canonical lifecycle values for a workflow step.
    NOTE(review): within this module, StatusManager stores and compares
    plain strings (e.g. 'completed') rather than these enum members;
    presumably the enum is used by callers elsewhere — confirm usage.
    """
    PENDING = "pending"            # registered, execution not yet begun
    IN_PROGRESS = "in_progress"    # currently executing
    COMPLETED = "completed"        # finished successfully
    FAILED = "failed"              # finished with an error
20
+
21
+
22
class StatusManager:
    """
    Tracks status of workflow steps.

    Simplified from integration-agent's version.
    Removes: P2P status sync, persistent storage
    Keeps: In-memory status tracking
    """

    def __init__(self):
        """Initialize status manager with an empty in-memory status table."""
        # step_id -> status record; see update() for the record's fields.
        self.statuses: Dict[str, Dict[str, Any]] = {}
        # Instance-held logger (same object as logging.getLogger(__name__))
        # keeps the class self-contained and independently testable.
        self._log = logging.getLogger(__name__)
        self._log.info("Status manager initialized")

    def update(
        self,
        step_id: str,
        status: str,
        error: Optional[str] = None,
        output: Optional[Any] = None
    ):
        """
        Update step status.

        Creates the record on first update. Note: 'error' and 'output' are
        overwritten on every call, so a later update that omits them clears
        previously stored values.

        Args:
            step_id: Step identifier
            status: New status (pending, in_progress, completed, failed)
            error: Optional error message if failed
            output: Optional output data if completed
        """
        record = self.statuses.setdefault(step_id, {
            'step_id': step_id,
            'created_at': time.time()
        })
        record.update({
            'status': status,
            'updated_at': time.time(),
            'error': error,
            'output': output
        })
        self._log.debug(f"Status updated: {step_id} -> {status}")

    def get(self, step_id: str) -> Optional[Dict[str, Any]]:
        """
        Get status for a step.

        Args:
            step_id: Step identifier

        Returns:
            Status dictionary or None if not found
        """
        return self.statuses.get(step_id)

    def is_completed(self, step_id: str) -> bool:
        """Check if step is completed. Always returns a bool (the previous
        implementation returned None for unknown step IDs)."""
        status = self.get(step_id)
        return status is not None and status.get('status') == 'completed'

    def is_failed(self, step_id: str) -> bool:
        """Check if step has failed. Always returns a bool (the previous
        implementation returned None for unknown step IDs)."""
        status = self.get(step_id)
        return status is not None and status.get('status') == 'failed'

    def get_all(self) -> Dict[str, Dict[str, Any]]:
        """Get all tracked statuses (a shallow copy of the status table)."""
        return self.statuses.copy()

    def clear(self):
        """Clear all statuses."""
        self.statuses.clear()
        self._log.debug("Status manager cleared")
@@ -0,0 +1,23 @@
1
+ """
2
+ P2P Integration Layer for JarvisCore
3
+
4
+ Wraps swim_p2p library for distributed agent coordination:
5
+ - SWIM protocol for membership management
6
+ - ZMQ messaging for agent communication
7
+ - Smart keepalive with traffic suppression
8
+ - Step output broadcasting
9
+ """
10
+
11
+ from .coordinator import P2PCoordinator
12
+ from .swim_manager import SWIMThreadManager
13
+ from .keepalive import P2PKeepaliveManager, CircuitState
14
+ from .broadcaster import StepOutputBroadcaster, StepExecutionResult
15
+
16
+ __all__ = [
17
+ 'P2PCoordinator',
18
+ 'SWIMThreadManager',
19
+ 'P2PKeepaliveManager',
20
+ 'CircuitState',
21
+ 'StepOutputBroadcaster',
22
+ 'StepExecutionResult',
23
+ ]