jarviscore_framework-0.1.0-py3-none-any.whl

Files changed (55)
  1. examples/calculator_agent_example.py +77 -0
  2. examples/multi_agent_workflow.py +132 -0
  3. examples/research_agent_example.py +76 -0
  4. jarviscore/__init__.py +54 -0
  5. jarviscore/cli/__init__.py +7 -0
  6. jarviscore/cli/__main__.py +33 -0
  7. jarviscore/cli/check.py +404 -0
  8. jarviscore/cli/smoketest.py +371 -0
  9. jarviscore/config/__init__.py +7 -0
  10. jarviscore/config/settings.py +128 -0
  11. jarviscore/core/__init__.py +7 -0
  12. jarviscore/core/agent.py +163 -0
  13. jarviscore/core/mesh.py +463 -0
  14. jarviscore/core/profile.py +64 -0
  15. jarviscore/docs/API_REFERENCE.md +932 -0
  16. jarviscore/docs/CONFIGURATION.md +753 -0
  17. jarviscore/docs/GETTING_STARTED.md +600 -0
  18. jarviscore/docs/TROUBLESHOOTING.md +424 -0
  19. jarviscore/docs/USER_GUIDE.md +983 -0
  20. jarviscore/execution/__init__.py +94 -0
  21. jarviscore/execution/code_registry.py +298 -0
  22. jarviscore/execution/generator.py +268 -0
  23. jarviscore/execution/llm.py +430 -0
  24. jarviscore/execution/repair.py +283 -0
  25. jarviscore/execution/result_handler.py +332 -0
  26. jarviscore/execution/sandbox.py +555 -0
  27. jarviscore/execution/search.py +281 -0
  28. jarviscore/orchestration/__init__.py +18 -0
  29. jarviscore/orchestration/claimer.py +101 -0
  30. jarviscore/orchestration/dependency.py +143 -0
  31. jarviscore/orchestration/engine.py +292 -0
  32. jarviscore/orchestration/status.py +96 -0
  33. jarviscore/p2p/__init__.py +23 -0
  34. jarviscore/p2p/broadcaster.py +353 -0
  35. jarviscore/p2p/coordinator.py +364 -0
  36. jarviscore/p2p/keepalive.py +361 -0
  37. jarviscore/p2p/swim_manager.py +290 -0
  38. jarviscore/profiles/__init__.py +6 -0
  39. jarviscore/profiles/autoagent.py +264 -0
  40. jarviscore/profiles/customagent.py +137 -0
  41. jarviscore_framework-0.1.0.dist-info/METADATA +136 -0
  42. jarviscore_framework-0.1.0.dist-info/RECORD +55 -0
  43. jarviscore_framework-0.1.0.dist-info/WHEEL +5 -0
  44. jarviscore_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
  45. jarviscore_framework-0.1.0.dist-info/top_level.txt +3 -0
  46. tests/conftest.py +44 -0
  47. tests/test_agent.py +165 -0
  48. tests/test_autoagent.py +140 -0
  49. tests/test_autoagent_day4.py +186 -0
  50. tests/test_customagent.py +248 -0
  51. tests/test_integration.py +293 -0
  52. tests/test_llm_fallback.py +185 -0
  53. tests/test_mesh.py +356 -0
  54. tests/test_p2p_integration.py +375 -0
  55. tests/test_remote_sandbox.py +116 -0
jarviscore/core/mesh.py
@@ -0,0 +1,463 @@
+"""
+Mesh - Central orchestrator for JarvisCore framework.
+
+The Mesh coordinates agent execution and provides two operational modes:
+- Autonomous: Execute multi-step workflows with dependency resolution
+- Distributed: Run as P2P service responding to task requests
+
+Day 1: Foundation with agent registration and setup
+Day 2: P2P integration for agent discovery and coordination
+Day 3: Full workflow orchestration with state management
+"""
+from typing import List, Dict, Any, Optional, Type
+from enum import Enum
+import logging
+
+from .agent import Agent
+
+logger = logging.getLogger(__name__)
+
+
+class MeshMode(Enum):
+    """Operational modes for Mesh."""
+    AUTONOMOUS = "autonomous"    # Execute workflows locally
+    DISTRIBUTED = "distributed"  # Run as P2P service
+
+
+class Mesh:
+    """
+    Central orchestrator for JarvisCore agent framework.
+
+    The Mesh manages agent lifecycle, coordinates execution, and provides
+    two operational modes:
+
+    1. **Autonomous Mode**: Execute multi-step workflows locally
+       - User defines workflow steps with dependencies
+       - Mesh routes tasks to capable agents
+       - Handles crash recovery and checkpointing
+
+    2. **Distributed Mode**: Run as P2P service
+       - Agents join P2P network and announce capabilities
+       - Receive and execute tasks from other nodes
+       - Coordinate with remote agents for complex workflows
+
+    Example (Autonomous):
+        mesh = Mesh(mode="autonomous")
+        mesh.add(ScraperAgent)
+        mesh.add(ProcessorAgent)
+
+        await mesh.start()
+        results = await mesh.workflow("scrape-and-process", [
+            {"agent": "scraper", "task": "Scrape example.com"},
+            {"agent": "processor", "task": "Process data", "depends_on": [0]}
+        ])
+
+    Example (Distributed):
+        mesh = Mesh(mode="distributed")
+        mesh.add(APIAgent)
+        mesh.add(DatabaseAgent)
+
+        await mesh.start()
+        await mesh.serve_forever()  # Run as service
+    """
+
+    def __init__(
+        self,
+        mode: str = "autonomous",
+        config: Optional[Dict[str, Any]] = None
+    ):
+        """
+        Initialize Mesh orchestrator.
+
+        Args:
+            mode: Operational mode ("autonomous" or "distributed")
+            config: Optional configuration dictionary:
+                - p2p_enabled: Enable P2P networking (default: True for distributed)
+                - state_backend: "file", "redis", "mongodb" (default: "file")
+                - event_store: Path or connection string for event storage
+                - checkpoint_interval: Save checkpoints every N steps (default: 1)
+                - max_parallel: Max parallel step execution (default: 5)
+
+        Raises:
+            ValueError: If invalid mode specified
+        """
+        # Validate mode
+        try:
+            self.mode = MeshMode(mode)
+        except ValueError:
+            raise ValueError(
+                f"Invalid mode '{mode}'. Must be 'autonomous' or 'distributed'"
+            )
+
+        self.config = config or {}
+        self.agents: List[Agent] = []
+        self._agent_registry: Dict[str, List[Agent]] = {}  # role -> list of agents
+        self._agent_ids: set = set()  # Track unique agent IDs
+        self._capability_index: Dict[str, List[Agent]] = {}  # capability -> agents
+
+        # Components (initialized in start())
+        self._p2p_coordinator = None  # Day 2: P2P integration
+        self._workflow_engine = None  # Day 3: Workflow orchestration
+        self._state_manager = None  # Day 3: State management
+
+        self._started = False
+        self._logger = logging.getLogger(f"jarviscore.mesh")
+
+        self._logger.info(f"Mesh initialized in {self.mode.value} mode")
+
+    def add(
+        self,
+        agent_class: Type[Agent],
+        agent_id: Optional[str] = None,
+        **kwargs
+    ) -> Agent:
+        """
+        Register an agent with the mesh.
+
+        Args:
+            agent_class: Agent class to instantiate (must inherit from Agent)
+            agent_id: Optional unique identifier for the agent
+            **kwargs: Additional arguments passed to agent constructor
+
+        Returns:
+            Instantiated agent instance
+
+        Raises:
+            ValueError: If agent with same role already registered
+            TypeError: If agent_class doesn't inherit from Agent
+
+        Example:
+            mesh = Mesh()
+            scraper = mesh.add(ScraperAgent, agent_id="scraper-1")
+            processor = mesh.add(ProcessorAgent)
+        """
+        # Validate agent class
+        if not issubclass(agent_class, Agent):
+            raise TypeError(
+                f"{agent_class.__name__} must inherit from Agent base class"
+            )
+
+        # Instantiate agent
+        agent = agent_class(agent_id=agent_id, **kwargs)
+
+        # Check for duplicate agent_ids
+        if agent.agent_id in self._agent_ids:
+            raise ValueError(
+                f"Agent with id '{agent.agent_id}' already registered. "
+                f"Each agent must have a unique agent_id."
+            )
+
+        # If agent_id was NOT explicitly provided (auto-generated),
+        # prevent duplicate roles to avoid accidents
+        if agent_id is None and agent.role in self._agent_registry:
+            raise ValueError(
+                f"Agent with role '{agent.role}' already registered. "
+                f"Use agent_id parameter to create multiple agents with same role."
+            )
+
+        # Link agent to mesh
+        agent._mesh = self
+
+        # Register agent
+        self.agents.append(agent)
+        self._agent_ids.add(agent.agent_id)
+
+        # Register by role (allow multiple agents per role)
+        if agent.role not in self._agent_registry:
+            self._agent_registry[agent.role] = []
+        self._agent_registry[agent.role].append(agent)
+
+        # Index by capabilities
+        for capability in agent.capabilities:
+            if capability not in self._capability_index:
+                self._capability_index[capability] = []
+            self._capability_index[capability].append(agent)
+
+        self._logger.info(
+            f"Registered agent: {agent.agent_id} "
+            f"(role={agent.role}, capabilities={agent.capabilities})"
+        )
+
+        return agent
+
+    async def start(self):
+        """
+        Initialize mesh and setup all registered agents.
+
+        This method:
+        1. Calls setup() on all registered agents
+        2. Initializes P2P coordinator (distributed mode)
+        3. Announces agent capabilities to network (distributed mode)
+        4. Initializes workflow engine (autonomous mode)
+
+        Raises:
+            RuntimeError: If no agents registered or already started
+
+        Example:
+            mesh = Mesh()
+            mesh.add(ScraperAgent)
+            await mesh.start()  # Agents are now ready
+        """
+        if self._started:
+            raise RuntimeError("Mesh already started. Call stop() first.")
+
+        if not self.agents:
+            raise RuntimeError("No agents registered. Use mesh.add() to register agents.")
+
+        self._logger.info("Starting mesh...")
+
+        # Setup all agents
+        for agent in self.agents:
+            try:
+                await agent.setup()
+                self._logger.info(f"Agent setup complete: {agent.agent_id}")
+            except Exception as e:
+                self._logger.error(f"Failed to setup agent {agent.agent_id}: {e}")
+                raise
+
+        # Initialize P2P coordinator (Day 2 implementation)
+        if self.mode == MeshMode.DISTRIBUTED or self.config.get("p2p_enabled", False):
+            self._logger.info("Initializing P2P coordinator...")
+            from jarviscore.p2p import P2PCoordinator
+            from jarviscore.config import get_config_from_dict
+
+            # Get full config with defaults
+            full_config = get_config_from_dict(self.config)
+
+            # Initialize P2P Coordinator
+            self._p2p_coordinator = P2PCoordinator(self.agents, full_config)
+            await self._p2p_coordinator.start()
+            self._logger.info("✓ P2P coordinator started")
+
+            # Announce capabilities to network
+            await self._p2p_coordinator.announce_capabilities()
+            self._logger.info("✓ Capabilities announced to mesh")
+
+        # Initialize workflow engine (Day 3 implementation)
+        if self.mode == MeshMode.AUTONOMOUS:
+            self._logger.info("Initializing workflow engine...")
+            from jarviscore.orchestration import WorkflowEngine
+
+            # Initialize workflow engine
+            self._workflow_engine = WorkflowEngine(
+                mesh=self,
+                p2p_coordinator=self._p2p_coordinator,
+                config=self.config
+            )
+            await self._workflow_engine.start()
+            self._logger.info("✓ Workflow engine started")
+
+        self._started = True
+        self._logger.info(
+            f"Mesh started successfully with {len(self.agents)} agent(s) "
+            f"in {self.mode.value} mode"
+        )
+
+    async def workflow(
+        self,
+        workflow_id: str,
+        steps: List[Dict[str, Any]]
+    ) -> List[Dict[str, Any]]:
+        """
+        Execute a multi-step workflow (autonomous mode only).
+
+        Args:
+            workflow_id: Unique workflow identifier (for crash recovery)
+            steps: List of step specifications, each containing:
+                - agent: Agent role or capability to execute step
+                - task: Task description
+                - depends_on: List of step indices this step depends on (optional)
+                - params: Additional parameters (optional)
+
+        Returns:
+            List of step results in execution order
+
+        Raises:
+            RuntimeError: If mesh not started or not in autonomous mode
+            ValueError: If workflow specification is invalid
+
+        Example:
+            results = await mesh.workflow("data-pipeline", [
+                {
+                    "agent": "scraper",
+                    "task": "Scrape example.com for product data"
+                },
+                {
+                    "agent": "processor",
+                    "task": "Clean and normalize product data",
+                    "depends_on": [0]
+                },
+                {
+                    "agent": "storage",
+                    "task": "Save to database",
+                    "depends_on": [1]
+                }
+            ])
+
+        DAY 1: Mock implementation (returns placeholder results)
+        DAY 3: Full implementation with state management and crash recovery
+        """
+        if not self._started:
+            raise RuntimeError("Mesh not started. Call await mesh.start() first.")
+
+        if self.mode != MeshMode.AUTONOMOUS:
+            raise RuntimeError(
+                f"workflow() only available in autonomous mode. "
+                f"Current mode: {self.mode.value}"
+            )
+
+        self._logger.info(f"Executing workflow: {workflow_id} with {len(steps)} step(s)")
+
+        # Execute workflow using workflow engine
+        if self._workflow_engine:
+            return await self._workflow_engine.execute(workflow_id, steps)
+        else:
+            # Fallback if workflow engine not initialized
+            raise RuntimeError("Workflow engine not initialized")
+
+    async def serve_forever(self):
+        """
+        Run mesh as a service (distributed mode only).
+
+        Keeps the mesh running indefinitely, processing incoming tasks from
+        the P2P network. Handles:
+        - Task routing to capable agents
+        - Heartbeat/keepalive with P2P network
+        - Graceful shutdown on interrupt
+
+        Raises:
+            RuntimeError: If mesh not started or not in distributed mode
+
+        Example:
+            mesh = Mesh(mode="distributed")
+            mesh.add(APIAgent)
+            await mesh.start()
+            await mesh.serve_forever()  # Blocks until interrupted
+
+        DAY 1: Basic keep-alive loop
+        DAY 2: Full P2P integration with task routing
+        """
+        if not self._started:
+            raise RuntimeError("Mesh not started. Call await mesh.start() first.")
+
+        if self.mode != MeshMode.DISTRIBUTED:
+            raise RuntimeError(
+                f"serve_forever() only available in distributed mode. "
+                f"Current mode: {self.mode.value}"
+            )
+
+        self._logger.info("Serving requests in distributed mode...")
+        self._logger.info("Press Ctrl+C to stop")
+
+        # Run P2P service
+        try:
+            if self._p2p_coordinator:
+                await self._p2p_coordinator.serve()
+            else:
+                # Fallback if P2P not initialized
+                import asyncio
+                await asyncio.Event().wait()
+        except KeyboardInterrupt:
+            self._logger.info("Shutting down...")
+            await self.stop()
+
+    async def stop(self):
+        """
+        Stop mesh and cleanup resources.
+
+        This method:
+        1. Calls teardown() on all agents
+        2. Disconnects from P2P network (distributed mode)
+        3. Saves state and checkpoints
+        4. Closes all connections
+
+        Example:
+            await mesh.stop()
+        """
+        if not self._started:
+            return
+
+        self._logger.info("Stopping mesh...")
+
+        # Teardown agents
+        for agent in self.agents:
+            try:
+                await agent.teardown()
+                self._logger.info(f"Agent teardown complete: {agent.agent_id}")
+            except Exception as e:
+                self._logger.error(f"Error during agent teardown {agent.agent_id}: {e}")
+
+        # Cleanup P2P coordinator
+        if self._p2p_coordinator:
+            await self._p2p_coordinator.stop()
+            self._logger.info("✓ P2P coordinator stopped")
+
+        # Cleanup workflow engine
+        if self._workflow_engine:
+            await self._workflow_engine.stop()
+            self._logger.info("✓ Workflow engine stopped")
+
+        self._started = False
+        self._logger.info("Mesh stopped successfully")
+
+    def _find_agent_for_step(self, step: Dict[str, Any]) -> Optional[Agent]:
+        """
+        Find agent capable of executing a step.
+
+        Args:
+            step: Step specification with 'agent' field (role or capability)
+
+        Returns:
+            Agent instance or None if no capable agent found
+        """
+        required = step.get("agent")
+        if not required:
+            return None
+
+        # Try exact role match first
+        if required in self._agent_registry:
+            agents = self._agent_registry[required]
+            return agents[0] if agents else None
+
+        # Try capability match
+        if required in self._capability_index:
+            agents = self._capability_index[required]
+            return agents[0] if agents else None
+
+        return None
+
+    def get_agent(self, role: str) -> Optional[Agent]:
+        """
+        Get first agent by role.
+
+        If multiple agents share the same role, returns the first registered agent.
+        Use get_agents_by_role() to get all agents with a specific role.
+
+        Args:
+            role: Agent role identifier
+
+        Returns:
+            Agent instance or None if not found
+        """
+        agents = self._agent_registry.get(role, [])
+        return agents[0] if agents else None
+
+    def get_agents_by_capability(self, capability: str) -> List[Agent]:
+        """
+        Get all agents with a specific capability.
+
+        Args:
+            capability: Capability identifier
+
+        Returns:
+            List of agents with the capability (empty if none found)
+        """
+        return self._capability_index.get(capability, [])
+
+    def __repr__(self) -> str:
+        """String representation of mesh."""
+        return (
+            f"<Mesh mode={self.mode.value} "
+            f"agents={len(self.agents)} "
+            f"started={self._started}>"
+        )
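
For orientation while reading the diff, a minimal end-to-end sketch of how the Mesh API added above is meant to be driven. It assumes Mesh is re-exported from the package root (jarviscore/__init__.py is part of this release but not shown in this hunk), and ScraperAgent/ProcessorAgent are hypothetical Agent subclasses borrowed from the docstring examples, with roles "scraper" and "processor":

import asyncio
from jarviscore import Mesh                           # assumed re-export from package root
from myagents import ScraperAgent, ProcessorAgent     # hypothetical Agent subclasses

async def main():
    mesh = Mesh(mode="autonomous", config={"max_parallel": 2})
    mesh.add(ScraperAgent)                        # one agent per role unless agent_id is given
    mesh.add(ProcessorAgent, agent_id="proc-1")   # explicit agent_id permits duplicate roles
    await mesh.start()
    try:
        # Step 1 depends on step 0; Mesh routes each step to an agent by role or capability
        results = await mesh.workflow("scrape-and-process", [
            {"agent": "scraper", "task": "Scrape example.com"},
            {"agent": "processor", "task": "Process data", "depends_on": [0]},
        ])
        print(results)
    finally:
        await mesh.stop()

asyncio.run(main())
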
jarviscore/core/profile.py
@@ -0,0 +1,64 @@
+"""
+Profile base class - defines HOW agents execute tasks.
+
+Profiles are execution strategies:
+- AutoAgent: Automated execution via LLM code generation (framework does everything)
+- CustomAgent: Custom execution with user-defined logic (user has full control)
+"""
+from abc import ABC
+from .agent import Agent
+
+
+class Profile(Agent):
+    """
+    Abstract base for execution profiles.
+
+    Profiles define HOW agents execute tasks, while the Agent base class
+    defines WHAT they do (role, capabilities).
+
+    This is an intermediate layer between Agent and concrete implementations
+    (AutoAgent, CustomAgent). It provides a common place for profile-specific
+    setup and teardown logic.
+
+    Note: This class is optional - profiles can directly inherit from Agent.
+    We use it to make the architecture clearer: Agent (WHAT) → Profile (HOW) → Concrete.
+    """
+
+    def __init__(self, agent_id=None):
+        super().__init__(agent_id)
+
+        # Profile-specific execution engine (initialized by subclasses)
+        self._execution_engine = None
+
+    async def setup(self):
+        """
+        Setup execution engine.
+
+        Subclasses override to initialize their specific execution engines:
+        - AutoAgent: LLM client, code generator, sandbox executor
+        - CustomAgent: User's custom framework (LangChain, MCP, etc.)
+
+        Example:
+            async def setup(self):
+                await super().setup()
+                self.llm = create_llm_client(config)
+                self.codegen = CodeGenerator(self.llm)
+        """
+        await super().setup()
+        self._logger.debug(f"Profile setup: {self.__class__.__name__}")
+
+    async def teardown(self):
+        """
+        Cleanup execution engine.
+
+        Subclasses override to cleanup their specific resources.
+
+        Example:
+            async def teardown(self):
+                await self.llm.close()
+                await super().teardown()
+        """
+        await super().teardown()
+        self._logger.debug(f"Profile teardown: {self.__class__.__name__}")
+
+    # execute_task() remains abstract - implemented by concrete profiles
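
To make the Agent (WHAT) → Profile (HOW) → Concrete layering tangible, a minimal sketch of a Profile subclass follows. The execute_task() signature and the role/capabilities attributes are declared in the Agent base class (jarviscore/core/agent.py, not shown in this hunk), so those parts are assumptions rather than the package's confirmed API:

from jarviscore.core.profile import Profile

class EchoProfile(Profile):
    """Toy profile that 'executes' a task by echoing it back."""
    role = "echo"                 # assumed: Agent exposes role/capabilities metadata
    capabilities = ["echo"]

    async def setup(self):
        await super().setup()                  # Profile.setup() logs, then Agent.setup() runs
        self._execution_engine = object()      # stand-in for a real engine (LLM client, sandbox, ...)

    async def execute_task(self, task):        # signature assumed; declared abstract on Agent
        return {"status": "ok", "echo": task}

    async def teardown(self):
        self._execution_engine = None
        await super().teardown()
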