jarviscore-framework 0.1.0-py3-none-any.whl → 0.1.1-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that public registry.
- jarviscore/__init__.py +37 -5
- jarviscore/adapter/__init__.py +34 -0
- jarviscore/adapter/decorator.py +332 -0
- jarviscore/cli/check.py +18 -13
- jarviscore/cli/scaffold.py +178 -0
- jarviscore/context/__init__.py +40 -0
- jarviscore/context/dependency.py +160 -0
- jarviscore/context/jarvis_context.py +207 -0
- jarviscore/context/memory.py +155 -0
- jarviscore/data/.env.example +146 -0
- jarviscore/data/__init__.py +7 -0
- jarviscore/data/examples/calculator_agent_example.py +77 -0
- jarviscore/data/examples/multi_agent_workflow.py +132 -0
- jarviscore/data/examples/research_agent_example.py +76 -0
- jarviscore/docs/CONFIGURATION.md +6 -2
- jarviscore/docs/GETTING_STARTED.md +7 -4
- jarviscore/docs/TROUBLESHOOTING.md +11 -7
- jarviscore/docs/USER_GUIDE.md +6 -2
- jarviscore/execution/llm.py +23 -16
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.1.1.dist-info}/METADATA +10 -9
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.1.1.dist-info}/RECORD +26 -12
- tests/test_context.py +467 -0
- tests/test_decorator.py +622 -0
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.1.1.dist-info}/WHEEL +0 -0
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.1.1.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.1.1.dist-info}/top_level.txt +0 -0
jarviscore/context/__init__.py
@@ -0,0 +1,40 @@
+"""
+Context module for JarvisCore Custom Profile.
+
+Provides orchestration primitives for wrapped agents:
+- JarvisContext: Unified context with workflow info and accessors
+- MemoryAccessor: Clean API over workflow memory
+- DependencyAccessor: Clean API over dependency management
+
+These are facades over existing JarvisCore components, providing
+a developer-friendly interface for Custom Profile agents.
+
+Example:
+    from jarviscore.context import JarvisContext
+
+    @jarvis_agent(role="processor", capabilities=["processing"])
+    class Processor:
+        def run(self, task, ctx: JarvisContext):
+            # Access previous step
+            data = ctx.previous("step1")
+
+            # Access memory
+            all_data = ctx.memory.all()
+
+            # Check dependencies
+            if ctx.deps.is_ready("optional"):
+                optional = ctx.previous("optional")
+
+            return {"processed": process(data)}
+"""
+
+from .jarvis_context import JarvisContext, create_context
+from .memory import MemoryAccessor
+from .dependency import DependencyAccessor
+
+__all__ = [
+    'JarvisContext',
+    'create_context',
+    'MemoryAccessor',
+    'DependencyAccessor',
+]
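The new `jarviscore.context` package exports `JarvisContext`, `create_context`, `MemoryAccessor`, and `DependencyAccessor`. A minimal sketch of how these compose, using a plain dict in place of `WorkflowEngine.memory` (the engine and decorator wiring are outside this part of the diff, and the sample data is illustrative):

```python
# Sketch: drive the new context API against a plain dict standing in for
# WorkflowEngine.memory. Only names exported by jarviscore.context are used.
from jarviscore.context import create_context

memory = {"step1": {"status": "completed", "output": {"rows": 42}}}

ctx = create_context(
    workflow_id="pipeline-1",
    step_id="step2",
    task="Process data",
    params={"threshold": 0.5},
    memory_dict=memory,
    dependency_manager=None,  # DependencyAccessor falls back to simple checks
)

print(ctx.previous("step1"))       # {'rows': 42}  ('output' is unwrapped)
print(ctx.get_param("threshold"))  # 0.5
print(ctx.deps.is_ready("step1"))  # True
```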
jarviscore/context/dependency.py
@@ -0,0 +1,160 @@
+"""
+DependencyAccessor - Clean API over DependencyManager.
+
+Wraps the existing orchestration.DependencyManager to provide
+a developer-friendly interface for Custom Profile agents.
+"""
+from typing import List, Dict, Any, Tuple, Optional
+
+
+class DependencyAccessor:
+    """
+    Provides clean access to dependency management.
+
+    This is a facade over the existing DependencyManager class.
+    It provides a simpler interface for checking and waiting on dependencies.
+
+    Example:
+        # In agent's run method with ctx: JarvisContext
+        await ctx.deps.wait_for(["step1", "step2"])
+        ready, missing = ctx.deps.check(["step1", "step2"])
+        if ctx.deps.is_ready("optional_step"):
+            ...
+    """
+
+    def __init__(
+        self,
+        dependency_manager: Optional[Any],
+        memory: Dict[str, Any]
+    ):
+        """
+        Initialize dependency accessor.
+
+        Args:
+            dependency_manager: Reference to orchestration.DependencyManager
+            memory: Reference to WorkflowEngine.memory dict
+        """
+        self._manager = dependency_manager
+        self._memory = memory
+
+    async def wait_for(
+        self,
+        step_ids: List[str],
+        timeout: float = 300.0
+    ) -> Dict[str, Any]:
+        """
+        Wait for specific steps to complete.
+
+        Blocks until all specified steps have outputs in memory.
+
+        Args:
+            step_ids: List of step IDs to wait for
+            timeout: Maximum wait time in seconds (default: 5 minutes)
+
+        Returns:
+            Dictionary of step_id -> output
+
+        Raises:
+            TimeoutError: If dependencies not ready within timeout
+
+        Example:
+            results = await deps.wait_for(["step1", "step2"])
+            step1_data = results["step1"]
+        """
+        if self._manager is None:
+            # Fallback: simple check without manager
+            return self._simple_wait(step_ids)
+
+        return await self._manager.wait_for(step_ids, self._memory, timeout)
+
+    def _simple_wait(self, step_ids: List[str]) -> Dict[str, Any]:
+        """
+        Simple synchronous check (used when manager not available).
+
+        Returns outputs for steps that exist in memory.
+        """
+        result = {}
+        for step_id in step_ids:
+            if step_id in self._memory:
+                value = self._memory[step_id]
+                if isinstance(value, dict) and 'output' in value:
+                    result[step_id] = value['output']
+                else:
+                    result[step_id] = value
+        return result
+
+    def check(self, step_ids: List[str]) -> Tuple[bool, List[str]]:
+        """
+        Check if dependencies are satisfied (non-blocking).
+
+        Args:
+            step_ids: Steps to check
+
+        Returns:
+            Tuple of (all_satisfied, missing_step_ids)
+
+        Example:
+            ready, missing = deps.check(["step1", "step2"])
+            if not ready:
+                print(f"Still waiting for: {missing}")
+        """
+        if self._manager is None:
+            # Fallback: simple check without manager
+            missing = [s for s in step_ids if s not in self._memory]
+            return (len(missing) == 0, missing)
+
+        return self._manager.check_dependencies(step_ids, self._memory)
+
+    def is_ready(self, step_id: str) -> bool:
+        """
+        Check if a single step is ready (non-blocking).
+
+        Args:
+            step_id: Step to check
+
+        Returns:
+            True if step output exists in memory
+
+        Example:
+            if deps.is_ready("optional_step"):
+                data = ctx.memory.get("optional_step")
+        """
+        return step_id in self._memory
+
+    def all_ready(self, step_ids: List[str]) -> bool:
+        """
+        Check if all specified steps are ready.
+
+        Args:
+            step_ids: Steps to check
+
+        Returns:
+            True if all steps have outputs in memory
+
+        Example:
+            if deps.all_ready(["step1", "step2", "step3"]):
+                # All dependencies satisfied
+                ...
+        """
+        return all(self.is_ready(s) for s in step_ids)
+
+    def any_ready(self, step_ids: List[str]) -> bool:
+        """
+        Check if any of the specified steps are ready.
+
+        Args:
+            step_ids: Steps to check
+
+        Returns:
+            True if at least one step has output in memory
+
+        Example:
+            if deps.any_ready(["cache_step", "compute_step"]):
+                # At least one source available
+                ...
+        """
+        return any(self.is_ready(s) for s in step_ids)
+
+    def __repr__(self) -> str:
+        ready_count = sum(1 for k in self._memory.keys())
+        return f"<DependencyAccessor ready_steps={ready_count}>"
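A sketch of the fallback path when no `DependencyManager` is supplied (first constructor argument `None`): the checks reduce to membership tests on the memory dict, and `wait_for` returns whatever is already present rather than blocking. The dict contents are illustrative only:

```python
import asyncio

from jarviscore.context import DependencyAccessor

# Illustrative memory dict; real entries are written by the WorkflowEngine.
memory = {"step1": {"output": [1, 2, 3]}, "step2": "raw value"}
deps = DependencyAccessor(None, memory)

ready, missing = deps.check(["step1", "step2", "step3"])
print(ready, missing)                        # False ['step3']
print(deps.is_ready("step1"))                # True
print(deps.all_ready(["step1", "step2"]))    # True
print(deps.any_ready(["step3", "step2"]))    # True

# Without a manager, wait_for does not block: it returns the outputs that
# already exist ('output' is unwrapped where present) and skips the rest.
print(asyncio.run(deps.wait_for(["step1", "step3"])))  # {'step1': [1, 2, 3]}
```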
jarviscore/context/jarvis_context.py
@@ -0,0 +1,207 @@
+"""
+JarvisContext - Unified context for Custom Profile agents.
+
+Provides a single object that gives agents access to:
+- Workflow information (workflow_id, step_id)
+- Task information (task description, params)
+- Memory (shared state between steps)
+- Dependencies (check/wait for other steps)
+
+This is a facade over existing JarvisCore primitives.
+"""
+from dataclasses import dataclass, field
+from typing import Dict, Any, Optional, List
+
+from .memory import MemoryAccessor
+from .dependency import DependencyAccessor
+
+
+@dataclass
+class JarvisContext:
+    """
+    Context passed to wrapped agents during execution.
+
+    Provides unified access to JarvisCore orchestration primitives.
+    Agents receive this as the `ctx` parameter when they declare it
+    in their run method signature.
+
+    Attributes:
+        workflow_id: Unique identifier for the current workflow
+        step_id: Unique identifier for the current step
+        task: Task description string
+        params: Task parameters dictionary
+        memory: Accessor for shared workflow memory
+        deps: Accessor for dependency management
+
+    Example:
+        @jarvis_agent(role="aggregator", capabilities=["aggregation"])
+        class Aggregator:
+            def run(self, task, ctx: JarvisContext):
+                # Access previous step output
+                step1_data = ctx.previous("step1")
+
+                # Access all previous results
+                all_results = ctx.memory.all()
+
+                # Check workflow info
+                print(f"Running step {ctx.step_id} in {ctx.workflow_id}")
+
+                # Use params
+                threshold = ctx.params.get("threshold", 0.5)
+
+                return {"aggregated": process(step1_data)}
+    """
+
+    # Workflow info
+    workflow_id: str
+    step_id: str
+
+    # Task info
+    task: str
+    params: Dict[str, Any] = field(default_factory=dict)
+
+    # Orchestration accessors
+    memory: MemoryAccessor = None
+    deps: DependencyAccessor = None
+
+    def previous(self, step_id: str, default: Any = None) -> Any:
+        """
+        Get output from a previous step.
+
+        Convenience method that delegates to memory.get().
+
+        Args:
+            step_id: ID of the step to get output from
+            default: Default value if not found
+
+        Returns:
+            Step output or default
+
+        Example:
+            step1_result = ctx.previous("step1")
+            optional = ctx.previous("optional_step", default={})
+        """
+        if self.memory is None:
+            return default
+        return self.memory.get(step_id, default)
+
+    def all_previous(self) -> Dict[str, Any]:
+        """
+        Get all previous step outputs.
+
+        Convenience method that delegates to memory.all().
+
+        Returns:
+            Dictionary of step_id -> output
+
+        Example:
+            all_results = ctx.all_previous()
+            for step_id, output in all_results.items():
+                print(f"{step_id} produced: {output}")
+        """
+        if self.memory is None:
+            return {}
+        return self.memory.all()
+
+    @property
+    def previous_results(self) -> Dict[str, Any]:
+        """
+        Alias for all_previous().
+
+        Provides property-style access to all previous results.
+
+        Example:
+            results = ctx.previous_results
+        """
+        return self.all_previous()
+
+    def has_previous(self, step_id: str) -> bool:
+        """
+        Check if a previous step's output exists.
+
+        Args:
+            step_id: Step to check
+
+        Returns:
+            True if output exists
+
+        Example:
+            if ctx.has_previous("optional_step"):
+                data = ctx.previous("optional_step")
+        """
+        if self.memory is None:
+            return False
+        return self.memory.has(step_id)
+
+    def get_param(self, key: str, default: Any = None) -> Any:
+        """
+        Get a task parameter by key.
+
+        Args:
+            key: Parameter key
+            default: Default value if not found
+
+        Returns:
+            Parameter value or default
+
+        Example:
+            threshold = ctx.get_param("threshold", 0.5)
+            mode = ctx.get_param("mode", "default")
+        """
+        return self.params.get(key, default)
+
+    def __repr__(self) -> str:
+        return (
+            f"<JarvisContext "
+            f"workflow={self.workflow_id} "
+            f"step={self.step_id} "
+            f"params={list(self.params.keys())}>"
+        )
+
+
+def create_context(
+    workflow_id: str,
+    step_id: str,
+    task: str,
+    params: Dict[str, Any],
+    memory_dict: Dict[str, Any],
+    dependency_manager: Optional[Any] = None
+) -> JarvisContext:
+    """
+    Factory function to create a JarvisContext.
+
+    Used internally by the decorator and WorkflowEngine to create
+    context objects for agents.
+
+    Args:
+        workflow_id: Workflow identifier
+        step_id: Step identifier
+        task: Task description
+        params: Task parameters
+        memory_dict: Reference to WorkflowEngine.memory
+        dependency_manager: Optional reference to DependencyManager
+
+    Returns:
+        Configured JarvisContext instance
+
+    Example:
+        ctx = create_context(
+            workflow_id="pipeline-1",
+            step_id="step2",
+            task="Process data",
+            params={"threshold": 0.5},
+            memory_dict=engine.memory,
+            dependency_manager=engine.dependency_manager
+        )
+    """
+    memory_accessor = MemoryAccessor(memory_dict, step_id)
+    dep_accessor = DependencyAccessor(dependency_manager, memory_dict)
+
+    return JarvisContext(
+        workflow_id=workflow_id,
+        step_id=step_id,
+        task=task,
+        params=params,
+        memory=memory_accessor,
+        deps=dep_accessor
+    )
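A short sketch of the `JarvisContext` accessors added here: the bare dataclass degrades gracefully when no memory accessor is attached, while `create_context` wires the accessors to a memory dict. The sample data is illustrative:

```python
from jarviscore.context import JarvisContext, create_context

# Accessors default to None, so a bare context degrades gracefully.
bare = JarvisContext(workflow_id="wf", step_id="s1", task="demo")
print(bare.previous("step0", default="n/a"))  # 'n/a'
print(bare.all_previous())                    # {}
print(bare.has_previous("step0"))             # False

# With create_context the accessors are wired to a memory dict.
memory = {"fetch": {"output": {"url": "https://example.com"}}}
ctx = create_context(
    workflow_id="wf",
    step_id="report",
    task="Summarise fetch results",
    params={"mode": "brief"},
    memory_dict=memory,
)
print(ctx.previous_results)           # {'fetch': {'url': 'https://example.com'}}
print(ctx.has_previous("fetch"))      # True
print(ctx.get_param("mode", "full"))  # 'brief'
print(ctx)  # <JarvisContext workflow=wf step=report params=['mode']>
```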
jarviscore/context/memory.py
@@ -0,0 +1,155 @@
+"""
+MemoryAccessor - Clean API over workflow memory.
+
+Wraps the existing WorkflowEngine.memory dict to provide
+a developer-friendly interface for Custom Profile agents.
+"""
+from typing import Dict, Any, Optional, List
+
+
+class MemoryAccessor:
+    """
+    Provides clean access to workflow memory (shared state between agents).
+
+    This is a facade over the existing WorkflowEngine.memory dict.
+    It extracts 'output' from step results automatically and provides
+    a simple get/put/has/all interface.
+
+    Example:
+        # In agent's run method with ctx: JarvisContext
+        data = ctx.memory.get("step1")
+        ctx.memory.put("intermediate", processed)
+        all_results = ctx.memory.all()
+    """
+
+    def __init__(self, memory: Dict[str, Any], current_step: str = ""):
+        """
+        Initialize memory accessor.
+
+        Args:
+            memory: Reference to WorkflowEngine.memory dict
+            current_step: ID of the current step (for context)
+        """
+        self._memory = memory
+        self._current_step = current_step
+
+    def get(self, step_id: str, default: Any = None) -> Any:
+        """
+        Get output from a specific step.
+
+        Automatically extracts 'output' from step result dicts.
+
+        Args:
+            step_id: Step to get output from
+            default: Default value if not found
+
+        Returns:
+            Step output or default
+
+        Example:
+            data = memory.get("step1")
+            data = memory.get("optional", default=[])
+        """
+        result = self._memory.get(step_id, default)
+
+        # If result is a dict with 'output' key, extract it
+        if isinstance(result, dict) and 'output' in result:
+            return result['output']
+
+        return result
+
+    def get_raw(self, step_id: str, default: Any = None) -> Any:
+        """
+        Get raw result from a step (without extracting 'output').
+
+        Use this when you need the full result dict including
+        status, error, agent_id, etc.
+
+        Args:
+            step_id: Step to get result from
+            default: Default value if not found
+
+        Returns:
+            Full step result dict or default
+        """
+        return self._memory.get(step_id, default)
+
+    def put(self, key: str, value: Any) -> None:
+        """
+        Store a value in memory.
+
+        Use for intermediate results that other steps may need.
+        Note: Step outputs are automatically stored by the engine.
+
+        Args:
+            key: Key to store under
+            value: Value to store
+
+        Example:
+            memory.put("intermediate_result", processed_data)
+        """
+        self._memory[key] = value
+
+    def has(self, step_id: str) -> bool:
+        """
+        Check if a step's output exists in memory.
+
+        Args:
+            step_id: Step to check
+
+        Returns:
+            True if output exists
+
+        Example:
+            if memory.has("optional_step"):
+                data = memory.get("optional_step")
+        """
+        return step_id in self._memory
+
+    def all(self) -> Dict[str, Any]:
+        """
+        Get all memory contents with outputs extracted.
+
+        Returns:
+            Dictionary of step_id -> output
+
+        Example:
+            all_results = memory.all()
+            for step_id, output in all_results.items():
+                print(f"{step_id}: {output}")
+        """
+        result = {}
+        for key, value in self._memory.items():
+            if isinstance(value, dict) and 'output' in value:
+                result[key] = value['output']
+            else:
+                result[key] = value
+        return result
+
+    def keys(self) -> List[str]:
+        """
+        Get all step IDs in memory.
+
+        Returns:
+            List of step IDs
+        """
+        return list(self._memory.keys())
+
+    def __contains__(self, step_id: str) -> bool:
+        """Support 'in' operator: if 'step1' in memory"""
+        return self.has(step_id)
+
+    def __getitem__(self, step_id: str) -> Any:
+        """Support dict-style access: memory['step1']"""
+        return self.get(step_id)
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        """Support dict-style assignment: memory['key'] = value"""
+        self.put(key, value)
+
+    def __len__(self) -> int:
+        """Return number of items in memory."""
+        return len(self._memory)
+
+    def __repr__(self) -> str:
+        return f"<MemoryAccessor keys={self.keys()}>"
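A sketch of the `MemoryAccessor` behaviour defined above: `get()` and `all()` unwrap the `'output'` field from engine-style result dicts, `get_raw()` returns the full record, and the dunder methods support dict-style use. The sample entries are illustrative:

```python
from jarviscore.context import MemoryAccessor

# Illustrative engine-style memory: step results carry status/output fields.
memory = {
    "step1": {"status": "completed", "output": {"count": 3}},
    "note": "plain value",
}
mem = MemoryAccessor(memory, current_step="step2")

print(mem.get("step1"))      # {'count': 3}  ('output' extracted)
print(mem.get_raw("step1"))  # {'status': 'completed', 'output': {'count': 3}}
print(mem.get("note"))       # 'plain value' (no 'output' key, returned as-is)

mem["intermediate"] = [1, 2]  # __setitem__ -> put(); visible to later steps
print("intermediate" in mem)  # True  (__contains__)
print(len(mem), mem.keys())   # 3 ['step1', 'note', 'intermediate']
print(mem.all()["step1"])     # {'count': 3}
```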
jarviscore/data/.env.example
@@ -0,0 +1,146 @@
+# =============================================================================
+# JARVISCORE FRAMEWORK CONFIGURATION
+# =============================================================================
+# Copy this file to .env and fill in your values
+# All settings are optional - framework provides sensible defaults
+# Standard environment variable names (no JARVISCORE_ prefix)
+
+# =============================================================================
+# LLM CONFIGURATION (Zero-Config with Automatic Fallback)
+# =============================================================================
+# Framework tries providers in order: Claude → vLLM → Azure → Gemini
+# At least ONE provider must be configured for AutoAgent to work
+
+# --- Anthropic Claude ---
+# Standard: CLAUDE_API_KEY or ANTHROPIC_API_KEY
+# CLAUDE_API_KEY=your-anthropic-api-key
+# CLAUDE_ENDPOINT=https://api.anthropic.com # Optional: custom endpoint
+# CLAUDE_MODEL=claude-sonnet-4
+
+# --- vLLM (Local/Self-Hosted) ---
+# Recommended for development and cost-effective production
+# LLM_ENDPOINT=http://localhost:8000
+# LLM_MODEL=Qwen/Qwen2.5-Coder-32B-Instruct
+
+# --- Azure OpenAI ---
+# Standard: AZURE_API_KEY or AZURE_OPENAI_KEY
+# AZURE_API_KEY=your-azure-openai-key
+# AZURE_ENDPOINT=https://your-resource.openai.azure.com
+# AZURE_DEPLOYMENT=gpt-4o
+# AZURE_API_VERSION=2024-02-15-preview
+
+# --- Google Gemini ---
+# GEMINI_API_KEY=your-gemini-api-key
+# GEMINI_MODEL=gemini-1.5-flash
+# GEMINI_TEMPERATURE=0.1
+# GEMINI_TIMEOUT=30.0
+
+# Common LLM Settings
+# LLM_TIMEOUT=120.0
+# LLM_TEMPERATURE=0.7
+
+# =============================================================================
+# EXECUTION SETTINGS
+# =============================================================================
+# Sandbox execution and code generation limits
+
+# Maximum execution time for generated code (seconds)
+EXECUTION_TIMEOUT=300
+
+# Maximum number of autonomous repair attempts
+MAX_REPAIR_ATTEMPTS=3
+
+# Maximum retries for failed operations
+MAX_RETRIES=3
+
+# =============================================================================
+# SANDBOX CONFIGURATION (Phase 2)
+# =============================================================================
+# Sandbox execution mode: "local" (in-process) or "remote" (service)
+# Default: local (for development)
+# Production: remote (uses JarvisCore's Azure Container Apps sandbox)
+SANDBOX_MODE=local
+
+# Remote sandbox service URL (provided by JarvisCore)
+# Azure Container Apps sandbox service - no setup required!
+# Uncomment and set SANDBOX_MODE=remote to use it
+# SANDBOX_SERVICE_URL=https://browser-task-executor.bravesea-3f5f7e75.eastus.azurecontainerapps.io
+
+# =============================================================================
+# STORAGE CONFIGURATION (Phase 1)
+# =============================================================================
+# Directory for result storage and code registry
+LOG_DIRECTORY=./logs
+
+# =============================================================================
+# P2P CONFIGURATION (For Distributed Mode)
+# =============================================================================
+# Required only if using mesh in distributed mode
+# Autonomous mode works without P2P
+
+# Enable P2P mesh networking
+P2P_ENABLED=true
+
+# Node identification
+NODE_NAME=jarviscore-node-1
+
+# Bind address and port for P2P communication
+BIND_HOST=127.0.0.1
+BIND_PORT=7946
+
+# Seed nodes to join existing mesh (comma-separated)
+# Example: 192.168.1.100:7946,192.168.1.101:7946
+# SEED_NODES=
+
+# ZeroMQ port offset (P2P messaging)
+ZMQ_PORT_OFFSET=1000
+
+# Transport type: udp, tcp, or hybrid
+TRANSPORT_TYPE=hybrid
+
+# =============================================================================
+# KEEPALIVE SETTINGS (P2P Health Monitoring)
+# =============================================================================
+
+# Enable smart keepalive (suppresses when active)
+KEEPALIVE_ENABLED=true
+
+# Keepalive interval (seconds)
+KEEPALIVE_INTERVAL=90
+
+# Keepalive timeout (seconds)
+KEEPALIVE_TIMEOUT=10
+
+# Activity suppression window (seconds)
+# If agent is actively working, suppress keepalive for this duration
+ACTIVITY_SUPPRESS_WINDOW=60
+
+# =============================================================================
+# LOGGING
+# =============================================================================
+# Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL
+LOG_LEVEL=INFO
+
+# =============================================================================
+# EXAMPLES
+# =============================================================================
+
+# Example 1: Local development with vLLM
+# LLM_ENDPOINT=http://localhost:8000
+# LLM_MODEL=Qwen/Qwen2.5-Coder-32B-Instruct
+# P2P_ENABLED=false
+
+# Example 2: Production with Azure OpenAI + P2P mesh
+# AZURE_API_KEY=sk-...
+# AZURE_ENDPOINT=https://my-resource.openai.azure.com
+# AZURE_DEPLOYMENT=gpt-4o
+# P2P_ENABLED=true
+# BIND_HOST=0.0.0.0
+# BIND_PORT=7946
+# SEED_NODES=192.168.1.100:7946
+
+# Example 3: Zero-config (use defaults)
+# Just don't set any variables and framework will:
+# - Try to detect available LLM providers
+# - Use sensible defaults for all settings
+# - Work in autonomous mode (no P2P)