jarviscore-framework 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/calculator_agent_example.py +77 -0
- examples/multi_agent_workflow.py +132 -0
- examples/research_agent_example.py +76 -0
- jarviscore/__init__.py +54 -0
- jarviscore/cli/__init__.py +7 -0
- jarviscore/cli/__main__.py +33 -0
- jarviscore/cli/check.py +404 -0
- jarviscore/cli/smoketest.py +371 -0
- jarviscore/config/__init__.py +7 -0
- jarviscore/config/settings.py +128 -0
- jarviscore/core/__init__.py +7 -0
- jarviscore/core/agent.py +163 -0
- jarviscore/core/mesh.py +463 -0
- jarviscore/core/profile.py +64 -0
- jarviscore/docs/API_REFERENCE.md +932 -0
- jarviscore/docs/CONFIGURATION.md +753 -0
- jarviscore/docs/GETTING_STARTED.md +600 -0
- jarviscore/docs/TROUBLESHOOTING.md +424 -0
- jarviscore/docs/USER_GUIDE.md +983 -0
- jarviscore/execution/__init__.py +94 -0
- jarviscore/execution/code_registry.py +298 -0
- jarviscore/execution/generator.py +268 -0
- jarviscore/execution/llm.py +430 -0
- jarviscore/execution/repair.py +283 -0
- jarviscore/execution/result_handler.py +332 -0
- jarviscore/execution/sandbox.py +555 -0
- jarviscore/execution/search.py +281 -0
- jarviscore/orchestration/__init__.py +18 -0
- jarviscore/orchestration/claimer.py +101 -0
- jarviscore/orchestration/dependency.py +143 -0
- jarviscore/orchestration/engine.py +292 -0
- jarviscore/orchestration/status.py +96 -0
- jarviscore/p2p/__init__.py +23 -0
- jarviscore/p2p/broadcaster.py +353 -0
- jarviscore/p2p/coordinator.py +364 -0
- jarviscore/p2p/keepalive.py +361 -0
- jarviscore/p2p/swim_manager.py +290 -0
- jarviscore/profiles/__init__.py +6 -0
- jarviscore/profiles/autoagent.py +264 -0
- jarviscore/profiles/customagent.py +137 -0
- jarviscore_framework-0.1.0.dist-info/METADATA +136 -0
- jarviscore_framework-0.1.0.dist-info/RECORD +55 -0
- jarviscore_framework-0.1.0.dist-info/WHEEL +5 -0
- jarviscore_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
- jarviscore_framework-0.1.0.dist-info/top_level.txt +3 -0
- tests/conftest.py +44 -0
- tests/test_agent.py +165 -0
- tests/test_autoagent.py +140 -0
- tests/test_autoagent_day4.py +186 -0
- tests/test_customagent.py +248 -0
- tests/test_integration.py +293 -0
- tests/test_llm_fallback.py +185 -0
- tests/test_mesh.py +356 -0
- tests/test_p2p_integration.py +375 -0
- tests/test_remote_sandbox.py +116 -0
|
@@ -0,0 +1,371 @@
|
|
|
1
|
+
"""
|
|
2
|
+
JarvisCore Smoke Test CLI
|
|
3
|
+
|
|
4
|
+
Quick validation that AutoAgent/Prompt-Dev workflow works end-to-end.
|
|
5
|
+
Tests: LLM → Code Generation → Sandbox Execution → Result
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
python -m jarviscore.cli.smoketest # Run basic smoke test
|
|
9
|
+
python -m jarviscore.cli.smoketest --verbose # Show detailed output
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import sys
|
|
13
|
+
import asyncio
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class SmokeTest:
    """Smoke test runner for JarvisCore AutoAgent.

    Runs a short sequence of checks (imports, configuration, mesh creation,
    agent definition, end-to-end workflow), records pass/fail results, and
    prints a summary with troubleshooting hints on failure.
    """

    def __init__(self, verbose: bool = False):
        # When verbose, detail text is also printed for passing tests and
        # full tracebacks are shown on failure.
        self.verbose = verbose
        self.passed = []        # names of tests that passed
        self.failed = []        # (name, detail) pairs for tests that failed
        self.start_time = None  # event-loop timestamps set by run()
        self.end_time = None

    def print_header(self):
        """Print test header."""
        print("\n" + "=" * 70)
        print("  JarvisCore Smoke Test")
        print("  Validating AutoAgent: Prompt → Code → Result")
        print("=" * 70 + "\n")

    def print_test(self, name: str, status: bool, detail: str = "",
                   duration: float = None):
        """Print one test result and record it for the summary.

        Args:
            name: Display name of the test.
            status: True for pass, False for fail.
            detail: Extra context; shown when verbose or on failure.
            duration: Elapsed seconds; omitted from output when None.
        """
        symbol = "✓" if status else "✗"
        # NOTE: compare against None, not truthiness — a genuine 0.00s
        # duration should still be displayed.
        duration_str = f" ({duration:.2f}s)" if duration is not None else ""

        print(f"{symbol} {name}{duration_str}")

        # Detail is always shown for failures, and for passes in verbose mode.
        if self.verbose or not status:
            if detail:
                for line in detail.split('\n'):
                    print(f"  {line}")

        if status:
            self.passed.append(name)
        else:
            self.failed.append((name, detail))

    async def test_imports(self) -> bool:
        """Test that core framework modules load."""
        test_start = asyncio.get_event_loop().time()

        try:
            # Imported purely to prove the modules are importable.
            from jarviscore import Mesh
            from jarviscore.profiles import AutoAgent
            from dotenv import load_dotenv

            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Import Framework", True, "Core modules loaded", duration)
            return True

        except Exception as e:
            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Import Framework", False, str(e), duration)
            return False

    async def test_env_config(self) -> bool:
        """Test environment configuration.

        Loads a .env file when present and requires at least one LLM
        provider credential to be configured.
        """
        test_start = asyncio.get_event_loop().time()

        try:
            import os
            from dotenv import load_dotenv

            # Load .env from the working directory or the package directory.
            env_paths = [Path.cwd() / '.env', Path.cwd() / 'jarviscore' / '.env']
            env_loaded = False

            for env_path in env_paths:
                if env_path.exists():
                    load_dotenv(env_path)
                    env_loaded = True
                    break

            # Check for at least one LLM configured (aliases accepted).
            llm_providers = {
                'CLAUDE_API_KEY': os.getenv('CLAUDE_API_KEY') or os.getenv('ANTHROPIC_API_KEY'),
                'AZURE_API_KEY': os.getenv('AZURE_API_KEY') or os.getenv('AZURE_OPENAI_KEY'),
                'GEMINI_API_KEY': os.getenv('GEMINI_API_KEY'),
                'LLM_ENDPOINT': os.getenv('LLM_ENDPOINT'),
            }

            configured = [k for k, v in llm_providers.items() if v]

            if not configured:
                duration = asyncio.get_event_loop().time() - test_start
                self.print_test(
                    "Configuration",
                    False,
                    "No LLM provider configured. Add API key to .env file.",
                    duration
                )
                return False

            duration = asyncio.get_event_loop().time() - test_start
            provider_str = configured[0].replace('_', ' ').title()
            self.print_test("Configuration", True, f"Using {provider_str}", duration)
            return True

        except Exception as e:
            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Configuration", False, str(e), duration)
            return False

    async def test_mesh_creation(self) -> bool:
        """Test mesh creation in autonomous mode."""
        test_start = asyncio.get_event_loop().time()

        try:
            from jarviscore import Mesh

            mesh = Mesh(mode="autonomous")

            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Create Mesh", True, "Autonomous mode initialized", duration)
            return True

        except Exception as e:
            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Create Mesh", False, str(e), duration)
            return False

    async def test_agent_definition(self) -> bool:
        """Test that an AutoAgent subclass can be defined and instantiated."""
        test_start = asyncio.get_event_loop().time()

        try:
            from jarviscore.profiles import AutoAgent

            class TestAgent(AutoAgent):
                role = "calculator"
                capabilities = ["math", "calculation"]
                system_prompt = "You are a math expert. Generate Python code to solve problems."

            agent = TestAgent()

            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Define Agent", True, "AutoAgent class created", duration)
            return True

        except Exception as e:
            duration = asyncio.get_event_loop().time() - test_start
            self.print_test("Define Agent", False, str(e), duration)
            return False

    async def test_end_to_end_execution(self) -> bool:
        """Test full workflow: prompt → code → result.

        Retries with exponential backoff when the LLM reports a transient
        condition (overloaded, 529, rate limit, timeout). The mesh is always
        stopped before a retry or a failure return so attempts don't leak
        running meshes.
        """
        test_start = asyncio.get_event_loop().time()

        max_retries = 3
        retry_delay = 2  # seconds, doubled after each retry

        for attempt in range(max_retries):
            mesh = None  # allows safe cleanup even if setup fails part-way
            try:
                from jarviscore import Mesh
                from jarviscore.profiles import AutoAgent

                # Define agent
                class CalculatorAgent(AutoAgent):
                    role = "calculator"
                    capabilities = ["math"]
                    system_prompt = "You are a math expert. Generate Python code. Store result in 'result' variable."

                # Create mesh and add agent
                mesh = Mesh(mode="autonomous")
                mesh.add(CalculatorAgent)

                # Start mesh
                await mesh.start()

                if self.verbose and attempt > 0:
                    print(f"  Retry attempt {attempt + 1}/{max_retries}...")

                # Execute simple task
                task_start = asyncio.get_event_loop().time()
                results = await mesh.workflow("smoke-test", [
                    {
                        "agent": "calculator",
                        "task": "Calculate 2 + 2"
                    }
                ])

                task_duration = asyncio.get_event_loop().time() - task_start

                # Validate result
                result = results[0]

                # Check for success (status can be 'success' or 'completed')
                if result.get('status') not in ['success', 'completed']:
                    error_msg = result.get('error', 'Unknown error')

                    # Retryable error (rate limit, overloaded, timeout)?
                    if any(x in str(error_msg).lower() for x in ['overloaded', '529', 'rate limit', 'timeout']):
                        if attempt < max_retries - 1:
                            if self.verbose:
                                print(f"  LLM temporarily unavailable, retrying in {retry_delay}s...")
                            await asyncio.sleep(retry_delay)
                            retry_delay *= 2  # Exponential backoff
                            await mesh.stop()
                            mesh = None
                            continue

                    raise Exception(f"Task failed: {error_msg}")

                output = result.get('output')
                if output != 4:
                    raise Exception(f"Expected 4, got {output}")

                # Stop mesh
                await mesh.stop()
                mesh = None

                duration = asyncio.get_event_loop().time() - test_start

                detail = f"Task: '2 + 2' → Result: {output}\n"
                detail += f"Execution time: {task_duration:.2f}s\n"
                detail += f"Repairs: {result.get('repairs', 0)}"
                if attempt > 0:
                    detail += f"\nRetries: {attempt}"

                self.print_test("End-to-End Workflow", True, detail, duration)
                return True

            except Exception as e:
                # Best-effort cleanup so a failed attempt doesn't leak a
                # running mesh (the original leaked here on this path).
                if mesh is not None:
                    try:
                        await mesh.stop()
                    except Exception:
                        pass
                    mesh = None

                # Last attempt: report failure.
                if attempt == max_retries - 1:
                    duration = asyncio.get_event_loop().time() - test_start

                    error_detail = str(e)
                    if self.verbose:
                        import traceback
                        error_detail = traceback.format_exc()

                    self.print_test("End-to-End Workflow", False, error_detail, duration)
                    return False

                # Otherwise, retry only transient failures.
                error_str = str(e).lower()
                if any(x in error_str for x in ['overloaded', '529', 'rate limit', 'timeout']):
                    if self.verbose:
                        print(f"  LLM temporarily unavailable (attempt {attempt + 1}/{max_retries}), retrying in {retry_delay}s...")
                    await asyncio.sleep(retry_delay)
                    retry_delay *= 2
                    continue

                # Non-retryable error, fail immediately.
                duration = asyncio.get_event_loop().time() - test_start
                error_detail = str(e)
                if self.verbose:
                    import traceback
                    error_detail = traceback.format_exc()
                self.print_test("End-to-End Workflow", False, error_detail, duration)
                return False

        return False

    def print_summary(self):
        """Print the run summary and return overall success as a bool."""
        print("\n" + "=" * 70)
        print("  Summary")
        print("=" * 70 + "\n")

        total = len(self.passed) + len(self.failed)
        # Compare against None, not truthiness — an event-loop timestamp of
        # 0.0 is a legitimate value.
        if self.start_time is not None and self.end_time is not None:
            duration = self.end_time - self.start_time
        else:
            duration = 0

        print(f"Tests run: {total}")
        print(f"Passed: {len(self.passed)} ✓")
        print(f"Failed: {len(self.failed)} ✗")
        print(f"Duration: {duration:.2f}s")

        if self.failed:
            print("\n" + "=" * 70)
            print("  Failed Tests")
            print("=" * 70 + "\n")

            for name, detail in self.failed:
                print(f"✗ {name}")
                if detail:
                    print(f"  {detail}\n")

            print("\n" + "=" * 70)
            print("  Troubleshooting")
            print("=" * 70)

            if any("No LLM provider" in detail for _, detail in self.failed):
                print("\nNo LLM configured:")
                print("  1. Copy .env.example to .env")
                print("  2. Add your API key (CLAUDE_API_KEY, AZURE_API_KEY, or GEMINI_API_KEY)")
                print("  3. Run health check: python -m jarviscore.cli.check --validate-llm")

            if any("Task failed" in detail or "error" in detail.lower() for _, detail in self.failed):
                print("\nExecution failed:")
                print("  1. Check LLM API key is valid")
                print("  2. Test connectivity: python -m jarviscore.cli.check --validate-llm")
                print("  3. Check logs: ls -la logs/")
                print("  4. Run with verbose: python -m jarviscore.cli.smoketest --verbose")

            print()
            return False

        print("\n✓ All smoke tests passed!")
        print("\nJarvisCore is working correctly. Next steps:")
        print("  1. Try examples: python examples/calculator_agent_example.py")
        print("  2. Read user guide: docs/USER_GUIDE.md")
        print("  3. Build your first agent: docs/GETTING_STARTED.md")
        print()
        return True

    async def run(self) -> bool:
        """Run all smoke tests; return True when everything passed."""
        self.print_header()
        self.start_time = asyncio.get_event_loop().time()

        print("[Framework Tests]")
        imports_ok = await self.test_imports()
        if not imports_ok:
            self.end_time = asyncio.get_event_loop().time()
            self.print_summary()
            return False

        config_ok = await self.test_env_config()
        if not config_ok:
            self.end_time = asyncio.get_event_loop().time()
            self.print_summary()
            return False

        mesh_ok = await self.test_mesh_creation()
        agent_ok = await self.test_agent_definition()

        # Only attempt the expensive end-to-end run when its prerequisites
        # (mesh creation and agent definition) both succeeded; the failed
        # prerequisite is already recorded and will fail the summary.
        if mesh_ok and agent_ok:
            print("\n[Integration Test]")
            await self.test_end_to_end_execution()

        self.end_time = asyncio.get_event_loop().time()

        return self.print_summary()
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
def main():
    """CLI entry point for the smoke test."""
    import argparse

    arg_parser = argparse.ArgumentParser(
        description='JarvisCore Smoke Test - Quick validation')
    arg_parser.add_argument(
        '--verbose',
        action='store_true',
        help='Show detailed output and stack traces')
    options = arg_parser.parse_args()

    # Run the async test suite to completion and translate its boolean
    # outcome into a conventional process exit code (0 = success).
    runner = SmokeTest(verbose=options.verbose)
    ok = asyncio.run(runner.run())
    sys.exit(0 if ok else 1)


if __name__ == '__main__':
    main()
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""
|
|
2
|
+
JarvisCore Framework Configuration
|
|
3
|
+
|
|
4
|
+
Zero-config with standard environment variables (no prefix needed).
|
|
5
|
+
|
|
6
|
+
Configuration can be provided via:
|
|
7
|
+
1. Standard environment variables (CLAUDE_API_KEY, AZURE_OPENAI_KEY, etc.)
|
|
8
|
+
2. .env file
|
|
9
|
+
3. Direct config dictionary passed to Mesh
|
|
10
|
+
|
|
11
|
+
Example:
|
|
12
|
+
# Via environment (standard names)
|
|
13
|
+
export CLAUDE_API_KEY="sk-..."
|
|
14
|
+
export AZURE_OPENAI_KEY="..."
|
|
15
|
+
export BIND_HOST="0.0.0.0"
|
|
16
|
+
export BIND_PORT=7946
|
|
17
|
+
|
|
18
|
+
# Via config dict
|
|
19
|
+
config = {
|
|
20
|
+
'bind_host': '0.0.0.0',
|
|
21
|
+
'bind_port': 7946,
|
|
22
|
+
'seed_nodes': '192.168.1.100:7946'
|
|
23
|
+
}
|
|
24
|
+
mesh = Mesh(mode="distributed", config=config)
|
|
25
|
+
"""
|
|
26
|
+
import os
|
|
27
|
+
from typing import Optional
|
|
28
|
+
from pydantic_settings import BaseSettings
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class Settings(BaseSettings):
    """
    Framework configuration with zero-config defaults.
    Uses standard environment variable names (no JARVISCORE_ prefix).

    Every field below can be overridden by an environment variable of the
    same name (case-insensitive) or by an entry in a local .env file.
    """

    # === P2P Settings ===
    node_name: str = "jarviscore-node"
    bind_host: str = "127.0.0.1"
    bind_port: int = 7946
    seed_nodes: str = ""  # Comma-separated "host:port,host:port"
    p2p_enabled: bool = True
    zmq_port_offset: int = 1000  # ZMQ port = bind_port + this offset
    transport_type: str = "hybrid"  # udp, tcp, or hybrid

    # === Keepalive Settings ===
    keepalive_enabled: bool = True
    keepalive_interval: int = 90  # seconds
    keepalive_timeout: int = 10
    activity_suppress_window: int = 60  # skip keepalive if recent activity (s)

    # === Execution Settings ===
    max_retries: int = 3
    max_repair_attempts: int = 3
    execution_timeout: int = 300  # seconds

    # === Sandbox Settings ===
    sandbox_mode: str = "local"  # "local" or "remote"
    sandbox_service_url: Optional[str] = None  # URL for remote sandbox

    # === Storage Settings ===
    log_directory: str = "./logs"

    # === LLM Configuration ===
    llm_timeout: float = 120.0
    llm_temperature: float = 0.7

    # Claude
    claude_api_key: Optional[str] = None
    claude_endpoint: Optional[str] = None
    claude_model: str = "claude-sonnet-4"
    anthropic_api_key: Optional[str] = None  # Alias for claude_api_key

    # Azure OpenAI
    azure_api_key: Optional[str] = None
    azure_openai_key: Optional[str] = None  # Alias
    azure_endpoint: Optional[str] = None
    azure_openai_endpoint: Optional[str] = None  # Alias
    azure_deployment: str = "gpt-4o"
    azure_api_version: str = "2024-02-15-preview"

    # Gemini
    gemini_api_key: Optional[str] = None
    gemini_model: str = "gemini-1.5-flash"
    gemini_temperature: float = 0.1
    gemini_timeout: float = 30.0

    # vLLM
    llm_endpoint: Optional[str] = None
    vllm_endpoint: Optional[str] = None  # Alias
    llm_model: str = "default"

    # === Logging ===
    log_level: str = "INFO"

    class Config:
        # Values are read from a local .env file when present; unknown
        # variables are ignored so the .env can also hold app-specific keys.
        env_file = ".env"
        env_file_encoding = "utf-8"
        case_sensitive = False
        extra = "ignore"
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def get_config_from_dict(config_dict: Optional[dict] = None) -> dict:
    """
    Get configuration from dictionary or environment.

    Args:
        config_dict: Optional configuration dictionary

    Returns:
        Configuration dictionary with defaults applied
    """
    # Start from the environment-backed settings instance; fall back to an
    # empty mapping if pydantic cannot produce a dump for any reason.
    try:
        merged = settings.model_dump()
    except Exception:
        merged = {}

    # Caller-supplied values take precedence over environment defaults.
    if config_dict:
        merged.update(config_dict)

    return merged
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
# Global settings instance - loads from .env automatically.
# Instantiated once at import time so every consumer shares the same
# Settings object (and the .env file is read only once).
settings = Settings()
|
jarviscore/core/agent.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent base class - defines WHAT an agent does (role, capabilities).
|
|
3
|
+
|
|
4
|
+
This is the foundation of the JarvisCore framework. All agents inherit from this class.
|
|
5
|
+
"""
|
|
6
|
+
from abc import ABC, abstractmethod
|
|
7
|
+
from typing import List, Dict, Any, Optional
|
|
8
|
+
from uuid import uuid4
|
|
9
|
+
import logging
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class Agent(ABC):
    """
    Base class for all agents in JarvisCore framework.

    Agents define WHAT they do via class attributes:
    - role: The agent's role identifier
    - capabilities: List of capabilities this agent provides

    Subclasses (Profiles) define HOW they execute tasks.

    Example:
        class MyAgent(PromptDevAgent):
            role = "scraper"
            capabilities = ["web_scraping", "data_extraction"]
            system_prompt = "You are an expert web scraper..."
    """

    # Class attributes - user must define these in subclasses.
    # NOTE: annotated Optional[str] — the default really is None until a
    # subclass overrides it (the original `role: str = None` was mis-typed).
    role: Optional[str] = None
    capabilities: List[str] = []

    def __init__(self, agent_id: Optional[str] = None):
        """
        Initialize agent with validation.

        Args:
            agent_id: Optional unique identifier (auto-generated if not provided)

        Raises:
            ValueError: If role or capabilities are not defined
        """
        # Validate required class attributes early so misconfigured
        # subclasses fail at construction time with a clear message.
        if not self.role:
            raise ValueError(
                f"{self.__class__.__name__} must define 'role' class attribute\n"
                f"Example: role = 'scraper'"
            )

        if not self.capabilities:
            raise ValueError(
                f"{self.__class__.__name__} must define 'capabilities' class attribute\n"
                f"Example: capabilities = ['web_scraping']"
            )

        # Initialize instance attributes.
        # agent_id defaults to "<role>-<8 hex chars>" for uniqueness.
        self.agent_id = agent_id or f"{self.role}-{uuid4().hex[:8]}"
        self._mesh = None  # Set by Mesh when agent is added
        self._logger = logging.getLogger(f"jarviscore.agent.{self.agent_id}")

        self._logger.debug(f"Agent initialized: {self.agent_id}")

    @abstractmethod
    async def execute_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a task (implemented by profile subclasses).

        This defines HOW the agent executes tasks. Different profiles implement
        this differently:
        - PromptDevAgent: LLM code generation + sandbox execution
        - MCPAgent: User-defined MCP tool calls

        Args:
            task: Task specification containing:
                - task (str): Task description
                - id (str): Task identifier
                - params (dict, optional): Additional parameters

        Returns:
            Result dictionary containing:
                - status (str): "success" or "failure"
                - output (Any): Task output
                - error (str, optional): Error message if failed
                - tokens_used (int, optional): LLM tokens consumed
                - cost_usd (float, optional): Cost in USD

        Raises:
            NotImplementedError: If subclass doesn't implement this method
        """
        # Belt-and-braces: @abstractmethod already blocks instantiation, but
        # a subclass calling super().execute_task() still gets a clear error.
        raise NotImplementedError(
            f"{self.__class__.__name__} must implement execute_task()"
        )

    async def setup(self):
        """
        Optional setup hook called when agent joins mesh.

        Override this to perform initialization:
        - Connect to external services
        - Load models
        - Setup resources

        Example:
            async def setup(self):
                await super().setup()
                self.db = await connect_to_database()
        """
        self._logger.info(f"Setting up agent: {self.agent_id}")

    async def teardown(self):
        """
        Optional cleanup hook called when agent leaves mesh.

        Override this to cleanup resources:
        - Close connections
        - Save state
        - Release resources

        Example:
            async def teardown(self):
                await self.db.close()
                await super().teardown()
        """
        self._logger.info(f"Tearing down agent: {self.agent_id}")

    def can_handle(self, task: Dict[str, Any]) -> bool:
        """
        Check if agent can handle a task based on capabilities.

        Args:
            task: Task specification with 'capability' or 'role' key

        Returns:
            True if agent has the required capability

        Example:
            task = {"task": "Scrape website", "role": "scraper"}
            if agent.can_handle(task):
                result = await agent.execute_task(task)
        """
        # 'capability' takes precedence over 'role' when both are present.
        required = task.get("capability") or task.get("role")
        can_handle = required in self.capabilities or required == self.role

        self._logger.debug(
            f"Can handle task requiring '{required}': {can_handle}"
        )

        return can_handle

    def __repr__(self) -> str:
        """String representation of agent."""
        return (
            f"<{self.__class__.__name__} "
            f"id={self.agent_id} "
            f"role={self.role} "
            f"capabilities={self.capabilities}>"
        )

    def __str__(self) -> str:
        """Human-readable string representation."""
        return f"{self.role} ({self.agent_id})"
|