jarviscore-framework 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/cloud_deployment_example.py +3 -3
- examples/{listeneragent_cognitive_discovery_example.py → customagent_cognitive_discovery_example.py} +55 -14
- examples/customagent_distributed_example.py +140 -1
- examples/fastapi_integration_example.py +74 -11
- jarviscore/__init__.py +8 -11
- jarviscore/cli/smoketest.py +1 -1
- jarviscore/core/mesh.py +158 -0
- jarviscore/data/examples/cloud_deployment_example.py +3 -3
- jarviscore/data/examples/custom_profile_decorator.py +134 -0
- jarviscore/data/examples/custom_profile_wrap.py +168 -0
- jarviscore/data/examples/{listeneragent_cognitive_discovery_example.py → customagent_cognitive_discovery_example.py} +55 -14
- jarviscore/data/examples/customagent_distributed_example.py +140 -1
- jarviscore/data/examples/fastapi_integration_example.py +74 -11
- jarviscore/docs/API_REFERENCE.md +576 -47
- jarviscore/docs/CHANGELOG.md +131 -0
- jarviscore/docs/CONFIGURATION.md +1 -1
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +591 -153
- jarviscore/docs/GETTING_STARTED.md +186 -329
- jarviscore/docs/TROUBLESHOOTING.md +1 -1
- jarviscore/docs/USER_GUIDE.md +292 -12
- jarviscore/integrations/fastapi.py +4 -4
- jarviscore/p2p/coordinator.py +36 -7
- jarviscore/p2p/messages.py +13 -0
- jarviscore/p2p/peer_client.py +380 -21
- jarviscore/p2p/peer_tool.py +17 -11
- jarviscore/profiles/__init__.py +2 -4
- jarviscore/profiles/customagent.py +302 -74
- jarviscore/testing/__init__.py +35 -0
- jarviscore/testing/mocks.py +578 -0
- {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/METADATA +61 -46
- {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/RECORD +42 -34
- tests/test_13_dx_improvements.py +37 -37
- tests/test_15_llm_cognitive_discovery.py +18 -18
- tests/test_16_unified_dx_flow.py +3 -3
- tests/test_17_session_context.py +489 -0
- tests/test_18_mesh_diagnostics.py +465 -0
- tests/test_19_async_requests.py +516 -0
- tests/test_20_load_balancing.py +546 -0
- tests/test_21_mock_testing.py +776 -0
- jarviscore/profiles/listeneragent.py +0 -292
- {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/WHEEL +0 -0
- {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/top_level.txt +0 -0
examples/cloud_deployment_example.py
CHANGED

@@ -28,10 +28,10 @@ import sys
 
 sys.path.insert(0, '.')
 
-from jarviscore.profiles import ListenerAgent
+from jarviscore.profiles import CustomAgent
 
 
-class StandaloneProcessor(ListenerAgent):
+class StandaloneProcessor(CustomAgent):
     """
     Example standalone agent that joins mesh independently.
 
@@ -143,7 +143,7 @@ async def main():
    print("Listening for peer requests...")
    print("Press Ctrl+C to stop.\n")
 
-    # Run agent (ListenerAgent's run() handles the message loop)
+    # Run agent (CustomAgent's run() handles the message loop)
    try:
        await agent.run()
    except asyncio.CancelledError:
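The three changes above are the whole migration pattern for this release: the 0.3.0 ListenerAgent profile was removed (jarviscore/profiles/listeneragent.py shows +0 -292 in the file list) and handler-based agents now subclass CustomAgent. A minimal sketch of a migrated agent, using only what these diffs show; the EchoAgent class and its handler body are illustrative, not taken from the package:

    from jarviscore.profiles import CustomAgent  # 0.3.0 code imported ListenerAgent here

    class EchoAgent(CustomAgent):  # 0.3.0: class EchoAgent(ListenerAgent)
        role = "echo"
        capabilities = ["echo"]

        async def on_peer_request(self, msg):
            # Handler signatures are unchanged by the rename
            return {"echo": msg.data.get("question", "")}

role, capabilities, and the on_peer_request/on_peer_notify handlers carry over unchanged; only the import and base class move.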
examples/{listeneragent_cognitive_discovery_example.py → customagent_cognitive_discovery_example.py}
RENAMED
@@ -1,42 +1,47 @@
 """
-ListenerAgent + Cognitive Discovery Example
+CustomAgent + Cognitive Discovery Example
 
-Demonstrates
+Demonstrates v0.3.0 and v0.3.2 features:
 
-1. ListenerAgent - Handler-based P2P agents (no run() loop needed)
+1. CustomAgent - Handler-based P2P agents (no run() loop needed)
    - on_peer_request() handles incoming requests
    - on_peer_notify() handles broadcast notifications
 
-2. Cognitive Discovery - Dynamic peer awareness for LLMs
+2. Cognitive Discovery (v0.3.0) - Dynamic peer awareness for LLMs
    - get_cognitive_context() generates LLM-ready peer descriptions
    - No hardcoded agent names in prompts
    - LLM autonomously decides when to delegate
 
+3. Session Context (v0.3.2) - Request tracking with metadata
+   - Pass context={mission_id, request_id} with peer requests
+   - Track requests across agent boundaries for debugging/tracing
+
 Usage:
-    python examples/listeneragent_cognitive_discovery_example.py
+    python examples/customagent_cognitive_discovery_example.py
 
 Prerequisites:
    - .env file with CLAUDE_API_KEY (or other LLM provider)
 """
 import asyncio
 import sys
+import uuid
 from pathlib import Path
 
 sys.path.insert(0, str(Path(__file__).parent.parent))
 
 from jarviscore import Mesh
-from jarviscore.profiles import ListenerAgent
+from jarviscore.profiles import CustomAgent
 
 
 # ═══════════════════════════════════════════════════════════════════════════════
 # SPECIALIST AGENT - Responds to requests from other agents
 # ═══════════════════════════════════════════════════════════════════════════════
 
-class AnalystAgent(ListenerAgent):
+class AnalystAgent(CustomAgent):
     """
     Specialist agent that handles analysis requests.
 
-    Uses ListenerAgent profile - just implement handlers, no run() loop needed.
+    Uses CustomAgent profile - just implement handlers, no run() loop needed.
     """
     role = "analyst"
     capabilities = ["data_analysis", "statistics", "insights"]
@@ -45,13 +50,21 @@ class AnalystAgent(ListenerAgent):
     async def on_peer_request(self, msg):
         """Handle incoming analysis requests."""
         query = msg.data.get("question", msg.data.get("query", ""))
 
+        # v0.3.2: Access session context for request tracking
+        context = msg.context or {}
+        mission_id = context.get("mission_id", "unknown")
+        request_id = context.get("request_id", "unknown")
+
+        print(f"\n[Analyst] Received request (mission={mission_id[:8]}..., req={request_id[:8]}...)")
+        print(f"[Analyst] Query: {query[:50]}...")
 
         # Simulate analysis (in real usage, this would use an LLM)
         result = {
             "analysis": f"Analysis of '{query}': The data shows positive trends.",
             "confidence": 0.85,
-            "insights": ["Trend is upward", "Growth rate: 15%", "Recommendation: Continue"]
+            "insights": ["Trend is upward", "Growth rate: 15%", "Recommendation: Continue"],
+            "context": {"mission_id": mission_id, "request_id": request_id}  # Echo back for tracing
         }
 
         print(f"[Analyst] Sending response with {len(result['insights'])} insights")
@@ -62,7 +75,7 @@ class AnalystAgent(ListenerAgent):
 # COORDINATOR AGENT - Uses LLM with cognitive discovery
 # ═══════════════════════════════════════════════════════════════════════════════
 
-class CoordinatorAgent(ListenerAgent):
+class CoordinatorAgent(CustomAgent):
     """
     Coordinator agent that uses LLM with dynamic peer discovery.
 
@@ -78,6 +91,9 @@ class CoordinatorAgent(ListenerAgent):
     async def setup(self):
         await super().setup()
         self.llm = self._create_llm_client()
+        # v0.3.2: Track missions for context propagation
+        self.mission_id = str(uuid.uuid4())
+        self.request_counter = 0
 
     def _create_llm_client(self):
         """Create LLM client with fallback to mock."""
@@ -167,10 +183,22 @@ Never try to do analysis yourself - always delegate to the analyst."""
         # Mock: simulate LLM deciding to delegate
         if any(word in user_query.lower() for word in ["analyze", "analysis", "statistics", "data"]):
             print("[Coordinator] Mock LLM decides to delegate to analyst")
+
+            # v0.3.2: Generate request context for tracking
+            self.request_counter += 1
+            request_context = {
+                "mission_id": self.mission_id,
+                "request_id": str(uuid.uuid4()),
+                "request_num": self.request_counter,
+                "source": "coordinator"
+            }
+            print(f"[Coordinator] Sending with context: mission={self.mission_id[:8]}...")
+
             response = await self.peers.request(
                 "analyst",
                 {"question": user_query},
-                timeout=30
+                timeout=30,
+                context=request_context  # v0.3.2: Pass context
             )
             return f"Based on the analyst's findings: {response.get('analysis', 'No response')}"
         return f"I can help with: {user_query}"
@@ -261,12 +289,24 @@ Never try to do analysis yourself - always delegate to the analyst."""
         role = args.get("role", "")
         question = args.get("question", "")
 
+        # v0.3.2: Generate request context for tracking
+        self.request_counter += 1
+        request_context = {
+            "mission_id": self.mission_id,
+            "request_id": str(uuid.uuid4()),
+            "request_num": self.request_counter,
+            "source": "coordinator",
+            "tool": "ask_peer"
+        }
+
         print(f"[Coordinator] Asking {role}: {question[:50]}...")
+        print(f"[Coordinator] Context: mission={self.mission_id[:8]}..., req_num={self.request_counter}")
 
         response = await self.peers.request(
             role,
             {"question": question},
-            timeout=30
+            timeout=30,
+            context=request_context  # v0.3.2: Pass context
         )
 
         return response
@@ -284,7 +324,8 @@ Never try to do analysis yourself - always delegate to the analyst."""
 
 async def main():
    print("=" * 60)
-    print("
+    print("CustomAgent + Cognitive Discovery + Session Context")
+    print("Features: v0.3.0 Cognitive Discovery, v0.3.2 Session Context")
    print("=" * 60)
 
    # Create mesh with both agents
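Stripped of the example scaffolding, the v0.3.2 session-context hunks above form one symmetric pattern: the caller attaches a plain dict through the new context parameter of peers.request(), and the receiving handler reads the same dict back from msg.context. A condensed sketch of that round trip; the two classes are illustrative stand-ins for the coordinator and analyst in the example:

    import uuid
    from jarviscore.profiles import CustomAgent

    class TracingCoordinator(CustomAgent):
        role = "coordinator"
        capabilities = ["coordination"]

        async def delegate(self, question: str) -> dict:
            # Caller side: attach tracing metadata via context= (v0.3.2)
            ctx = {"mission_id": str(uuid.uuid4()), "request_id": str(uuid.uuid4())}
            return await self.peers.request(
                "analyst",
                {"question": question},
                timeout=30,
                context=ctx,
            )

    class TracingAnalyst(CustomAgent):
        role = "analyst"
        capabilities = ["data_analysis"]

        async def on_peer_request(self, msg):
            # Handler side: the caller's dict arrives as msg.context (v0.3.2)
            ctx = msg.context or {}
            return {
                "analysis": f"handled request {ctx.get('request_id', 'unknown')[:8]}",
                "context": ctx,  # echo back for end-to-end tracing
            }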
examples/customagent_distributed_example.py
CHANGED

@@ -6,6 +6,10 @@ Demonstrates CustomAgent in distributed mode, which combines:
 - Workflow orchestration (step execution, dependencies)
 - User-controlled execution logic (you write execute_task)
 
+v0.3.2 Features Demonstrated:
+- Async Requests (ask_async) - Non-blocking parallel requests to multiple agents
+- Load Balancing (strategy="round_robin") - Distribute requests across agent instances
+
 This is ideal for:
 - Multi-node deployments with custom logic
 - Integrating external frameworks (LangChain, CrewAI, etc.)
@@ -231,6 +235,7 @@
    """Run CustomAgent distributed mode example."""
    print("\n" + "="*70)
    print("JarvisCore: CustomAgent in Distributed Mode")
+    print("v0.3.2: Also supports --async and --load-balance demos")
    print("="*70)
 
    # ─────────────────────────────────────────────────────────────────────────
@@ -358,5 +363,139 @@ async def peer_communication_example():
    pass
 
 
+# ═══════════════════════════════════════════════════════════════════════════════
+# v0.3.2 FEATURES: ASYNC REQUESTS & LOAD BALANCING
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def async_requests_demo():
+    """
+    Demonstrate v0.3.2 async requests for parallel agent communication.
+
+    ask_async() returns a Future that can be awaited later, enabling:
+    - Fire multiple requests in parallel
+    - Continue other work while waiting
+    - Gather results when needed
+    """
+    print("\n" + "="*70)
+    print("v0.3.2 Feature: Async Requests (ask_async)")
+    print("="*70)
+
+    mesh = Mesh(mode="p2p", config={"bind_port": 7966})
+
+    # Add multiple agents
+    mesh.add(ContentResearcherAgent)
+    mesh.add(ContentWriterAgent)
+    mesh.add(ContentReviewerAgent)
+
+    try:
+        await mesh.start()
+
+        # Get an agent with peer access
+        researcher = next((a for a in mesh.agents if a.role == "content_researcher"), None)
+        if not researcher or not researcher.peers:
+            print("Peers not available")
+            return
+
+        print("\n[Demo] Firing parallel requests to multiple agents...")
+
+        # v0.3.2: ask_async returns a Future - doesn't block!
+        future1 = researcher.peers.ask_async(
+            "content_writer",
+            {"question": "What makes good technical writing?"}
+        )
+        future2 = researcher.peers.ask_async(
+            "content_reviewer",
+            {"question": "What are common writing mistakes?"}
+        )
+
+        print("[Demo] Requests sent! Doing other work while waiting...")
+        await asyncio.sleep(0.1)  # Simulate other work
+
+        # Gather results when ready
+        print("[Demo] Gathering results...")
+        results = await asyncio.gather(future1, future2, return_exceptions=True)
+
+        for i, result in enumerate(results):
+            if isinstance(result, Exception):
+                print(f" Request {i+1}: Error - {result}")
+            else:
+                print(f" Request {i+1}: Got response")
+
+        print("\n[Demo] Async requests complete!")
+
+    finally:
+        await mesh.stop()
+
+
+async def load_balancing_demo():
+    """
+    Demonstrate v0.3.2 load balancing strategies.
+
+    When multiple agents have the same capability, use strategy parameter:
+    - "random" (default): Random selection
+    - "round_robin": Distribute evenly across instances
+    """
+    print("\n" + "="*70)
+    print("v0.3.2 Feature: Load Balancing Strategies")
+    print("="*70)
+
+    mesh = Mesh(mode="p2p", config={"bind_port": 7967})
+
+    # Add agents
+    mesh.add(ContentResearcherAgent)
+    mesh.add(ContentWriterAgent)
+
+    try:
+        await mesh.start()
+
+        researcher = next((a for a in mesh.agents if a.role == "content_researcher"), None)
+        if not researcher or not researcher.peers:
+            print("Peers not available")
+            return
+
+        print("\n[Demo] Load balancing with strategy='round_robin'")
+        print("[Demo] Sending 3 requests to 'writing' capability...")
+
+        # v0.3.2: Use discover_one() with strategy for load balancing
+        for i in range(3):
+            # round_robin distributes requests evenly across matching peers
+            # First, discover which peer to use with the strategy
+            target = researcher.peers.discover_one(
+                role="content_writer",
+                strategy="round_robin"  # v0.3.2: Load balancing
+            )
+
+            if target:
+                # Then make the request to that specific peer
+                response = await researcher.peers.request(
+                    target.role,
+                    {"question": f"Request #{i+1}"},
+                    timeout=10
+                )
+                print(f" Request {i+1}: Handled by {target.agent_id[:8]}...")
+            else:
+                print(f" Request {i+1}: No peer found")
+
+        print("\n[Demo] Load balancing complete!")
+        print("[Demo] In a multi-node setup with multiple writers,")
+        print(" round_robin would distribute across all instances.")
+
+    finally:
+        await mesh.stop()
+
+
 if __name__ == "__main__":
-    asyncio.run(main())
+    import sys
+
+    if len(sys.argv) > 1:
+        if sys.argv[1] == "--async":
+            asyncio.run(async_requests_demo())
+        elif sys.argv[1] == "--load-balance":
+            asyncio.run(load_balancing_demo())
+        else:
+            print("Usage:")
+            print(" python customagent_distributed_example.py # Main workflow demo")
+            print(" python customagent_distributed_example.py --async # Async requests demo")
+            print(" python customagent_distributed_example.py --load-balance # Load balancing demo")
+    else:
+        asyncio.run(main())
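The two demos added above reduce to two call patterns: ask_async() sends a request without awaiting the reply and hands back a future, so several requests can be in flight at once, and discover_one(..., strategy="round_robin") picks a target peer before an ordinary request(). A condensed sketch using the roles from the example; the helper function itself is illustrative:

    import asyncio

    async def fan_out_then_balance(agent):
        """agent is any started agent with peer access (agent.peers)."""
        # Parallel fan-out (v0.3.2): both requests are in flight before either reply
        f1 = agent.peers.ask_async("content_writer", {"question": "What makes good technical writing?"})
        f2 = agent.peers.ask_async("content_reviewer", {"question": "What are common writing mistakes?"})
        writer_reply, reviewer_reply = await asyncio.gather(f1, f2, return_exceptions=True)

        # Load-balanced dispatch (v0.3.2): choose the peer first, then send normally
        target = agent.peers.discover_one(role="content_writer", strategy="round_robin")
        if target is None:
            return writer_reply, reviewer_reply, None
        balanced = await agent.peers.request(target.role, {"question": "Draft an intro"}, timeout=10)
        return writer_reply, reviewer_reply, balanced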
examples/fastapi_integration_example.py
CHANGED

@@ -1,13 +1,15 @@
 """
-FastAPI Integration Example (v0.3.0)
+FastAPI Integration Example (v0.3.0 + v0.3.2)
 
 Demonstrates JarvisLifespan for 3-line FastAPI integration with autonomous agents.
 
 Features shown:
 1. JarvisLifespan - Automatic agent lifecycle management
-2. ListenerAgent - API-first agents with on_peer_request handlers
+2. CustomAgent - API-first agents with on_peer_request handlers
 3. Cognitive Discovery - get_cognitive_context() for LLM awareness
 4. Autonomous Agents - Each agent has MESH as a TOOL, LLM decides when to delegate
+5. Mesh Diagnostics (v0.3.2) - /health endpoint using get_diagnostics()
+6. Session Context (v0.3.2) - Request tracking with context parameter
 
 Real-World Flow:
    HTTP Request → Agent A (with LLM) → LLM sees peers as tools
@@ -22,6 +24,9 @@ Usage:
        -H "Content-Type: application/json" \
        -d '{"message": "Analyze the Q4 sales trends"}'
 
+    # Check mesh health (v0.3.2)
+    curl http://localhost:8000/health
+
    # Optional: Start a standalone agent that joins the mesh (in another terminal)
    python examples/fastapi_integration_example.py --join-as scout
 
@@ -32,6 +37,7 @@ Prerequisites:
 import asyncio
 import sys
 import os
+import uuid
 from pathlib import Path
 
 sys.path.insert(0, str(Path(__file__).parent.parent))
@@ -45,14 +51,14 @@ except ImportError:
    FASTAPI_AVAILABLE = False
    print("FastAPI not installed. Run: pip install fastapi uvicorn")
 
-from jarviscore.profiles import ListenerAgent
+from jarviscore.profiles import CustomAgent
 
 
 # ═══════════════════════════════════════════════════════════════════════════════
 # LLM-POWERED AGENT BASE - Each agent can discover and delegate
 # ═══════════════════════════════════════════════════════════════════════════════
 
-class LLMAgent(ListenerAgent):
+class LLMAgent(CustomAgent):
     """
     Base for LLM-powered agents that can discover and delegate to peers.
 
@@ -104,14 +110,27 @@ class LLMAgent(ListenerAgent):
             }
         }]
 
-    async def _ask_peer(self, role: str, question: str) -> dict:
+    async def _ask_peer(self, role: str, question: str, request_id: str = None) -> dict:
         """Execute ask_peer tool - send request to another agent."""
         print(f"[{self.role}] Asking {role}: {question[:50]}...")
-        response = await self.peers.request(role, {"question": question}, timeout=30)
+
+        # v0.3.2: Pass context for request tracking
+        context = {
+            "request_id": request_id or str(uuid.uuid4()),
+            "source_agent": self.role,
+            "tool": "ask_peer"
+        }
+
+        response = await self.peers.request(
+            role,
+            {"question": question},
+            timeout=30,
+            context=context  # v0.3.2: Session context
+        )
         print(f"[{self.role}] Got response from {role}")
         return response
 
-    async def chat(self, message: str) -> dict:
+    async def chat(self, message: str, request_id: str = None) -> dict:
         """
         Process a message with LLM that can discover and delegate to peers.
 
@@ -119,7 +138,14 @@ class LLMAgent(ListenerAgent):
        1. Build system prompt with WHO I AM + WHO ELSE IS AVAILABLE
        2. LLM sees available peers as potential helpers
        3. LLM decides whether to handle directly or delegate
+
+        Args:
+            message: The user message to process
+            request_id: Optional request ID for tracking (v0.3.2)
         """
+        # v0.3.2: Generate request_id for tracking if not provided
+        request_id = request_id or str(uuid.uuid4())
+
         if not self.llm:
             return await self._chat_mock(message)
 
@@ -154,7 +180,8 @@ class LLMAgent(ListenerAgent):
                role = tool_use_block.input.get("role")
                question = tool_use_block.input.get("question")
 
-                peer_response = await self._ask_peer(role, question)
+                # v0.3.2: Pass request_id for tracing
+                peer_response = await self._ask_peer(role, question, request_id=request_id)
 
                # Continue with tool result
                messages = [{"role": "user", "content": message}]
@@ -467,6 +494,36 @@ def create_app():
        }
        return result
 
+    @app.get("/health")
+    async def health_check(request: Request):
+        """
+        Health check endpoint using mesh diagnostics (v0.3.2).
+
+        Returns mesh status, connected agents, and network health.
+        Useful for Kubernetes probes, load balancer checks, and monitoring.
+        """
+        # Get the mesh from JarvisLifespan state
+        mesh = getattr(request.app.state, "jarvis_mesh", None)
+        if not mesh:
+            return JSONResponse(
+                status_code=503,
+                content={"status": "unhealthy", "error": "Mesh not initialized"}
+            )
+
+        # v0.3.2: Use get_diagnostics() for comprehensive health info
+        diagnostics = mesh.get_diagnostics()
+
+        return {
+            "status": "healthy",
+            "mesh": {
+                "mode": diagnostics.get("mode", "unknown"),
+                "agent_count": diagnostics.get("agent_count", 0),
+                "agents": diagnostics.get("agents", []),
+            },
+            "network": diagnostics.get("network", {}),
+            "uptime_seconds": diagnostics.get("uptime_seconds", 0)
+        }
+
    @app.post("/chat")
    async def chat(request: Request):
        """
@@ -476,16 +533,21 @@ def create_app():
        1. Sees other agents via get_cognitive_context()
        2. Decides if it needs to delegate
        3. Uses ask_peer tool to communicate
+
+        v0.3.2: Supports request_id for tracking across agent boundaries.
        """
        body = await request.json()
        message = body.get("message", "")
 
+        # v0.3.2: Generate request_id for tracking
+        request_id = str(uuid.uuid4())
+
        assistant = request.app.state.jarvis_agents.get("assistant")
        if not assistant:
            return JSONResponse(status_code=503, content={"error": "Assistant not available"})
 
-        result = await assistant.chat(message)
-        return {"message": message, **result}
+        result = await assistant.chat(message, request_id=request_id)
+        return {"message": message, "request_id": request_id, **result}
 
    @app.post("/ask/{agent_role}")
    async def ask_agent(agent_role: str, request: Request):
@@ -544,13 +606,14 @@ def main():
    print("=" * 60)
    print("\n - FastAPI Integration:")
    print(" - JarvisLifespan for one-line integration")
-    print(" - ListenerAgent with on_peer_request handlers")
+    print(" - CustomAgent with on_peer_request handlers")
    print(" - Cognitive discovery via get_cognitive_context()")
    print("\n - Cloud Deployment:")
    print(" - Each agent has MESH as a TOOL")
    print(" - LLM decides when to delegate autonomously")
    print(" - Standalone agents can join with --join-as flag")
    print("\nEndpoints:")
+    print(" GET /health - Mesh health diagnostics (v0.3.2)")
    print(" GET /agents - Show what each agent sees")
    print(" POST /chat - Chat with assistant (may delegate)")
    print(" POST /ask/{role} - Ask specific agent directly")
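The new /health route also documents where JarvisLifespan keeps its state: the mesh is stored on app.state.jarvis_mesh (and the agents on app.state.jarvis_agents), and mesh.get_diagnostics() supplies the probe payload. A trimmed-down version of the same endpoint; the JarvisLifespan constructor arguments are elided because the exact signature is not shown in this diff:

    from fastapi import FastAPI, Request
    from jarviscore.integrations.fastapi import JarvisLifespan

    app = FastAPI(lifespan=JarvisLifespan(...))  # pass your agent classes; exact signature assumed

    @app.get("/health")
    async def health(request: Request):
        mesh = getattr(request.app.state, "jarvis_mesh", None)  # stored by JarvisLifespan
        if mesh is None:
            return {"status": "unhealthy", "error": "Mesh not initialized"}
        return {"status": "healthy", **mesh.get_diagnostics()}  # v0.3.2 diagnostics payload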
jarviscore/__init__.py
CHANGED
@@ -4,16 +4,15 @@ JarvisCore - P2P Distributed Agent Framework
 A production-grade framework for building autonomous agent systems with:
 - P2P coordination via SWIM protocol
 - Workflow orchestration with dependencies
--
+- Two agent profiles: AutoAgent and CustomAgent
 
 Profiles:
-    AutoAgent
-    CustomAgent
-    ListenerAgent - API-first agents with background P2P (just implement handlers)
+    AutoAgent - LLM generates and executes code from prompts (autonomous mode)
+    CustomAgent - You provide handlers or execute_task() (p2p/distributed modes)
 
 Modes:
    autonomous - Workflow engine only (AutoAgent)
-    p2p - P2P coordinator only (CustomAgent
+    p2p - P2P coordinator only (CustomAgent with run() loop)
    distributed - Both workflow + P2P (CustomAgent with execute_task())
 
 Quick Start (AutoAgent - autonomous mode):
@@ -30,12 +29,12 @@ Quick Start (AutoAgent - autonomous mode):
    await mesh.start()
    results = await mesh.workflow("calc", [{"agent": "calculator", "task": "Calculate 10!"}])
 
-Quick Start (
+Quick Start (CustomAgent + FastAPI):
    from fastapi import FastAPI
-    from jarviscore.profiles import ListenerAgent
+    from jarviscore.profiles import CustomAgent
    from jarviscore.integrations.fastapi import JarvisLifespan
 
-    class MyAgent(ListenerAgent):
+    class MyAgent(CustomAgent):
        role = "processor"
        capabilities = ["processing"]
 
@@ -61,7 +60,7 @@ Quick Start (CustomAgent - distributed mode):
    results = await mesh.workflow("demo", [{"agent": "processor", "task": "hello"}])
 """
 
-__version__ = "0.3.0"
+__version__ = "0.3.2"
 __author__ = "JarvisCore Contributors"
 __license__ = "MIT"
 
@@ -73,7 +72,6 @@ from jarviscore.core.mesh import Mesh, MeshMode
 # Execution profiles
 from jarviscore.profiles.autoagent import AutoAgent
 from jarviscore.profiles.customagent import CustomAgent
-from jarviscore.profiles.listeneragent import ListenerAgent
 
 # Custom Profile: Decorator, Wrapper, and Context
 from jarviscore.adapter import jarvis_agent, wrap
@@ -99,7 +97,6 @@ __all__ = [
    # Profiles
    "AutoAgent",
    "CustomAgent",
-    "ListenerAgent",
 
    # Custom Profile (decorator and wrapper)
    "jarvis_agent",
jarviscore/cli/smoketest.py
CHANGED

@@ -313,7 +313,7 @@ class SmokeTest:
        print("\nJarvisCore is working correctly. Next steps:")
        print(" 1. AutoAgent example: python examples/calculator_agent_example.py")
        print(" 2. CustomAgent P2P: python examples/customagent_p2p_example.py")
-        print(" 3.
+        print(" 3. Cognitive Discovery: python examples/customagent_cognitive_discovery_example.py")
        print(" 4. FastAPI (v0.3): python examples/fastapi_integration_example.py")
        print(" 5. Cloud deploy (v0.3): python examples/cloud_deployment_example.py")
        print("\nDocumentation:")
|