jarviscore_framework-0.3.0-py3-none-any.whl → jarviscore_framework-0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. examples/cloud_deployment_example.py +3 -3
  2. examples/{listeneragent_cognitive_discovery_example.py → customagent_cognitive_discovery_example.py} +55 -14
  3. examples/customagent_distributed_example.py +140 -1
  4. examples/fastapi_integration_example.py +74 -11
  5. jarviscore/__init__.py +8 -11
  6. jarviscore/cli/smoketest.py +1 -1
  7. jarviscore/core/mesh.py +158 -0
  8. jarviscore/data/examples/cloud_deployment_example.py +3 -3
  9. jarviscore/data/examples/custom_profile_decorator.py +134 -0
  10. jarviscore/data/examples/custom_profile_wrap.py +168 -0
  11. jarviscore/data/examples/{listeneragent_cognitive_discovery_example.py → customagent_cognitive_discovery_example.py} +55 -14
  12. jarviscore/data/examples/customagent_distributed_example.py +140 -1
  13. jarviscore/data/examples/fastapi_integration_example.py +74 -11
  14. jarviscore/docs/API_REFERENCE.md +576 -47
  15. jarviscore/docs/CHANGELOG.md +131 -0
  16. jarviscore/docs/CONFIGURATION.md +1 -1
  17. jarviscore/docs/CUSTOMAGENT_GUIDE.md +591 -153
  18. jarviscore/docs/GETTING_STARTED.md +186 -329
  19. jarviscore/docs/TROUBLESHOOTING.md +1 -1
  20. jarviscore/docs/USER_GUIDE.md +292 -12
  21. jarviscore/integrations/fastapi.py +4 -4
  22. jarviscore/p2p/coordinator.py +36 -7
  23. jarviscore/p2p/messages.py +13 -0
  24. jarviscore/p2p/peer_client.py +380 -21
  25. jarviscore/p2p/peer_tool.py +17 -11
  26. jarviscore/profiles/__init__.py +2 -4
  27. jarviscore/profiles/customagent.py +302 -74
  28. jarviscore/testing/__init__.py +35 -0
  29. jarviscore/testing/mocks.py +578 -0
  30. {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/METADATA +61 -46
  31. {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/RECORD +42 -34
  32. tests/test_13_dx_improvements.py +37 -37
  33. tests/test_15_llm_cognitive_discovery.py +18 -18
  34. tests/test_16_unified_dx_flow.py +3 -3
  35. tests/test_17_session_context.py +489 -0
  36. tests/test_18_mesh_diagnostics.py +465 -0
  37. tests/test_19_async_requests.py +516 -0
  38. tests/test_20_load_balancing.py +546 -0
  39. tests/test_21_mock_testing.py +776 -0
  40. jarviscore/profiles/listeneragent.py +0 -292
  41. {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/WHEEL +0 -0
  42. {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/licenses/LICENSE +0 -0
  43. {jarviscore_framework-0.3.0.dist-info → jarviscore_framework-0.3.2.dist-info}/top_level.txt +0 -0
examples/customagent_distributed_example.py

@@ -6,6 +6,10 @@ Demonstrates CustomAgent in distributed mode, which combines:
 - Workflow orchestration (step execution, dependencies)
 - User-controlled execution logic (you write execute_task)
 
+v0.3.2 Features Demonstrated:
+- Async Requests (ask_async) - Non-blocking parallel requests to multiple agents
+- Load Balancing (strategy="round_robin") - Distribute requests across agent instances
+
 This is ideal for:
 - Multi-node deployments with custom logic
 - Integrating external frameworks (LangChain, CrewAI, etc.)
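
The two bullets added above compress the main API difference: `peers.request()` awaits one reply at a time, while `ask_async()` hands back an awaitable future immediately. A minimal sketch of the contrast, assuming the same `peers` handle and call signatures the demos later in this diff use:

    import asyncio

    async def sequential_style(peers):
        # Blocking style: the second request starts only after the first returns.
        a = await peers.request("content_writer", {"question": "Q1"}, timeout=30)
        b = await peers.request("content_reviewer", {"question": "Q2"}, timeout=30)
        return [a, b]

    async def parallel_style(peers):
        # ask_async returns awaitable futures at once, so both requests are
        # in flight simultaneously; gather collects the results (or exceptions).
        f1 = peers.ask_async("content_writer", {"question": "Q1"})
        f2 = peers.ask_async("content_reviewer", {"question": "Q2"})
        return await asyncio.gather(f1, f2, return_exceptions=True)

With two peers that each take about a second to reply, the parallel version finishes in roughly one second instead of two.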
@@ -231,6 +235,7 @@ async def main():
     """Run CustomAgent distributed mode example."""
     print("\n" + "="*70)
     print("JarvisCore: CustomAgent in Distributed Mode")
+    print("v0.3.2: Also supports --async and --load-balance demos")
     print("="*70)
 
     # ─────────────────────────────────────────────────────────────────────────
@@ -358,5 +363,139 @@ async def peer_communication_example():
     pass
 
 
+# ═══════════════════════════════════════════════════════════════════════════════
+# v0.3.2 FEATURES: ASYNC REQUESTS & LOAD BALANCING
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def async_requests_demo():
+    """
+    Demonstrate v0.3.2 async requests for parallel agent communication.
+
+    ask_async() returns a Future that can be awaited later, enabling:
+    - Fire multiple requests in parallel
+    - Continue other work while waiting
+    - Gather results when needed
+    """
+    print("\n" + "="*70)
+    print("v0.3.2 Feature: Async Requests (ask_async)")
+    print("="*70)
+
+    mesh = Mesh(mode="p2p", config={"bind_port": 7966})
+
+    # Add multiple agents
+    mesh.add(ContentResearcherAgent)
+    mesh.add(ContentWriterAgent)
+    mesh.add(ContentReviewerAgent)
+
+    try:
+        await mesh.start()
+
+        # Get an agent with peer access
+        researcher = next((a for a in mesh.agents if a.role == "content_researcher"), None)
+        if not researcher or not researcher.peers:
+            print("Peers not available")
+            return
+
+        print("\n[Demo] Firing parallel requests to multiple agents...")
+
+        # v0.3.2: ask_async returns a Future - doesn't block!
+        future1 = researcher.peers.ask_async(
+            "content_writer",
+            {"question": "What makes good technical writing?"}
+        )
+        future2 = researcher.peers.ask_async(
+            "content_reviewer",
+            {"question": "What are common writing mistakes?"}
+        )
+
+        print("[Demo] Requests sent! Doing other work while waiting...")
+        await asyncio.sleep(0.1)  # Simulate other work
+
+        # Gather results when ready
+        print("[Demo] Gathering results...")
+        results = await asyncio.gather(future1, future2, return_exceptions=True)
+
+        for i, result in enumerate(results):
+            if isinstance(result, Exception):
+                print(f"  Request {i+1}: Error - {result}")
+            else:
+                print(f"  Request {i+1}: Got response")
+
+        print("\n[Demo] Async requests complete!")
+
+    finally:
+        await mesh.stop()
+
+
+async def load_balancing_demo():
+    """
+    Demonstrate v0.3.2 load balancing strategies.
+
+    When multiple agents have the same capability, use strategy parameter:
+    - "random" (default): Random selection
+    - "round_robin": Distribute evenly across instances
+    """
+    print("\n" + "="*70)
+    print("v0.3.2 Feature: Load Balancing Strategies")
+    print("="*70)
+
+    mesh = Mesh(mode="p2p", config={"bind_port": 7967})
+
+    # Add agents
+    mesh.add(ContentResearcherAgent)
+    mesh.add(ContentWriterAgent)
+
+    try:
+        await mesh.start()
+
+        researcher = next((a for a in mesh.agents if a.role == "content_researcher"), None)
+        if not researcher or not researcher.peers:
+            print("Peers not available")
+            return
+
+        print("\n[Demo] Load balancing with strategy='round_robin'")
+        print("[Demo] Sending 3 requests to 'writing' capability...")
+
+        # v0.3.2: Use discover_one() with strategy for load balancing
+        for i in range(3):
+            # round_robin distributes requests evenly across matching peers
+            # First, discover which peer to use with the strategy
+            target = researcher.peers.discover_one(
+                role="content_writer",
+                strategy="round_robin"  # v0.3.2: Load balancing
+            )
+
+            if target:
+                # Then make the request to that specific peer
+                response = await researcher.peers.request(
+                    target.role,
+                    {"question": f"Request #{i+1}"},
+                    timeout=10
+                )
+                print(f"  Request {i+1}: Handled by {target.agent_id[:8]}...")
+            else:
+                print(f"  Request {i+1}: No peer found")
+
+        print("\n[Demo] Load balancing complete!")
+        print("[Demo] In a multi-node setup with multiple writers,")
+        print("       round_robin would distribute across all instances.")
+
+    finally:
+        await mesh.stop()
+
+
 if __name__ == "__main__":
-    asyncio.run(main())
+    import sys
+
+    if len(sys.argv) > 1:
+        if sys.argv[1] == "--async":
+            asyncio.run(async_requests_demo())
+        elif sys.argv[1] == "--load-balance":
+            asyncio.run(load_balancing_demo())
+        else:
+            print("Usage:")
+            print("  python customagent_distributed_example.py                 # Main workflow demo")
+            print("  python customagent_distributed_example.py --async         # Async requests demo")
+            print("  python customagent_distributed_example.py --load-balance  # Load balancing demo")
+    else:
+        asyncio.run(main())
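
Note that the demo resolves the strategy at discovery time: `discover_one(..., strategy="round_robin")` picks the peer, then `request()` targets it. The selector itself is not part of this diff; a generic round-robin picker behaves like the following hypothetical sketch (`RoundRobinSelector` is illustrative, not a jarviscore class):

    from itertools import count

    class RoundRobinSelector:
        """Cycles through matching peers so repeated calls spread load evenly."""

        def __init__(self):
            self._counter = count()

        def pick(self, peers: list):
            # peers: candidate peer descriptors matching a role/capability.
            if not peers:
                return None
            # Successive calls step through the list and wrap around.
            return peers[next(self._counter) % len(peers)]

    selector = RoundRobinSelector()
    writers = ["writer-node-a", "writer-node-b", "writer-node-c"]
    picked = [selector.pick(writers) for _ in range(5)]
    # -> ['writer-node-a', 'writer-node-b', 'writer-node-c', 'writer-node-a', 'writer-node-b']

Because the counter wraps modulo the candidate list, repeated calls spread requests evenly, which is what the demo would show with several writer instances on different nodes.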
examples/fastapi_integration_example.py

@@ -1,13 +1,15 @@
 """
-FastAPI Integration Example (v0.3.0)
+FastAPI Integration Example (v0.3.0 + v0.3.2)
 
 Demonstrates JarvisLifespan for 3-line FastAPI integration with autonomous agents.
 
 Features shown:
 1. JarvisLifespan - Automatic agent lifecycle management
-2. ListenerAgent - API-first agents with on_peer_request handlers
+2. CustomAgent - API-first agents with on_peer_request handlers
 3. Cognitive Discovery - get_cognitive_context() for LLM awareness
 4. Autonomous Agents - Each agent has MESH as a TOOL, LLM decides when to delegate
+5. Mesh Diagnostics (v0.3.2) - /health endpoint using get_diagnostics()
+6. Session Context (v0.3.2) - Request tracking with context parameter
 
 Real-World Flow:
 HTTP Request → Agent A (with LLM) → LLM sees peers as tools
@@ -22,6 +24,9 @@ Usage:
         -H "Content-Type: application/json" \
         -d '{"message": "Analyze the Q4 sales trends"}'
 
+    # Check mesh health (v0.3.2)
+    curl http://localhost:8000/health
+
     # Optional: Start a standalone agent that joins the mesh (in another terminal)
     python examples/fastapi_integration_example.py --join-as scout
 
@@ -32,6 +37,7 @@ Prerequisites:
 import asyncio
 import sys
 import os
+import uuid
 from pathlib import Path
 
 sys.path.insert(0, str(Path(__file__).parent.parent))
@@ -45,14 +51,14 @@ except ImportError:
     FASTAPI_AVAILABLE = False
     print("FastAPI not installed. Run: pip install fastapi uvicorn")
 
-from jarviscore.profiles import ListenerAgent
+from jarviscore.profiles import CustomAgent
 
 
 # ═══════════════════════════════════════════════════════════════════════════════
 # LLM-POWERED AGENT BASE - Each agent can discover and delegate
 # ═══════════════════════════════════════════════════════════════════════════════
 
-class LLMAgent(ListenerAgent):
+class LLMAgent(CustomAgent):
     """
     Base for LLM-powered agents that can discover and delegate to peers.
 
@@ -104,14 +110,27 @@ class LLMAgent(ListenerAgent):
             }
         }]
 
-    async def _ask_peer(self, role: str, question: str) -> dict:
+    async def _ask_peer(self, role: str, question: str, request_id: str = None) -> dict:
         """Execute ask_peer tool - send request to another agent."""
        print(f"[{self.role}] Asking {role}: {question[:50]}...")
-        response = await self.peers.request(role, {"question": question}, timeout=30)
+
+        # v0.3.2: Pass context for request tracking
+        context = {
+            "request_id": request_id or str(uuid.uuid4()),
+            "source_agent": self.role,
+            "tool": "ask_peer"
+        }
+
+        response = await self.peers.request(
+            role,
+            {"question": question},
+            timeout=30,
+            context=context  # v0.3.2: Session context
+        )
         print(f"[{self.role}] Got response from {role}")
         return response
 
-    async def chat(self, message: str) -> dict:
+    async def chat(self, message: str, request_id: str = None) -> dict:
         """
         Process a message with LLM that can discover and delegate to peers.
 
@@ -119,7 +138,14 @@ class LLMAgent(ListenerAgent):
         1. Build system prompt with WHO I AM + WHO ELSE IS AVAILABLE
         2. LLM sees available peers as potential helpers
         3. LLM decides whether to handle directly or delegate
+
+        Args:
+            message: The user message to process
+            request_id: Optional request ID for tracking (v0.3.2)
         """
+        # v0.3.2: Generate request_id for tracking if not provided
+        request_id = request_id or str(uuid.uuid4())
+
         if not self.llm:
             return await self._chat_mock(message)
 
@@ -154,7 +180,8 @@
            role = tool_use_block.input.get("role")
            question = tool_use_block.input.get("question")
 
-           peer_response = await self._ask_peer(role, question)
+           # v0.3.2: Pass request_id for tracing
+           peer_response = await self._ask_peer(role, question, request_id=request_id)
 
            # Continue with tool result
            messages = [{"role": "user", "content": message}]
@@ -467,6 +494,36 @@ def create_app():
         }
         return result
 
+    @app.get("/health")
+    async def health_check(request: Request):
+        """
+        Health check endpoint using mesh diagnostics (v0.3.2).
+
+        Returns mesh status, connected agents, and network health.
+        Useful for Kubernetes probes, load balancer checks, and monitoring.
+        """
+        # Get the mesh from JarvisLifespan state
+        mesh = getattr(request.app.state, "jarvis_mesh", None)
+        if not mesh:
+            return JSONResponse(
+                status_code=503,
+                content={"status": "unhealthy", "error": "Mesh not initialized"}
+            )
+
+        # v0.3.2: Use get_diagnostics() for comprehensive health info
+        diagnostics = mesh.get_diagnostics()
+
+        return {
+            "status": "healthy",
+            "mesh": {
+                "mode": diagnostics.get("mode", "unknown"),
+                "agent_count": diagnostics.get("agent_count", 0),
+                "agents": diagnostics.get("agents", []),
+            },
+            "network": diagnostics.get("network", {}),
+            "uptime_seconds": diagnostics.get("uptime_seconds", 0)
+        }
+
     @app.post("/chat")
     async def chat(request: Request):
         """
@@ -476,16 +533,21 @@
         1. Sees other agents via get_cognitive_context()
         2. Decides if it needs to delegate
         3. Uses ask_peer tool to communicate
+
+        v0.3.2: Supports request_id for tracking across agent boundaries.
         """
         body = await request.json()
         message = body.get("message", "")
 
+        # v0.3.2: Generate request_id for tracking
+        request_id = str(uuid.uuid4())
+
         assistant = request.app.state.jarvis_agents.get("assistant")
         if not assistant:
             return JSONResponse(status_code=503, content={"error": "Assistant not available"})
 
-        result = await assistant.chat(message)
-        return {"message": message, **result}
+        result = await assistant.chat(message, request_id=request_id)
+        return {"message": message, "request_id": request_id, **result}
 
     @app.post("/ask/{agent_role}")
     async def ask_agent(agent_role: str, request: Request):
@@ -544,13 +606,14 @@ def main():
     print("=" * 60)
     print("\n - FastAPI Integration:")
     print("   - JarvisLifespan for one-line integration")
-    print("   - ListenerAgent with on_peer_request handlers")
+    print("   - CustomAgent with on_peer_request handlers")
     print("   - Cognitive discovery via get_cognitive_context()")
     print("\n - Cloud Deployment:")
     print("   - Each agent has MESH as a TOOL")
     print("   - LLM decides when to delegate autonomously")
     print("   - Standalone agents can join with --join-as flag")
     print("\nEndpoints:")
+    print("  GET  /health      - Mesh health diagnostics (v0.3.2)")
     print("  GET  /agents      - Show what each agent sees")
     print("  POST /chat        - Chat with assistant (may delegate)")
     print("  POST /ask/{role}  - Ask specific agent directly")