jarviscore-framework 0.2.0-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. examples/cloud_deployment_example.py +162 -0
  2. examples/customagent_p2p_example.py +566 -183
  3. examples/fastapi_integration_example.py +570 -0
  4. examples/listeneragent_cognitive_discovery_example.py +343 -0
  5. jarviscore/__init__.py +22 -5
  6. jarviscore/cli/smoketest.py +8 -4
  7. jarviscore/core/agent.py +227 -0
  8. jarviscore/data/examples/cloud_deployment_example.py +162 -0
  9. jarviscore/data/examples/customagent_p2p_example.py +566 -183
  10. jarviscore/data/examples/fastapi_integration_example.py +570 -0
  11. jarviscore/data/examples/listeneragent_cognitive_discovery_example.py +343 -0
  12. jarviscore/docs/API_REFERENCE.md +296 -3
  13. jarviscore/docs/CHANGELOG.md +97 -0
  14. jarviscore/docs/CONFIGURATION.md +2 -2
  15. jarviscore/docs/CUSTOMAGENT_GUIDE.md +2021 -255
  16. jarviscore/docs/GETTING_STARTED.md +112 -8
  17. jarviscore/docs/TROUBLESHOOTING.md +3 -3
  18. jarviscore/docs/USER_GUIDE.md +152 -6
  19. jarviscore/integrations/__init__.py +16 -0
  20. jarviscore/integrations/fastapi.py +247 -0
  21. jarviscore/p2p/broadcaster.py +10 -3
  22. jarviscore/p2p/coordinator.py +310 -14
  23. jarviscore/p2p/keepalive.py +45 -23
  24. jarviscore/p2p/peer_client.py +282 -10
  25. jarviscore/p2p/swim_manager.py +9 -4
  26. jarviscore/profiles/__init__.py +10 -2
  27. jarviscore/profiles/listeneragent.py +292 -0
  28. {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/METADATA +42 -8
  29. {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/RECORD +36 -22
  30. {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/WHEEL +1 -1
  31. tests/test_13_dx_improvements.py +554 -0
  32. tests/test_14_cloud_deployment.py +403 -0
  33. tests/test_15_llm_cognitive_discovery.py +684 -0
  34. tests/test_16_unified_dx_flow.py +947 -0
  35. {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/licenses/LICENSE +0 -0
  36. {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/top_level.txt +0 -0
jarviscore/p2p/peer_client.py
@@ -32,6 +32,31 @@ if TYPE_CHECKING:
  logger = logging.getLogger(__name__)


+ class RemoteAgentProxy:
+     """
+     Proxy object for remote agents.
+
+     Used by PeerClient to represent agents on other nodes in the mesh.
+     Contains enough information to route messages via P2P coordinator.
+     """
+
+     def __init__(
+         self,
+         agent_id: str,
+         role: str,
+         node_id: str,
+         capabilities: List[str] = None
+     ):
+         self.agent_id = agent_id
+         self.role = role
+         self.node_id = node_id
+         self.capabilities = capabilities or []
+         self.peers = None # Remote agents don't have local PeerClient
+
+     def __repr__(self):
+         return f"<RemoteAgentProxy {self.role}@{self.node_id}>"
+
+
  class PeerClient:
      """
      Client for peer-to-peer agent communication.
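For reference, a minimal sketch of how the new proxy behaves; the module path is inferred from the file list above, the ids are invented, and the construction mirrors what _resolve_target() does later in this diff.

# Hypothetical values; only the field names and repr format come from the class above.
from jarviscore.p2p.peer_client import RemoteAgentProxy

proxy = RemoteAgentProxy(
    agent_id="agent-analyst-abc123",
    role="analyst",
    node_id="node-2",
    capabilities=["analysis"],
)
print(proxy)        # <RemoteAgentProxy analyst@node-2>
print(proxy.peers)  # None - a remote proxy has no local PeerClient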
@@ -234,15 +259,22 @@ class PeerClient:
          """
          Get detailed list of peers with capabilities.

+         Includes both local and remote agents in the mesh.
+
          Returns:
-             List of dicts with role, agent_id, capabilities, status
+             List of dicts with role, agent_id, capabilities, status, location

          Example:
              peers = self.peers.list_peers()
-             # [{"role": "scout", "capabilities": ["reasoning"], ...}]
+             # [
+             # {"role": "scout", "capabilities": ["reasoning"], "location": "local", ...},
+             # {"role": "analyst", "capabilities": ["analysis"], "location": "remote", ...}
+             # ]
          """
          seen = set()
          peers = []
+
+         # 1. Local agents
          for role_name, agents in self._agent_registry.items():
              for agent in agents:
                  if agent.agent_id != self._agent_id and agent.agent_id not in seen:
@@ -251,8 +283,27 @@ class PeerClient:
                          "role": agent.role,
                          "agent_id": agent.agent_id,
                          "capabilities": list(agent.capabilities),
-                         "status": "online"
+                         "description": getattr(agent, 'description', ''),
+                         "status": "online",
+                         "location": "local"
+                     })
+
+         # 2. Remote agents from coordinator
+         if self._coordinator:
+             for remote in self._coordinator.list_remote_agents():
+                 agent_id = remote.get('agent_id')
+                 if agent_id and agent_id not in seen:
+                     seen.add(agent_id)
+                     peers.append({
+                         "role": remote.get('role', 'unknown'),
+                         "agent_id": agent_id,
+                         "capabilities": remote.get('capabilities', []),
+                         "description": remote.get('description', ''),
+                         "status": "online",
+                         "location": "remote",
+                         "node_id": remote.get('node_id', '')
                      })
+
          return peers

      # ─────────────────────────────────────────────────────────────────
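A short sketch of consuming the richer list_peers() output. It assumes a PeerClient is in scope (e.g. self.peers inside an agent, as in the docstring example); the helper name is made up, while the dict keys are the ones built in this hunk.

# Illustrative helper, not part of the package.
def remote_peers_with(peer_client, capability: str):
    """Remote mesh peers that advertise the given capability."""
    return [
        p for p in peer_client.list_peers()
        if p["location"] == "remote" and capability in p["capabilities"]
    ]

# Inside an agent, self.peers is the PeerClient:
# for p in remote_peers_with(self.peers, "analysis"):
#     print(p["role"], p["agent_id"], p["node_id"], p["status"])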
@@ -439,6 +490,183 @@ class PeerClient:
          from .peer_tool import PeerTool
          return PeerTool(self)

+     # ─────────────────────────────────────────────────────────────────
+     # COGNITIVE CONTEXT (for LLM prompts)
+     # ─────────────────────────────────────────────────────────────────
+
+     def get_cognitive_context(
+         self,
+         format: str = "markdown",
+         include_capabilities: bool = True,
+         include_description: bool = True,
+         tool_name: str = "ask_peer"
+     ) -> str:
+         """
+         Generate a prompt-ready description of available mesh peers.
+
+         Bridges the gap between the Network Layer (who is connected)
+         and the Cognitive Layer (who the LLM should know about).
+
+         This enables dynamic system prompts that automatically update
+         as peers join or leave the mesh, eliminating hardcoded peer names.
+
+         Args:
+             format: Output format - "markdown", "json", or "text"
+             include_capabilities: Include peer capabilities in output
+             include_description: Include peer descriptions in output
+             tool_name: Name of the tool for peer communication
+
+         Returns:
+             Formatted string suitable for inclusion in system prompts
+
+         Example - Basic Usage:
+             system_prompt = BASE_PROMPT + "\\n\\n" + self.peers.get_cognitive_context()
+
+         Example - Output (markdown):
+             ## AVAILABLE MESH PEERS
+
+             You are part of a multi-agent mesh. The following peers are available:
+
+             - **analyst** (`agent-analyst-abc123`)
+               - Capabilities: analysis, charting, reporting
+               - Description: Analyzes data and generates insights
+
+             - **scout** (`agent-scout-def456`)
+               - Capabilities: research, reconnaissance
+               - Description: Gathers information from external sources
+
+             Use the `ask_peer` tool to delegate tasks to these specialists.
+         """
+         peers = self.list_peers()
+
+         if not peers:
+             return "No other agents are currently available in the mesh."
+
+         if format == "json":
+             return self._format_cognitive_json(peers)
+         elif format == "text":
+             return self._format_cognitive_text(peers, include_capabilities, tool_name)
+         else: # markdown (default)
+             return self._format_cognitive_markdown(
+                 peers, include_capabilities, include_description, tool_name
+             )
+
+     def _format_cognitive_markdown(
+         self,
+         peers: List[Dict[str, Any]],
+         include_capabilities: bool,
+         include_description: bool,
+         tool_name: str
+     ) -> str:
+         """Format peers as markdown for LLM consumption."""
+         lines = [
+             "## AVAILABLE MESH PEERS",
+             "",
+             "You are part of a multi-agent mesh. The following peers are available for collaboration:",
+             ""
+         ]
+
+         for peer in peers:
+             role = peer.get("role", "unknown")
+             agent_id = peer.get("agent_id", "unknown")
+             capabilities = peer.get("capabilities", [])
+             description = peer.get("description", "")
+
+             # Role line with agent ID
+             lines.append(f"- **{role}** (`{agent_id}`)")
+
+             # Capabilities
+             if include_capabilities and capabilities:
+                 lines.append(f" - Capabilities: {', '.join(capabilities)}")
+
+             # Description
+             if include_description:
+                 desc = description if description else f"Specialist in {role} tasks"
+                 lines.append(f" - Description: {desc}")
+
+             lines.append("") # Blank line between peers
+
+         # Usage instructions
+         lines.append(f"Use the `{tool_name}` tool to delegate tasks to these specialists.")
+         lines.append("When delegating, be specific about what you need and provide relevant context.")
+
+         return "\n".join(lines)
+
+     def _format_cognitive_text(
+         self,
+         peers: List[Dict[str, Any]],
+         include_capabilities: bool,
+         tool_name: str
+     ) -> str:
+         """Format peers as plain text for simpler LLM contexts."""
+         lines = ["Available Peers:"]
+
+         for peer in peers:
+             role = peer.get("role", "unknown")
+             capabilities = peer.get("capabilities", [])
+
+             if include_capabilities and capabilities:
+                 lines.append(f"- {role}: {', '.join(capabilities)}")
+             else:
+                 lines.append(f"- {role}")
+
+         lines.append(f"\nUse {tool_name} tool to communicate with peers.")
+
+         return "\n".join(lines)
+
+     def _format_cognitive_json(self, peers: List[Dict[str, Any]]) -> str:
+         """Format peers as JSON string for structured LLM contexts."""
+         import json
+         return json.dumps({
+             "available_peers": [
+                 {
+                     "role": p.get("role"),
+                     "agent_id": p.get("agent_id"),
+                     "capabilities": p.get("capabilities", [])
+                 }
+                 for p in peers
+             ],
+             "instruction": "Use ask_peer tool to communicate with these agents"
+         }, indent=2)
+
+     def build_system_prompt(self, base_prompt: str, **context_kwargs) -> str:
+         """
+         Build a complete system prompt with peer context appended.
+
+         Convenience method that combines your base prompt with
+         dynamic peer context.
+
+         Args:
+             base_prompt: Your base system prompt
+             **context_kwargs: Arguments passed to get_cognitive_context()
+                 - format: "markdown", "json", or "text"
+                 - include_capabilities: bool
+                 - include_description: bool
+                 - tool_name: str
+
+         Returns:
+             Complete system prompt with peer awareness
+
+         Example:
+             # Simple usage
+             prompt = self.peers.build_system_prompt("You are a helpful analyst.")
+
+             # With options
+             prompt = self.peers.build_system_prompt(
+                 "You are a data processor.",
+                 include_capabilities=True,
+                 include_description=False
+             )
+
+             # Use in LLM call
+             response = await llm.chat(
+                 messages=[{"role": "system", "content": prompt}, ...],
+                 tools=[self.peers.as_tool().schema]
+             )
+         """
+         context = self.get_cognitive_context(**context_kwargs)
+         return f"{base_prompt}\n\n{context}"
+
      # ─────────────────────────────────────────────────────────────────
      # MESSAGING - RECEIVE
      # ─────────────────────────────────────────────────────────────────
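A sketch of wiring the new helpers into an LLM call. Only build_system_prompt(), the format keyword forwarded to get_cognitive_context(), and as_tool().schema come from this diff; BASE_PROMPT, the llm client, and its chat() signature are placeholders mirroring the docstring example above.

# Illustrative only; `peers` is a PeerClient, `llm` is a hypothetical chat client.
BASE_PROMPT = "You are a research coordinator."

async def answer(peers, llm, question: str) -> str:
    # Rebuild the prompt on each call so peers that joined or left are reflected.
    system_prompt = peers.build_system_prompt(BASE_PROMPT, format="markdown")
    return await llm.chat(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question},
        ],
        tools=[peers.as_tool().schema],  # exposes the ask_peer tool to the model
    )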
@@ -483,25 +711,40 @@ class PeerClient:

      def _resolve_target(self, target: str):
          """
-         Resolve target string to agent.
+         Resolve target string to agent (local or remote).
+
+         Checks:
+             1. Local registry (same mesh)
+             2. Remote registry (other nodes via P2P coordinator)

          Args:
              target: Role name or agent_id

          Returns:
-             Agent instance or None
+             Agent instance (local) or RemoteAgentProxy (remote), or None
          """
-         # First try as role
+         # 1. Try local registry first (by role)
          agents = self._agent_registry.get(target, [])
          if agents:
              return agents[0]

-         # Try as agent_id
+         # 2. Try local agent_id match
          for role_name, agents in self._agent_registry.items():
              for agent in agents:
                  if agent.agent_id == target:
                      return agent

+         # 3. Try remote agents via coordinator
+         if self._coordinator:
+             remote_info = self._coordinator.get_remote_agent(target)
+             if remote_info:
+                 return RemoteAgentProxy(
+                     agent_id=remote_info.get('agent_id', target),
+                     role=remote_info.get('role', target),
+                     node_id=remote_info.get('node_id', ''),
+                     capabilities=remote_info.get('capabilities', [])
+                 )
+
          return None

      async def _send_message(self, target_agent, message: OutgoingMessage) -> bool:
@@ -509,9 +752,37 @@
          Send message to target agent via coordinator.

          For local agents (same mesh), delivers directly to their queue.
-         For remote agents, sends via P2P coordinator.
+         For remote agents (RemoteAgentProxy), sends via P2P coordinator.
          """
          try:
+             # Check if it's a RemoteAgentProxy (remote agent on another node)
+             if isinstance(target_agent, RemoteAgentProxy):
+                 # Remote delivery via P2P coordinator
+                 if self._coordinator:
+                     msg_type = f"PEER_{message.type.value.upper()}"
+                     payload = {
+                         'sender': message.sender,
+                         'sender_node': message.sender_node,
+                         'target': target_agent.agent_id,
+                         'target_role': target_agent.role,
+                         'data': message.data,
+                         'correlation_id': message.correlation_id,
+                         'timestamp': message.timestamp
+                     }
+                     result = await self._coordinator._send_p2p_message(
+                         target_agent.node_id,
+                         msg_type,
+                         payload
+                     )
+                     if result:
+                         self._logger.debug(
+                             f"Sent {message.type.value} to remote agent "
+                             f"{target_agent.role}@{target_agent.node_id}"
+                         )
+                     return result
+                 self._logger.warning("No coordinator available for remote delivery")
+                 return False
+
              # Check if target has a peer client (local agent)
              if hasattr(target_agent, 'peers') and target_agent.peers:
                  # Direct local delivery
@@ -529,7 +800,7 @@ class PeerClient:
                  )
                  return True

-             # Remote delivery via P2P coordinator
+             # Fallback: Remote delivery via P2P coordinator
              if self._coordinator:
                  msg_type = f"PEER_{message.type.value.upper()}"
                  payload = {
@@ -540,8 +811,9 @@ class PeerClient:
                      'correlation_id': message.correlation_id,
                      'timestamp': message.timestamp
                  }
+                 node_id = getattr(target_agent, 'node_id', None) or self._node_id
                  return await self._coordinator._send_p2p_message(
-                     target_agent.node_id or self._node_id,
+                     node_id,
                      msg_type,
                      payload
                  )
jarviscore/p2p/swim_manager.py
@@ -116,12 +116,17 @@ class SWIMThreadManager:
          self.bind_addr = swim_parse_address(f"{bind_host}:{bind_port}")
          logger.info(f"SWIM bind address: {self.bind_addr}")

-         # Parse seed nodes
+         # Parse seed nodes - handle both string and list
          seed_addrs = []
          if seed_nodes:
-             for seed in seed_nodes.split(','):
-                 if seed.strip():
-                     seed_addrs.append(swim_parse_address(seed.strip()))
+             # Handle both string (comma-separated) and list
+             if isinstance(seed_nodes, str):
+                 seed_list = [s.strip() for s in seed_nodes.split(',') if s.strip()]
+             else:
+                 seed_list = seed_nodes
+             for seed in seed_list:
+                 if seed:
+                     seed_addrs.append(swim_parse_address(seed.strip() if isinstance(seed, str) else seed))
          logger.info(f"SWIM seed nodes: {seed_addrs}")

          # Get SWIM config
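The seed-node normalization this hunk performs, restated as a standalone sketch; the real code also runs each entry through swim_parse_address, and the helper name here is hypothetical.

def normalize_seeds(seed_nodes):
    # Accept either "host:port,host:port" or an already-split list.
    if isinstance(seed_nodes, str):
        return [s.strip() for s in seed_nodes.split(',') if s.strip()]
    return list(seed_nodes or [])

assert normalize_seeds("10.0.0.1:7946, 10.0.0.2:7946") == ["10.0.0.1:7946", "10.0.0.2:7946"]
assert normalize_seeds(["10.0.0.1:7946", "10.0.0.2:7946"]) == ["10.0.0.1:7946", "10.0.0.2:7946"]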
jarviscore/profiles/__init__.py
@@ -1,6 +1,14 @@
- """Execution profiles for agents."""
+ """
+ Execution profiles for agents.
+
+ Profiles define HOW agents execute tasks:
+ - AutoAgent: LLM-powered code generation + sandboxed execution
+ - CustomAgent: User-defined logic (LangChain, MCP, raw Python)
+ - ListenerAgent: API-first agents with background P2P listening
+ """

  from .autoagent import AutoAgent
  from .customagent import CustomAgent
+ from .listeneragent import ListenerAgent

- __all__ = ["AutoAgent", "CustomAgent"]
+ __all__ = ["AutoAgent", "CustomAgent", "ListenerAgent"]
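With this change the new profile is importable alongside the existing ones; a one-line sketch (ListenerAgent's own API lives in jarviscore/profiles/listeneragent.py, which is not shown in this excerpt).

# All three execution profiles are now exported from jarviscore.profiles.
from jarviscore.profiles import AutoAgent, CustomAgent, ListenerAgent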