jarviscore-framework 0.2.1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. examples/cloud_deployment_example.py +162 -0
  2. examples/fastapi_integration_example.py +570 -0
  3. examples/listeneragent_cognitive_discovery_example.py +343 -0
  4. jarviscore/__init__.py +22 -5
  5. jarviscore/cli/smoketest.py +8 -4
  6. jarviscore/core/agent.py +227 -0
  7. jarviscore/data/examples/cloud_deployment_example.py +162 -0
  8. jarviscore/data/examples/fastapi_integration_example.py +570 -0
  9. jarviscore/data/examples/listeneragent_cognitive_discovery_example.py +343 -0
  10. jarviscore/docs/API_REFERENCE.md +296 -3
  11. jarviscore/docs/CHANGELOG.md +97 -0
  12. jarviscore/docs/CUSTOMAGENT_GUIDE.md +832 -13
  13. jarviscore/docs/GETTING_STARTED.md +111 -7
  14. jarviscore/docs/USER_GUIDE.md +152 -6
  15. jarviscore/integrations/__init__.py +16 -0
  16. jarviscore/integrations/fastapi.py +247 -0
  17. jarviscore/p2p/broadcaster.py +10 -3
  18. jarviscore/p2p/coordinator.py +310 -14
  19. jarviscore/p2p/keepalive.py +45 -23
  20. jarviscore/p2p/peer_client.py +282 -10
  21. jarviscore/p2p/swim_manager.py +9 -4
  22. jarviscore/profiles/__init__.py +10 -2
  23. jarviscore/profiles/listeneragent.py +292 -0
  24. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/METADATA +37 -4
  25. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/RECORD +32 -18
  26. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/WHEEL +1 -1
  27. tests/test_13_dx_improvements.py +554 -0
  28. tests/test_14_cloud_deployment.py +403 -0
  29. tests/test_15_llm_cognitive_discovery.py +684 -0
  30. tests/test_16_unified_dx_flow.py +947 -0
  31. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/licenses/LICENSE +0 -0
  32. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,343 @@
+ """
+ ListenerAgent + Cognitive Discovery Example
+
+ Demonstrates two v0.3.0 features:
+
+ 1. ListenerAgent - Handler-based P2P agents (no run() loop needed)
+    - on_peer_request() handles incoming requests
+    - on_peer_notify() handles broadcast notifications
+
+ 2. Cognitive Discovery - Dynamic peer awareness for LLMs
+    - get_cognitive_context() generates LLM-ready peer descriptions
+    - No hardcoded agent names in prompts
+    - LLM autonomously decides when to delegate
+
+ Usage:
+     python examples/listeneragent_cognitive_discovery_example.py
+
+ Prerequisites:
+     - .env file with CLAUDE_API_KEY (or other LLM provider)
+ """
+ import asyncio
+ import sys
+ from pathlib import Path
+
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ from jarviscore import Mesh
+ from jarviscore.profiles import ListenerAgent
+
+
+ # ═══════════════════════════════════════════════════════════════════════════
+ # SPECIALIST AGENT - Responds to requests from other agents
+ # ═══════════════════════════════════════════════════════════════════════════
+
+ class AnalystAgent(ListenerAgent):
+     """
+     Specialist agent that handles analysis requests.
+
+     Uses ListenerAgent profile - just implement handlers, no run() loop needed.
+     """
+     role = "analyst"
+     capabilities = ["data_analysis", "statistics", "insights"]
+     description = "Analyzes data and provides statistical insights"
+
+     async def on_peer_request(self, msg):
+         """Handle incoming analysis requests."""
+         query = msg.data.get("question", msg.data.get("query", ""))
+         print(f"\n[Analyst] Received request: {query[:50]}...")
+
+         # Simulate analysis (in real usage, this would use an LLM)
+         result = {
+             "analysis": f"Analysis of '{query}': The data shows positive trends.",
+             "confidence": 0.85,
+             "insights": ["Trend is upward", "Growth rate: 15%", "Recommendation: Continue"]
+         }
+
+         print(f"[Analyst] Sending response with {len(result['insights'])} insights")
+         return result
+
+
+ # ═══════════════════════════════════════════════════════════════════════════
+ # COORDINATOR AGENT - Uses LLM with cognitive discovery
+ # ═══════════════════════════════════════════════════════════════════════════
+
+ class CoordinatorAgent(ListenerAgent):
+     """
+     Coordinator agent that uses LLM with dynamic peer discovery.
+
+     Key pattern:
+     1. Uses get_cognitive_context() to learn about available peers
+     2. Injects peer context into LLM system prompt
+     3. LLM decides when to delegate to specialists
+     """
+     role = "coordinator"
+     capabilities = ["coordination", "delegation", "chat"]
+     description = "Coordinates tasks and delegates to specialists"
+
+     async def setup(self):
+         await super().setup()
+         self.llm = self._create_llm_client()
+
+     def _create_llm_client(self):
+         """Create LLM client with fallback to mock."""
+         try:
+             from anthropic import Anthropic
+             from jarviscore.config import settings
+             import os
+
+             api_key = settings.claude_api_key or os.environ.get("CLAUDE_API_KEY")
+             if not api_key:
+                 raise RuntimeError("No API key")
+
+             # Check for custom endpoint (e.g., Azure-hosted Claude)
+             endpoint = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
+             model = settings.claude_model or os.environ.get("CLAUDE_MODEL") or "claude-sonnet-4-20250514"
+
+             if endpoint:
+                 client = Anthropic(api_key=api_key, base_url=endpoint)
+             else:
+                 client = Anthropic(api_key=api_key)
+
+             # Test the API key with a minimal request
+             try:
+                 client.messages.create(
+                     model=model,
+                     max_tokens=10,
+                     messages=[{"role": "user", "content": "Hi"}]
+                 )
+             except Exception as e:
+                 raise RuntimeError(f"API key validation failed: {e}")
+
+             print(f"[Coordinator] LLM initialized: {model}")
+             return {"client": client, "model": model, "available": True}
+         except Exception as e:
+             print(f"[Coordinator] LLM not available ({e}), using mock responses")
+             return {"available": False}
+
+     def _build_dynamic_prompt(self, base_prompt: str) -> str:
+         """
+         Build system prompt with dynamic peer awareness.
+
+         THIS IS THE KEY PATTERN - the LLM learns about peers dynamically!
+         """
+         if not self.peers:
+             return base_prompt
+
+         # Use get_cognitive_context() for dynamic peer discovery
+         peer_context = self.peers.get_cognitive_context(
+             format="markdown",
+             include_capabilities=True,
+             include_description=True,
+             tool_name="ask_peer"
+         )
+
+         return f"{base_prompt}\n\n{peer_context}"
+
+     async def process_query(self, user_query: str) -> str:
+         """
+         Process a user query using LLM with peer awareness.
+
+         The LLM sees available peers and can decide to delegate.
+         """
+         base_prompt = """You are a coordinator assistant that delegates tasks to specialists.
+
+ IMPORTANT: You MUST use the ask_peer tool to delegate to specialists. You cannot perform analysis yourself.
+
+ When a user asks for data analysis, statistics, or insights:
+ 1. Use the ask_peer tool with role="analyst"
+ 2. Pass their question to the analyst
+ 3. Report the analyst's findings
+
+ Never try to do analysis yourself - always delegate to the analyst."""
+
+         # Build prompt with dynamic peer discovery
+         system_prompt = self._build_dynamic_prompt(base_prompt)
+
+         print(f"\n[Coordinator] System prompt includes peer context:")
+         print("-" * 40)
+         # Show just the peer context part
+         if "AVAILABLE MESH PEERS" in system_prompt:
+             peer_section = system_prompt.split("AVAILABLE MESH PEERS")[1][:200]
+             print(f"...AVAILABLE MESH PEERS{peer_section}...")
+         print("-" * 40)
+
+         # Check if LLM is available
+         if not self.llm.get("available"):
+             # Mock: simulate LLM deciding to delegate
+             if any(word in user_query.lower() for word in ["analyze", "analysis", "statistics", "data"]):
+                 print("[Coordinator] Mock LLM decides to delegate to analyst")
+                 response = await self.peers.request(
+                     "analyst",
+                     {"question": user_query},
+                     timeout=30
+                 )
+                 return f"Based on the analyst's findings: {response.get('analysis', 'No response')}"
+             return f"I can help with: {user_query}"
+
+         # Real LLM call with tools
+         tools = self._get_tools()
+         messages = [{"role": "user", "content": user_query}]
+
+         print(f"[Coordinator] Calling LLM with {len(tools)} tools: {[t['name'] for t in tools]}")
+
+         response = self.llm["client"].messages.create(
+             model=self.llm["model"],
+             max_tokens=1024,
+             system=system_prompt,
+             messages=messages,
+             tools=tools
+         )
+
+         print(f"[Coordinator] LLM stop_reason: {response.stop_reason}")
+         print(f"[Coordinator] Response blocks: {[b.type for b in response.content]}")
+
+         # Handle tool use - check for tool_use FIRST (prioritize over text)
+         tool_use_block = None
+         text_content = None
+
+         for block in response.content:
+             if block.type == "tool_use" and block.name == "ask_peer":
+                 tool_use_block = block
+             elif hasattr(block, 'text'):
+                 text_content = block.text
+
+         # If there's a tool use, execute it
+         if tool_use_block:
+             print(f"[Coordinator] LLM decided to use ask_peer tool")
+             peer_response = await self._execute_peer_tool(tool_use_block.input)
+
+             # Continue conversation with tool result
+             messages.append({"role": "assistant", "content": response.content})
+             messages.append({
+                 "role": "user",
+                 "content": [{
+                     "type": "tool_result",
+                     "tool_use_id": tool_use_block.id,
+                     "content": str(peer_response)
+                 }]
+             })
+
+             final_response = self.llm["client"].messages.create(
+                 model=self.llm["model"],
+                 max_tokens=1024,
+                 system=system_prompt,
+                 messages=messages
+             )
+
+             for final_block in final_response.content:
+                 if hasattr(final_block, 'text'):
+                     return final_block.text
+
+         # No tool use, return text content
+         if text_content:
+             return text_content
+
+         return "I processed your request."
+
+     def _get_tools(self) -> list:
+         """Get tools for LLM, including peer tools."""
+         return [{
+             "name": "ask_peer",
+             "description": "Ask a specialist agent for help. Use this to delegate tasks to experts.",
+             "input_schema": {
+                 "type": "object",
+                 "properties": {
+                     "role": {
+                         "type": "string",
+                         "description": "Role of the agent to ask (e.g., 'analyst')"
+                     },
+                     "question": {
+                         "type": "string",
+                         "description": "The question or task for the specialist"
+                     }
+                 },
+                 "required": ["role", "question"]
+             }
+         }]
+
+     async def _execute_peer_tool(self, args: dict) -> dict:
+         """Execute ask_peer tool."""
+         role = args.get("role", "")
+         question = args.get("question", "")
+
+         print(f"[Coordinator] Asking {role}: {question[:50]}...")
+
+         response = await self.peers.request(
+             role,
+             {"question": question},
+             timeout=30
+         )
+
+         return response
+
+     async def on_peer_request(self, msg):
+         """Handle incoming peer requests (for workflow compatibility)."""
+         query = msg.data.get("query", msg.data.get("question", ""))
+         result = await self.process_query(query)
+         return {"response": result}
+
+
+ # ═══════════════════════════════════════════════════════════════════════════
+ # MAIN - Demonstrate cognitive discovery
+ # ═══════════════════════════════════════════════════════════════════════════
+
+ async def main():
+     print("=" * 60)
+     print("LLM Cognitive Discovery Example")
+     print("=" * 60)
+
+     # Create mesh with both agents
+     mesh = Mesh(mode="p2p", config={"bind_port": 7960})
+
+     analyst = mesh.add(AnalystAgent())
+     coordinator = mesh.add(CoordinatorAgent())
+
+     await mesh.start()
+
+     print(f"\n[Setup] Mesh started with agents:")
+     print(f" - {analyst.role}: {analyst.capabilities}")
+     print(f" - {coordinator.role}: {coordinator.capabilities}")
+
+     # Start analyst listener in background
+     analyst_task = asyncio.create_task(analyst.run())
+
+     # Give time for setup
+     await asyncio.sleep(0.5)
+
+     # Show cognitive context that LLM will see
+     print("\n" + "=" * 60)
+     print("COGNITIVE CONTEXT (what LLM sees about peers)")
+     print("=" * 60)
+     context = coordinator.peers.get_cognitive_context()
+     print(context)
+
+     # Test queries - one that should trigger delegation, one that shouldn't
+     test_queries = [
+         "Please analyze the Q4 sales data and give me insights",
+         "What time is it?",
+     ]
+
+     print("\n" + "=" * 60)
+     print("PROCESSING QUERIES")
+     print("=" * 60)
+
+     for query in test_queries:
+         print(f"\n>>> User: {query}")
+         response = await coordinator.process_query(query)
+         print(f"<<< Coordinator: {response}")
+
+     # Cleanup
+     analyst.request_shutdown()
+     analyst_task.cancel()
+     try:
+         await analyst_task
+     except asyncio.CancelledError:
+         pass
+
+     await mesh.stop()
+     print("\n[Done] Example completed!")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
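Note: the coordinator above executes a single `ask_peer` round trip before producing its final answer. A model can chain several tool calls in one conversation; looping on `stop_reason` covers that case. A minimal sketch against the same client setup (the method name and the round cap are illustrative, not part of the shipped example):

```python
async def process_query_multi_turn(self, user_query: str, max_rounds: int = 5) -> str:
    """Like process_query(), but lets the model chain several ask_peer calls."""
    system_prompt = self._build_dynamic_prompt("You are a coordinator assistant.")
    messages = [{"role": "user", "content": user_query}]

    for _ in range(max_rounds):
        response = self.llm["client"].messages.create(
            model=self.llm["model"],
            max_tokens=1024,
            system=system_prompt,
            messages=messages,
            tools=self._get_tools(),
        )
        if response.stop_reason != "tool_use":
            # No further tool calls: concatenate the text blocks and finish.
            return "".join(b.text for b in response.content if hasattr(b, "text"))

        # Execute every tool call in this turn and feed the results back.
        messages.append({"role": "assistant", "content": response.content})
        results = []
        for block in response.content:
            if block.type == "tool_use" and block.name == "ask_peer":
                peer_response = await self._execute_peer_tool(block.input)
                results.append({
                    "type": "tool_result",
                    "tool_use_id": block.id,
                    "content": str(peer_response),
                })
        messages.append({"role": "user", "content": results})

    return "Stopped after reaching the tool-call limit."
```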
@@ -14,7 +14,14 @@ Complete API documentation for JarvisCore framework components.
     - [AutoAgent](#autoagent)
     - [Custom Profile](#custom-profile)
     - [CustomAgent](#customagent)
- 3. [Execution Components](#execution-components)
+    - [ListenerAgent (v0.3.0)](#listeneragent-v030)
+ 3. [P2P Communication (v0.3.0)](#p2p-communication-v030)
+    - [PeerClient](#peerclient)
+    - [IncomingMessage](#incomingmessage)
+    - [Cognitive Discovery](#cognitive-discovery)
+ 4. [Integrations (v0.3.0)](#integrations-v030)
+    - [JarvisLifespan](#jarvislifespan)
+ 5. [Execution Components](#execution-components)
     - [CodeGenerator](#codegenerator)
     - [SandboxExecutor](#sandboxexecutor)
     - [AutoRepair](#autorepair)
@@ -557,6 +564,292 @@ See [CustomAgent Guide](CUSTOMAGENT_GUIDE.md) for P2P and distributed mode details.
 
  ---
 
+ ### ListenerAgent (v0.3.0)
+
+ Handler-based agent for P2P communication without manual run loops.
+
+ #### Class: `ListenerAgent(CustomAgent)`
+
+ ```python
+ from jarviscore.profiles import ListenerAgent
+
+ class MyAgent(ListenerAgent):
+     role = "processor"
+     capabilities = ["processing"]
+
+     async def on_peer_request(self, msg):
+         """Handle incoming requests from peers."""
+         return {"result": msg.data.get("task", "").upper()}
+
+     async def on_peer_notify(self, msg):
+         """Handle broadcast notifications."""
+         print(f"Notification: {msg.data}")
+ ```
+
+ **Class Attributes:**
+ - `role` (str): Agent role identifier (required)
+ - `capabilities` (list): List of capability strings (required)
+
+ **Handler Methods:**
+
+ #### `async on_peer_request(msg) -> dict`
+
+ Handle incoming request messages. The return value is sent back to the requester.
+
+ ```python
+ async def on_peer_request(self, msg):
+     query = msg.data.get("question", "")
+     result = self.process(query)
+     return {"response": result, "status": "success"}
+ ```
+
+ **Parameters:**
+ - `msg` (IncomingMessage): Incoming message with `data`, `sender_role`, `sender_id`
+
+ **Returns:** dict - Response sent back to the requester
+
+ ---
+
+ #### `async on_peer_notify(msg) -> None`
+
+ Handle broadcast notifications. No return value needed.
+
+ ```python
+ async def on_peer_notify(self, msg):
+     event_type = msg.data.get("type")
+     if event_type == "status_update":
+         self.handle_status(msg.data)
+ ```
+
+ **Parameters:**
+ - `msg` (IncomingMessage): Incoming notification message
+
+ **Returns:** None
+
+ ---
+
+ **Self-Registration Methods (v0.3.0):**
+
+ #### `async join_mesh(seed_nodes, advertise_endpoint=None)`
+
+ Join an existing mesh without a central orchestrator.
+
+ ```python
+ await agent.join_mesh(
+     seed_nodes="10.0.0.1:7950,10.0.0.2:7950",
+     advertise_endpoint="my-pod:7950"
+ )
+ ```
+
+ **Parameters:**
+ - `seed_nodes` (str): Comma-separated list of seed node addresses
+ - `advertise_endpoint` (str, optional): Address for other nodes to reach this agent
+
+ ---
+
+ #### `async leave_mesh()`
+
+ Gracefully leave the mesh network.
+
+ ```python
+ await agent.leave_mesh()
+ ```
+
+ ---
+
+ #### `async serve_forever()`
+
+ Run the agent until a shutdown signal arrives.
+
+ ```python
+ await agent.serve_forever()
+ ```
+
+ ---
+
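Taken together, `join_mesh()`, `serve_forever()`, and `leave_mesh()` form the self-registration lifecycle for an agent running outside a central `Mesh`. A minimal sketch (the agent class and both addresses are placeholders):

```python
agent = MyAgent()  # any ListenerAgent subclass, e.g. the one defined above

await agent.join_mesh(
    seed_nodes="10.0.0.1:7950",          # placeholder seed address
    advertise_endpoint="worker-1:7950",  # placeholder address peers can reach
)
try:
    await agent.serve_forever()  # blocks until a shutdown signal arrives
finally:
    await agent.leave_mesh()     # announce departure even on errors
```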
+ ## P2P Communication (v0.3.0)
+
+ ### PeerClient
+
+ Client for peer-to-peer communication, available as `self.peers` on agents.
+
+ #### Class: `PeerClient`
+
+ **Methods:**
+
+ #### `get_cognitive_context() -> str`
+
+ Generate LLM-ready text describing available peers.
+
+ ```python
+ if self.peers:
+     context = self.peers.get_cognitive_context()
+     # Returns:
+     # "Available Peers:
+     # - analyst (capabilities: analysis, data_interpretation)
+     #   Use ask_peer with role="analyst" for analysis tasks
+     # - researcher (capabilities: research, web_search)
+     #   Use ask_peer with role="researcher" for research tasks"
+ ```
+
+ **Returns:** str - Human-readable peer descriptions for LLM prompts
+
+ ---
+
+ #### `list() -> List[PeerInfo]`
+
+ Get the list of connected peers.
+
+ ```python
+ peers = self.peers.list()
+ for peer in peers:
+     print(f"{peer.role}: {peer.capabilities}")
+ ```
+
+ **Returns:** List of PeerInfo objects
+
+ ---
+
+ #### `as_tool() -> PeerTool`
+
+ Get peer tools for LLM tool use.
+
+ ```python
+ tools = self.peers.as_tool()
+ result = await tools.execute("ask_peer", {"role": "analyst", "question": "..."})
+ ```
+
+ **Available Tools:**
+ - `ask_peer` - Send a request and wait for the response
+ - `broadcast` - Send a notification to all peers
+ - `list_peers` - List available peers
+
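Only `ask_peer`'s input payload is shown above; the payload shapes for `broadcast` and `list_peers` are not documented here, so the calls below are assumptions sketched by analogy. Verify them against the `PeerTool` schemas in your installed version:

```python
tools = self.peers.as_tool()

# Documented: request/response round trip via ask_peer.
answer = await tools.execute("ask_peer", {"role": "analyst", "question": "Summarize Q4 trends"})

# Assumed payloads for the other two tools (check your version's schemas).
await tools.execute("broadcast", {"message": {"type": "status_update", "status": "ready"}})
available = await tools.execute("list_peers", {})
```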
+ ---
+
+ #### `async receive(timeout) -> IncomingMessage`
+
+ Receive the next message (for CustomAgent manual loops).
+
+ ```python
+ msg = await self.peers.receive(timeout=0.5)
+ if msg and msg.is_request:
+     await self.peers.respond(msg, {"result": "..."})
+ ```
+
+ ---
+
+ #### `async respond(msg, data) -> None`
+
+ Respond to a request message.
+
+ ```python
+ await self.peers.respond(msg, {"status": "success", "result": data})
+ ```
+
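`receive()` and `respond()` pair into the manual message loop that `ListenerAgent` automates. A minimal `CustomAgent` sketch (the import path is assumed from the profiles module used above, and the stop condition is illustrative, since the framework's shutdown flag is not shown here):

```python
from jarviscore.profiles import CustomAgent  # path assumed; see the CustomAgent section

class EchoAgent(CustomAgent):
    role = "echo"
    capabilities = ["echo"]

    async def run(self):
        stop = False
        while not stop:
            msg = await self.peers.receive(timeout=0.5)  # returns None on timeout
            if msg is None:
                continue  # nothing arrived, poll again
            if msg.is_request:
                await self.peers.respond(msg, {"echo": msg.data})
            elif msg.is_notify and msg.data.get("type") == "shutdown":
                stop = True  # illustrative stop condition
```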
+ ---
+
+ ### IncomingMessage
+
+ Message received from a peer.
+
+ #### Class: `IncomingMessage`
+
+ **Attributes:**
+ - `data` (dict): Message payload
+ - `sender_role` (str): Role of the sending agent
+ - `sender_id` (str): ID of the sending agent
+ - `is_request` (bool): True if this is a request expecting a response
+ - `is_notify` (bool): True if this is a notification
+
+ ```python
+ async def on_peer_request(self, msg):
+     print(f"From: {msg.sender_role}")
+     print(f"Data: {msg.data}")
+     return {"received": True}
+ ```
+
+ ---
+
+ ### Cognitive Discovery
+
+ Dynamic peer awareness for LLM prompts.
+
+ **Pattern:**
+
+ ```python
+ class MyAgent(CustomAgent):
+     def get_system_prompt(self) -> str:
+         base = "You are a helpful assistant."
+
+         # Dynamically add peer context
+         if self.peers:
+             peer_context = self.peers.get_cognitive_context()
+             return f"{base}\n\n{peer_context}"
+
+         return base
+ ```
+
+ **Benefits:**
+ - No hardcoded agent names in prompts
+ - Automatically updates when peers join/leave
+ - LLM always knows current capabilities
+
+ ---
+
+ ## Integrations (v0.3.0)
+
+ ### JarvisLifespan
+
+ FastAPI lifespan context manager for automatic agent lifecycle management.
+
+ #### Class: `JarvisLifespan`
+
+ ```python
+ from jarviscore.integrations.fastapi import JarvisLifespan
+
+ app = FastAPI(lifespan=JarvisLifespan(agent, mode="p2p"))
+ ```
+
+ **Parameters:**
+ - `agent`: Agent instance (ListenerAgent or CustomAgent)
+ - `mode` (str): "p2p" or "distributed"
+ - `bind_port` (int, optional): P2P port (default: 7950)
+ - `seed_nodes` (str, optional): Comma-separated seed node addresses
+
+ **Example:**
+
+ ```python
+ from fastapi import FastAPI
+ from jarviscore.profiles import ListenerAgent
+ from jarviscore.integrations.fastapi import JarvisLifespan
+
+
+ class ProcessorAgent(ListenerAgent):
+     role = "processor"
+     capabilities = ["processing"]
+
+     async def on_peer_request(self, msg):
+         return {"result": "processed"}
+
+
+ agent = ProcessorAgent()
+ app = FastAPI(lifespan=JarvisLifespan(agent, mode="p2p", bind_port=7950))
+
+
+ @app.post("/process")
+ async def process(data: dict):
+     # Agent is already running and connected to mesh
+     return {"status": "ok"}
+ ```
+
+ **Handles:**
+ - Agent setup and teardown
+ - Mesh initialization
+ - Background run loop (for ListenerAgent)
+ - Graceful shutdown
+
+ ---
+
  ## Execution Components
 
  ### CodeGenerator
@@ -1140,6 +1433,6 @@ async def execute_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
 
  ## Version
 
- API Reference for JarvisCore v0.2.1
+ API Reference for JarvisCore v0.3.0
 
- Last Updated: 2026-01-23
+ Last Updated: 2026-01-29