jarviscore-framework 0.2.1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. examples/cloud_deployment_example.py +162 -0
  2. examples/customagent_cognitive_discovery_example.py +343 -0
  3. examples/fastapi_integration_example.py +570 -0
  4. jarviscore/__init__.py +19 -5
  5. jarviscore/cli/smoketest.py +8 -4
  6. jarviscore/core/agent.py +227 -0
  7. jarviscore/core/mesh.py +9 -0
  8. jarviscore/data/examples/cloud_deployment_example.py +162 -0
  9. jarviscore/data/examples/custom_profile_decorator.py +134 -0
  10. jarviscore/data/examples/custom_profile_wrap.py +168 -0
  11. jarviscore/data/examples/customagent_cognitive_discovery_example.py +343 -0
  12. jarviscore/data/examples/fastapi_integration_example.py +570 -0
  13. jarviscore/docs/API_REFERENCE.md +283 -3
  14. jarviscore/docs/CHANGELOG.md +139 -0
  15. jarviscore/docs/CONFIGURATION.md +1 -1
  16. jarviscore/docs/CUSTOMAGENT_GUIDE.md +997 -85
  17. jarviscore/docs/GETTING_STARTED.md +228 -267
  18. jarviscore/docs/TROUBLESHOOTING.md +1 -1
  19. jarviscore/docs/USER_GUIDE.md +153 -8
  20. jarviscore/integrations/__init__.py +16 -0
  21. jarviscore/integrations/fastapi.py +247 -0
  22. jarviscore/p2p/broadcaster.py +10 -3
  23. jarviscore/p2p/coordinator.py +310 -14
  24. jarviscore/p2p/keepalive.py +45 -23
  25. jarviscore/p2p/peer_client.py +311 -12
  26. jarviscore/p2p/swim_manager.py +9 -4
  27. jarviscore/profiles/__init__.py +7 -1
  28. jarviscore/profiles/customagent.py +295 -74
  29. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/METADATA +66 -18
  30. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/RECORD +37 -22
  31. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/WHEEL +1 -1
  32. tests/test_13_dx_improvements.py +554 -0
  33. tests/test_14_cloud_deployment.py +403 -0
  34. tests/test_15_llm_cognitive_discovery.py +684 -0
  35. tests/test_16_unified_dx_flow.py +947 -0
  36. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/licenses/LICENSE +0 -0
  37. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/top_level.txt +0 -0
examples/cloud_deployment_example.py
@@ -0,0 +1,162 @@
+"""
+Cloud Deployment Example (v0.3.0)
+
+Demonstrates agent self-registration with join_mesh() and leave_mesh().
+Agents join an existing mesh independently - no central orchestrator needed.
+
+This is the pattern for:
+- Docker containers where each container runs one agent
+- Kubernetes pods with auto-scaling
+- Cloud Functions / Lambda
+- Any distributed deployment where agents start independently
+
+Usage:
+    # Terminal 1: Start a mesh (or use an existing one)
+    python examples/customagent_p2p_example.py
+
+    # Terminal 2: Run standalone agent that joins the mesh
+    JARVISCORE_SEED_NODES=127.0.0.1:7946 python examples/cloud_deployment_example.py
+
+Environment Variables:
+    JARVISCORE_SEED_NODES: Comma-separated seed nodes (e.g., "host1:7946,host2:7946")
+    JARVISCORE_MESH_ENDPOINT: Single mesh endpoint (alternative to seed_nodes)
+"""
+import asyncio
+import os
+import signal
+import sys
+
+sys.path.insert(0, '.')
+
+from jarviscore.profiles import CustomAgent
+
+
+class StandaloneProcessor(CustomAgent):
+    """
+    Example standalone agent that joins mesh independently.
+
+    This agent:
+    - Self-registers with the mesh on startup
+    - Listens for peer requests
+    - Shows its view of the mesh (cognitive context)
+    - Gracefully leaves mesh on shutdown
+    """
+
+    role = "standalone_processor"
+    capabilities = ["standalone", "processing", "example"]
+    description = "Processes requests from other mesh agents (standalone deployment)"
+
+    async def on_peer_request(self, msg):
+        """Handle incoming requests from other agents."""
+        print(f"\n[{self.role}] Received request from {msg.sender}:")
+        print(f" Data: {msg.data}")
+
+        # Process the request
+        task = msg.data.get("task", "")
+        result = {
+            "status": "success",
+            "output": f"Processed: {task}",
+            "agent_id": self.agent_id,
+            "processed_by": self.role
+        }
+
+        print(f"[{self.role}] Sending response: {result}")
+        return result
+
+    async def on_peer_notify(self, msg):
+        """Handle incoming notifications from other agents."""
+        print(f"\n[{self.role}] Received notification from {msg.sender}:")
+        print(f" Event: {msg.data.get('event', 'unknown')}")
+        print(f" Data: {msg.data}")
+
+
+async def main():
+    print("=" * 60)
+    print("Standalone Agent Example - Cloud Deployment Pattern")
+    print("=" * 60)
+
+    # Check for mesh connection info
+    endpoint = os.environ.get("JARVISCORE_MESH_ENDPOINT")
+    seed_nodes = os.environ.get("JARVISCORE_SEED_NODES")
+
+    if not endpoint and not seed_nodes:
+        print("\nNo mesh endpoint configured!")
+        print("\nSet one of:")
+        print(" - JARVISCORE_MESH_ENDPOINT (single endpoint)")
+        print(" - JARVISCORE_SEED_NODES (comma-separated list)")
+        print("\nExample:")
+        print(" JARVISCORE_SEED_NODES=127.0.0.1:7946 python cloud_deployment_example.py")
+        print("\nTo start a mesh first, run:")
+        print(" python examples/customagent_p2p_example.py")
+        return
+
+    print(f"\nConnecting to mesh via: {endpoint or seed_nodes}")
+
+    # Create agent
+    agent = StandaloneProcessor()
+
+    # Join the mesh
+    print(f"\nJoining mesh...")
+    try:
+        await agent.join_mesh()
+    except Exception as e:
+        print(f"Failed to join mesh: {e}")
+        return
+
+    print(f"\nSuccessfully joined mesh!")
+    print(f" Agent ID: {agent.agent_id}")
+    print(f" Role: {agent.role}")
+    print(f" Capabilities: {agent.capabilities}")
+
+    # Show discovered peers
+    print(f"\n--- Discovered Peers ---")
+    peers = agent.peers.list_peers()
+    if peers:
+        for p in peers:
+            location = f" ({p.get('location', 'unknown')})" if 'location' in p else ""
+            print(f" - {p['role']}: {p['capabilities']}{location}")
+    else:
+        print(" No other peers discovered yet")
+
+    # Show cognitive context (what an LLM would see)
+    print(f"\n--- Cognitive Context for LLM ---")
+    print(agent.peers.get_cognitive_context())
+
+    # Setup graceful shutdown
+    shutdown_event = asyncio.Event()
+
+    def signal_handler():
+        print("\n\nShutdown requested (Ctrl+C)...")
+        agent.request_shutdown()
+        shutdown_event.set()
+
+    # Register signal handlers
+    loop = asyncio.get_event_loop()
+    for sig in (signal.SIGINT, signal.SIGTERM):
+        try:
+            loop.add_signal_handler(sig, signal_handler)
+        except NotImplementedError:
+            # Windows doesn't support add_signal_handler
+            pass
+
+    print(f"\n--- Agent Running ---")
+    print("Listening for peer requests...")
+    print("Press Ctrl+C to stop.\n")
+
+    # Run agent (CustomAgent's run() handles the message loop)
+    try:
+        await agent.run()
+    except asyncio.CancelledError:
+        pass
+
+    # Leave mesh gracefully
+    print("\nLeaving mesh...")
+    await agent.leave_mesh()
+    print("Goodbye!")
+
+
+if __name__ == "__main__":
    try:
+        asyncio.run(main())
+    except KeyboardInterrupt:
+        print("\nInterrupted.")
examples/customagent_cognitive_discovery_example.py
@@ -0,0 +1,343 @@
+"""
+CustomAgent + Cognitive Discovery Example
+
+Demonstrates two v0.3.0 features:
+
+1. CustomAgent - Handler-based P2P agents (no run() loop needed)
+   - on_peer_request() handles incoming requests
+   - on_peer_notify() handles broadcast notifications
+
+2. Cognitive Discovery - Dynamic peer awareness for LLMs
+   - get_cognitive_context() generates LLM-ready peer descriptions
+   - No hardcoded agent names in prompts
+   - LLM autonomously decides when to delegate
+
+Usage:
+    python examples/customagent_cognitive_discovery_example.py
+
+Prerequisites:
+    - .env file with CLAUDE_API_KEY (or other LLM provider)
+"""
+import asyncio
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from jarviscore import Mesh
+from jarviscore.profiles import CustomAgent
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# SPECIALIST AGENT - Responds to requests from other agents
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class AnalystAgent(CustomAgent):
+    """
+    Specialist agent that handles analysis requests.
+
+    Uses CustomAgent profile - just implement handlers, no run() loop needed.
+    """
+    role = "analyst"
+    capabilities = ["data_analysis", "statistics", "insights"]
+    description = "Analyzes data and provides statistical insights"
+
+    async def on_peer_request(self, msg):
+        """Handle incoming analysis requests."""
+        query = msg.data.get("question", msg.data.get("query", ""))
+        print(f"\n[Analyst] Received request: {query[:50]}...")
+
+        # Simulate analysis (in real usage, this would use an LLM)
+        result = {
+            "analysis": f"Analysis of '{query}': The data shows positive trends.",
+            "confidence": 0.85,
+            "insights": ["Trend is upward", "Growth rate: 15%", "Recommendation: Continue"]
+        }
+
+        print(f"[Analyst] Sending response with {len(result['insights'])} insights")
+        return result
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# COORDINATOR AGENT - Uses LLM with cognitive discovery
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class CoordinatorAgent(CustomAgent):
+    """
+    Coordinator agent that uses LLM with dynamic peer discovery.
+
+    Key pattern:
+    1. Uses get_cognitive_context() to learn about available peers
+    2. Injects peer context into LLM system prompt
+    3. LLM decides when to delegate to specialists
+    """
+    role = "coordinator"
+    capabilities = ["coordination", "delegation", "chat"]
+    description = "Coordinates tasks and delegates to specialists"
+
+    async def setup(self):
+        await super().setup()
+        self.llm = self._create_llm_client()
+
+    def _create_llm_client(self):
+        """Create LLM client with fallback to mock."""
+        try:
+            from anthropic import Anthropic
+            from jarviscore.config import settings
+            import os
+
+            api_key = settings.claude_api_key or os.environ.get("CLAUDE_API_KEY")
+            if not api_key:
+                raise RuntimeError("No API key")
+
+            # Check for custom endpoint (e.g., Azure-hosted Claude)
+            endpoint = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
+            model = settings.claude_model or os.environ.get("CLAUDE_MODEL") or "claude-sonnet-4-20250514"
+
+            if endpoint:
+                client = Anthropic(api_key=api_key, base_url=endpoint)
+            else:
+                client = Anthropic(api_key=api_key)
+
+            # Test the API key with a minimal request
+            try:
+                client.messages.create(
+                    model=model,
+                    max_tokens=10,
+                    messages=[{"role": "user", "content": "Hi"}]
+                )
+            except Exception as e:
+                raise RuntimeError(f"API key validation failed: {e}")
+
+            print(f"[Coordinator] LLM initialized: {model}")
+            return {"client": client, "model": model, "available": True}
+        except Exception as e:
+            print(f"[Coordinator] LLM not available ({e}), using mock responses")
+            return {"available": False}
+
+    def _build_dynamic_prompt(self, base_prompt: str) -> str:
+        """
+        Build system prompt with dynamic peer awareness.
+
+        THIS IS THE KEY PATTERN - the LLM learns about peers dynamically!
+        """
+        if not self.peers:
+            return base_prompt
+
+        # Use get_cognitive_context() for dynamic peer discovery
+        peer_context = self.peers.get_cognitive_context(
+            format="markdown",
+            include_capabilities=True,
+            include_description=True,
+            tool_name="ask_peer"
+        )
+
+        return f"{base_prompt}\n\n{peer_context}"
+
+    async def process_query(self, user_query: str) -> str:
+        """
+        Process a user query using LLM with peer awareness.
+
+        The LLM sees available peers and can decide to delegate.
+        """
+        base_prompt = """You are a coordinator assistant that delegates tasks to specialists.
+
+IMPORTANT: You MUST use the ask_peer tool to delegate to specialists. You cannot perform analysis yourself.
+
+When a user asks for data analysis, statistics, or insights:
+1. Use the ask_peer tool with role="analyst"
+2. Pass their question to the analyst
+3. Report the analyst's findings
+
+Never try to do analysis yourself - always delegate to the analyst."""
+
+        # Build prompt with dynamic peer discovery
+        system_prompt = self._build_dynamic_prompt(base_prompt)
+
+        print(f"\n[Coordinator] System prompt includes peer context:")
+        print("-" * 40)
+        # Show just the peer context part
+        if "AVAILABLE MESH PEERS" in system_prompt:
+            peer_section = system_prompt.split("AVAILABLE MESH PEERS")[1][:200]
+            print(f"...AVAILABLE MESH PEERS{peer_section}...")
+        print("-" * 40)
+
+        # Check if LLM is available
+        if not self.llm.get("available"):
+            # Mock: simulate LLM deciding to delegate
+            if any(word in user_query.lower() for word in ["analyze", "analysis", "statistics", "data"]):
+                print("[Coordinator] Mock LLM decides to delegate to analyst")
+                response = await self.peers.request(
+                    "analyst",
+                    {"question": user_query},
+                    timeout=30
+                )
+                return f"Based on the analyst's findings: {response.get('analysis', 'No response')}"
+            return f"I can help with: {user_query}"
+
+        # Real LLM call with tools
+        tools = self._get_tools()
+        messages = [{"role": "user", "content": user_query}]
+
+        print(f"[Coordinator] Calling LLM with {len(tools)} tools: {[t['name'] for t in tools]}")
+
+        response = self.llm["client"].messages.create(
+            model=self.llm["model"],
+            max_tokens=1024,
+            system=system_prompt,
+            messages=messages,
+            tools=tools
+        )
+
+        print(f"[Coordinator] LLM stop_reason: {response.stop_reason}")
+        print(f"[Coordinator] Response blocks: {[b.type for b in response.content]}")
+
+        # Handle tool use - check for tool_use FIRST (prioritize over text)
+        tool_use_block = None
+        text_content = None
+
+        for block in response.content:
+            if block.type == "tool_use" and block.name == "ask_peer":
+                tool_use_block = block
+            elif hasattr(block, 'text'):
+                text_content = block.text
+
+        # If there's a tool use, execute it
+        if tool_use_block:
+            print(f"[Coordinator] LLM decided to use ask_peer tool")
+            peer_response = await self._execute_peer_tool(tool_use_block.input)
+
+            # Continue conversation with tool result
+            messages.append({"role": "assistant", "content": response.content})
+            messages.append({
+                "role": "user",
+                "content": [{
+                    "type": "tool_result",
+                    "tool_use_id": tool_use_block.id,
+                    "content": str(peer_response)
+                }]
+            })
+
+            final_response = self.llm["client"].messages.create(
+                model=self.llm["model"],
+                max_tokens=1024,
+                system=system_prompt,
+                messages=messages
+            )
+
+            for final_block in final_response.content:
+                if hasattr(final_block, 'text'):
+                    return final_block.text
+
+        # No tool use, return text content
+        if text_content:
+            return text_content
+
+        return "I processed your request."
+
+    def _get_tools(self) -> list:
+        """Get tools for LLM, including peer tools."""
+        return [{
+            "name": "ask_peer",
+            "description": "Ask a specialist agent for help. Use this to delegate tasks to experts.",
+            "input_schema": {
+                "type": "object",
+                "properties": {
+                    "role": {
+                        "type": "string",
+                        "description": "Role of the agent to ask (e.g., 'analyst')"
+                    },
+                    "question": {
+                        "type": "string",
+                        "description": "The question or task for the specialist"
+                    }
+                },
+                "required": ["role", "question"]
+            }
+        }]
+
+    async def _execute_peer_tool(self, args: dict) -> dict:
+        """Execute ask_peer tool."""
+        role = args.get("role", "")
+        question = args.get("question", "")
+
+        print(f"[Coordinator] Asking {role}: {question[:50]}...")
+
+        response = await self.peers.request(
+            role,
+            {"question": question},
+            timeout=30
+        )
+
+        return response
+
+    async def on_peer_request(self, msg):
+        """Handle incoming peer requests (for workflow compatibility)."""
+        query = msg.data.get("query", msg.data.get("question", ""))
+        result = await self.process_query(query)
+        return {"response": result}
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# MAIN - Demonstrate cognitive discovery
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def main():
+    print("=" * 60)
+    print("LLM Cognitive Discovery Example")
+    print("=" * 60)
+
+    # Create mesh with both agents
+    mesh = Mesh(mode="p2p", config={"bind_port": 7960})
+
+    analyst = mesh.add(AnalystAgent())
+    coordinator = mesh.add(CoordinatorAgent())
+
+    await mesh.start()
+
+    print(f"\n[Setup] Mesh started with agents:")
+    print(f" - {analyst.role}: {analyst.capabilities}")
+    print(f" - {coordinator.role}: {coordinator.capabilities}")
+
+    # Start analyst listener in background
+    analyst_task = asyncio.create_task(analyst.run())
+
+    # Give time for setup
+    await asyncio.sleep(0.5)
+
+    # Show cognitive context that LLM will see
+    print("\n" + "=" * 60)
+    print("COGNITIVE CONTEXT (what LLM sees about peers)")
+    print("=" * 60)
+    context = coordinator.peers.get_cognitive_context()
+    print(context)
+
+    # Test queries - one that should trigger delegation, one that shouldn't
+    test_queries = [
+        "Please analyze the Q4 sales data and give me insights",
+        "What time is it?",
+    ]
+
+    print("\n" + "=" * 60)
+    print("PROCESSING QUERIES")
+    print("=" * 60)
+
+    for query in test_queries:
+        print(f"\n>>> User: {query}")
+        response = await coordinator.process_query(query)
+        print(f"<<< Coordinator: {response}")
+
+    # Cleanup
+    analyst.request_shutdown()
+    analyst_task.cancel()
+    try:
+        await analyst_task
+    except asyncio.CancelledError:
+        pass
+
+    await mesh.stop()
+    print("\n[Done] Example completed!")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
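Stripped of logging and the mock fallback, the coordinator pattern above is two steps: inject the live peer context into the system prompt, then satisfy any ask_peer tool call with peers.request(). A minimal sketch (not part of the package, assuming the peers.get_cognitive_context() / peers.request() API, the _get_tools() schema, and the Anthropic client shown in the example):

    async def answer(agent, client, model, user_query):
        # Step 1: let the LLM see which peers are on the mesh right now
        system_prompt = (
            "You coordinate tasks; delegate analysis via the ask_peer tool.\n\n"
            + agent.peers.get_cognitive_context()
        )
        response = client.messages.create(
            model=model,
            max_tokens=1024,
            system=system_prompt,
            messages=[{"role": "user", "content": user_query}],
            tools=agent._get_tools(),  # the ask_peer schema defined in the example
        )

        # Step 2: if the LLM chose to delegate, forward the question to that peer
        for block in response.content:
            if block.type == "tool_use" and block.name == "ask_peer":
                return await agent.peers.request(
                    block.input["role"], {"question": block.input["question"]}, timeout=30
                )
        return next((b.text for b in response.content if hasattr(b, "text")), "")

Because the peer list is queried at prompt-build time, adding or removing specialists from the mesh changes what the coordinator can delegate to without any prompt edits.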