jarviscore-framework 0.2.1-py3-none-any.whl → 0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. examples/cloud_deployment_example.py +162 -0
  2. examples/customagent_cognitive_discovery_example.py +343 -0
  3. examples/fastapi_integration_example.py +570 -0
  4. jarviscore/__init__.py +19 -5
  5. jarviscore/cli/smoketest.py +8 -4
  6. jarviscore/core/agent.py +227 -0
  7. jarviscore/core/mesh.py +9 -0
  8. jarviscore/data/examples/cloud_deployment_example.py +162 -0
  9. jarviscore/data/examples/custom_profile_decorator.py +134 -0
  10. jarviscore/data/examples/custom_profile_wrap.py +168 -0
  11. jarviscore/data/examples/customagent_cognitive_discovery_example.py +343 -0
  12. jarviscore/data/examples/fastapi_integration_example.py +570 -0
  13. jarviscore/docs/API_REFERENCE.md +283 -3
  14. jarviscore/docs/CHANGELOG.md +139 -0
  15. jarviscore/docs/CONFIGURATION.md +1 -1
  16. jarviscore/docs/CUSTOMAGENT_GUIDE.md +997 -85
  17. jarviscore/docs/GETTING_STARTED.md +228 -267
  18. jarviscore/docs/TROUBLESHOOTING.md +1 -1
  19. jarviscore/docs/USER_GUIDE.md +153 -8
  20. jarviscore/integrations/__init__.py +16 -0
  21. jarviscore/integrations/fastapi.py +247 -0
  22. jarviscore/p2p/broadcaster.py +10 -3
  23. jarviscore/p2p/coordinator.py +310 -14
  24. jarviscore/p2p/keepalive.py +45 -23
  25. jarviscore/p2p/peer_client.py +311 -12
  26. jarviscore/p2p/swim_manager.py +9 -4
  27. jarviscore/profiles/__init__.py +7 -1
  28. jarviscore/profiles/customagent.py +295 -74
  29. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/METADATA +66 -18
  30. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/RECORD +37 -22
  31. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/WHEEL +1 -1
  32. tests/test_13_dx_improvements.py +554 -0
  33. tests/test_14_cloud_deployment.py +403 -0
  34. tests/test_15_llm_cognitive_discovery.py +684 -0
  35. tests/test_16_unified_dx_flow.py +947 -0
  36. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/licenses/LICENSE +0 -0
  37. {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/top_level.txt +0 -0
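The largest additions are the new FastAPI integration module (jarviscore/integrations/fastapi.py) and the example files that exercise it. For orientation, here is a minimal sketch of how the new JarvisLifespan entry point is used, based only on the names that appear in the examples/fastapi_integration_example.py diff below (the EchoAgent class and /agents route are illustrative stand-ins, not part of the package):

    # Minimal sketch, assuming the API shown in the example diff below:
    # JarvisLifespan wires agent startup/shutdown into FastAPI's lifespan and
    # exposes the running agents on app.state.jarvis_agents.
    from fastapi import FastAPI, Request
    from jarviscore.integrations import JarvisLifespan
    from jarviscore.profiles import CustomAgent

    class EchoAgent(CustomAgent):
        # Stand-in agent for illustration; the example below defines
        # assistant/analyst/researcher agents with LLM-backed handlers.
        role = "echo"
        capabilities = ["chat"]

        async def on_peer_request(self, msg):
            return {"echo": msg.data.get("question", "")}

    # JarvisLifespan starts the agents and the p2p mesh on app startup
    # and tears them down on shutdown.
    app = FastAPI(lifespan=JarvisLifespan([EchoAgent()], mode="p2p", bind_port=7980))

    @app.get("/agents")
    async def agents(request: Request):
        # Agents started by JarvisLifespan are available on app.state.jarvis_agents.
        return {"agents": list(request.app.state.jarvis_agents.keys())}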
@@ -0,0 +1,570 @@
+"""
+FastAPI Integration Example (v0.3.0)
+
+Demonstrates JarvisLifespan for 3-line FastAPI integration with autonomous agents.
+
+Features shown:
+1. JarvisLifespan - Automatic agent lifecycle management
+2. CustomAgent - API-first agents with on_peer_request handlers
+3. Cognitive Discovery - get_cognitive_context() for LLM awareness
+4. Autonomous Agents - Each agent has MESH as a TOOL, LLM decides when to delegate
+
+Real-World Flow:
+    HTTP Request → Agent A (with LLM) → LLM sees peers as tools
+    → LLM decides to ask Agent B → Agent B responds → HTTP Response
+
+Usage:
+    # Start FastAPI server with all agents
+    python examples/fastapi_integration_example.py
+
+    # Test the endpoint
+    curl -X POST http://localhost:8000/chat \
+      -H "Content-Type: application/json" \
+      -d '{"message": "Analyze the Q4 sales trends"}'
+
+    # Optional: Start a standalone agent that joins the mesh (in another terminal)
+    python examples/fastapi_integration_example.py --join-as scout
+
+Prerequisites:
+    - pip install fastapi uvicorn
+    - .env file with CLAUDE_API_KEY
+"""
+import asyncio
+import sys
+import os
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+try:
+    from fastapi import FastAPI, Request
+    from fastapi.responses import JSONResponse
+    import uvicorn
+    FASTAPI_AVAILABLE = True
+except ImportError:
+    FASTAPI_AVAILABLE = False
+    print("FastAPI not installed. Run: pip install fastapi uvicorn")
+
+from jarviscore.profiles import CustomAgent
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# LLM-POWERED AGENT BASE - Each agent can discover and delegate
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class LLMAgent(CustomAgent):
+    """
+    Base for LLM-powered agents that can discover and delegate to peers.
+
+    KEY PATTERN: The mesh is a TOOL for the LLM.
+    - get_cognitive_context() tells LLM who's available
+    - ask_peer tool lets LLM delegate to specialists
+    - Each agent is autonomous - no central coordinator needed
+    """
+
+    async def setup(self):
+        await super().setup()
+        self.llm = self._create_llm_client()
+
+    def _create_llm_client(self):
+        """Create LLM client."""
+        try:
+            from anthropic import Anthropic
+            from jarviscore.config import settings
+
+            api_key = settings.claude_api_key or os.environ.get("CLAUDE_API_KEY")
+            if not api_key:
+                return None
+
+            endpoint = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
+            model = settings.claude_model or os.environ.get("CLAUDE_MODEL") or "claude-sonnet-4-20250514"
+
+            client = Anthropic(api_key=api_key, base_url=endpoint) if endpoint else Anthropic(api_key=api_key)
+
+            # Validate
+            client.messages.create(model=model, max_tokens=10, messages=[{"role": "user", "content": "Hi"}])
+            print(f"[{self.role}] LLM initialized: {model}")
+            return {"client": client, "model": model}
+        except Exception as e:
+            print(f"[{self.role}] LLM not available: {e}")
+            return None
+
+    def _get_tools(self):
+        """Get tools for LLM - includes ask_peer for mesh communication."""
+        return [{
+            "name": "ask_peer",
+            "description": "Ask another agent in the mesh for help. Use this to delegate tasks to specialists.",
+            "input_schema": {
+                "type": "object",
+                "properties": {
+                    "role": {"type": "string", "description": "Role of the agent to ask (e.g., 'analyst', 'researcher')"},
+                    "question": {"type": "string", "description": "The question or task for that agent"}
+                },
+                "required": ["role", "question"]
+            }
+        }]
+
+    async def _ask_peer(self, role: str, question: str) -> dict:
+        """Execute ask_peer tool - send request to another agent."""
+        print(f"[{self.role}] Asking {role}: {question[:50]}...")
+        response = await self.peers.request(role, {"question": question}, timeout=30)
+        print(f"[{self.role}] Got response from {role}")
+        return response
+
+    async def chat(self, message: str) -> dict:
+        """
+        Process a message with LLM that can discover and delegate to peers.
+
+        This is the CORE PATTERN:
+        1. Build system prompt with WHO I AM + WHO ELSE IS AVAILABLE
+        2. LLM sees available peers as potential helpers
+        3. LLM decides whether to handle directly or delegate
+        """
+        if not self.llm:
+            return await self._chat_mock(message)
+
+        # DYNAMIC DISCOVERY: Tell LLM who it is and who else is available
+        peer_context = self.peers.get_cognitive_context() if self.peers else ""
+
+        system_prompt = f"""{self.system_prompt}
+
+{peer_context}"""
+
+        print(f"\n[{self.role}] Processing: {message[:50]}...")
+
+        response = self.llm["client"].messages.create(
+            model=self.llm["model"],
+            max_tokens=1024,
+            system=system_prompt,
+            messages=[{"role": "user", "content": message}],
+            tools=self._get_tools()
+        )
+
+        # Handle tool use
+        tool_use_block = None
+        text_content = None
+
+        for block in response.content:
+            if block.type == "tool_use" and block.name == "ask_peer":
+                tool_use_block = block
+            elif hasattr(block, 'text'):
+                text_content = block.text
+
+        if tool_use_block:
+            role = tool_use_block.input.get("role")
+            question = tool_use_block.input.get("question")
+
+            peer_response = await self._ask_peer(role, question)
+
+            # Continue with tool result
+            messages = [{"role": "user", "content": message}]
+            messages.append({"role": "assistant", "content": response.content})
+            messages.append({
+                "role": "user",
+                "content": [{"type": "tool_result", "tool_use_id": tool_use_block.id, "content": str(peer_response)}]
+            })
+
+            final = self.llm["client"].messages.create(
+                model=self.llm["model"],
+                max_tokens=1024,
+                system=system_prompt,
+                messages=messages
+            )
+
+            for block in final.content:
+                if hasattr(block, 'text'):
+                    return {"response": block.text,
+                            "delegated_to": role, "peer_data": peer_response}
+
+        return {"response": text_content or "Processed.", "delegated_to": None}
+
+    async def _chat_mock(self, message: str) -> dict:
+        """Mock when LLM unavailable - for testing."""
+        return {"response": f"[{self.role}] Received: {message}", "delegated_to": None}
+
+    # System prompt - override in subclasses
+    system_prompt = "You are a helpful agent."
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# AUTONOMOUS AGENTS - Each has LLM + mesh discovery
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class AssistantAgent(LLMAgent):
+    """
+    General assistant that can delegate to specialists.
+
+    When user asks something outside its expertise, it discovers
+    and delegates to the appropriate specialist via the mesh.
+    """
+    role = "assistant"
+    capabilities = ["chat", "general_help", "delegation"]
+    description = "General assistant that delegates specialized tasks to experts"
+
+    system_prompt = """You are a helpful assistant. You can answer general questions directly.
+
+For specialized tasks, you have access to other agents via the ask_peer tool:
+- For data analysis, statistics, or insights → ask the "analyst" agent
+- For research or information gathering → ask the "researcher" agent
+
+Use ask_peer when the task requires specialized expertise. Be helpful and concise."""
+
+    async def on_peer_request(self, msg):
+        """Handle requests from other agents."""
+        return await self.chat(msg.data.get("question", ""))
+
+
+class AnalystAgent(LLMAgent):
+    """
+    Data analysis specialist with LLM.
+
+    Can also discover and ask other agents if needed.
+    """
+    role = "analyst"
+    capabilities = ["data_analysis", "statistics", "insights", "reporting"]
+    description = "Expert data analyst for statistics and insights"
+
+    system_prompt = """You are an expert data analyst. You specialize in:
+- Statistical analysis
+- Data insights and trends
+- Business metrics and KPIs
+- Data visualization recommendations
+
+Provide clear, actionable insights. If you need research data, you can ask the "researcher" agent."""
+
+    async def on_peer_request(self, msg):
+        """Handle analysis requests."""
+        question = msg.data.get("question", "")
+        print(f"\n[Analyst] Received: {question[:50]}...")
+
+        # Analyst can use LLM to generate analysis
+        if self.llm:
+            response = self.llm["client"].messages.create(
+                model=self.llm["model"],
+                max_tokens=512,
+                system=self.system_prompt,
+                messages=[{"role": "user", "content": f"Analyze this request and provide insights: {question}"}]
+            )
+            for block in response.content:
+                if hasattr(block, 'text'):
+                    return {"analysis": block.text, "confidence": 0.9}
+
+        # Fallback
+        return {
+            "analysis": f"Analysis of: {question}",
+            "findings": ["Revenue up 15%", "Costs down 8%", "Growth trend positive"],
+            "confidence": 0.85
+        }
+
+
+class ResearcherAgent(LLMAgent):
+    """
+    Research specialist with LLM.
+
+    Can also discover and ask other agents if needed.
+    """
+    role = "researcher"
+    capabilities = ["research", "web_search", "fact_checking", "information_gathering"]
+    description = "Research specialist for gathering and verifying information"
+
+    system_prompt = """You are an expert researcher. You specialize in:
+- Information gathering
+- Fact checking
+- Market research
+- Competitive analysis
+
+Provide well-sourced, accurate information. If you need data analysis, you can ask the "analyst" agent."""
+
+    async def on_peer_request(self, msg):
+        """Handle research requests."""
+        question = msg.data.get("question", "")
+        print(f"\n[Researcher] Received: {question[:50]}...")
+
+        if self.llm:
+            response = self.llm["client"].messages.create(
+                model=self.llm["model"],
+                max_tokens=512,
+                system=self.system_prompt,
+                messages=[{"role": "user", "content": f"Research this topic: {question}"}]
+            )
+            for block in response.content:
+                if hasattr(block, 'text'):
+                    return {"research": block.text, "sources": ["Internal analysis"]}
+
+        return {
+            "research": f"Research on: {question}",
+            "sources": ["Industry Report 2024", "Market Analysis"],
+            "summary": "Research findings compiled"
+        }
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# STANDALONE AGENT - Cloud Deployment Pattern
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class ScoutAgent(LLMAgent):
+    """
+    Standalone agent that can join an existing mesh from anywhere.
+
+    This demonstrates the CLOUD DEPLOYMENT pattern:
+    - Agent runs independently (different process, container, or machine)
+    - Uses join_mesh() to self-register with an existing mesh
+    - Automatically becomes visible to all other agents
+    - Can discover and communicate with mesh peers
+    """
+    role = "scout"
+    capabilities = ["scouting", "reconnaissance", "market_intel", "trend_detection"]
+    description = "Scout agent that gathers market intelligence and detects trends"
+
+    system_prompt = """You are a scout agent specializing in:
+- Market intelligence gathering
+- Trend detection and early signals
+- Competitive reconnaissance
+- Opportunity identification
+
+You can ask the "analyst" for data analysis or "researcher" for deep research."""
+
+    async def on_peer_request(self, msg):
+        """Handle scouting requests."""
+        question = msg.data.get("question", "")
+        print(f"\n[Scout] Received: {question[:50]}...")
+
+        if self.llm:
+            response = self.llm["client"].messages.create(
+                model=self.llm["model"],
+                max_tokens=512,
+                system=self.system_prompt,
+                messages=[{"role": "user", "content": f"Scout this: {question}"}]
+            )
+            for block in response.content:
+                if hasattr(block, 'text'):
+                    return {"intel": block.text, "confidence": 0.85}
+
+        return {
+            "intel": f"Scouting report on: {question}",
+            "signals": ["Emerging trend detected", "Competitor activity noted"],
+            "confidence": 0.8
+        }
+
+
+async def run_standalone_scout(mesh_endpoint: str):
+    """
+    Run scout as a standalone agent that joins an existing mesh.
+
+    This demonstrates the TRUE CLOUD DEPLOYMENT pattern:
+    - Scout runs in a SEPARATE PROCESS from the main mesh
+    - Uses join_mesh() to self-register with the existing mesh
+    - Automatically discovers all other agents
+    - Can communicate with mesh peers via P2P messaging
+
+    Usage:
+        Terminal 1: python examples/fastapi_integration_example.py (starts FastAPI + mesh)
+        Terminal 2: python examples/fastapi_integration_example.py --join-as scout
+    """
+    print("=" * 60)
+    print("STANDALONE SCOUT - Cloud Deployment Demo")
+    print("=" * 60)
+    print(f"\nJoining existing mesh at {mesh_endpoint}...")
+
+    # Create standalone scout agent
+    scout = ScoutAgent()
+
+    try:
+        # Join the existing mesh
+        await scout.join_mesh(seed_nodes=mesh_endpoint)
+        print("Successfully joined mesh!")
+        print(f" - is_mesh_connected: {scout.is_mesh_connected}")
+
+        # Wait for capability exchange to complete
+        await asyncio.sleep(2)
+
+        # Show what scout can see
+        print("\n=== CAPABILITY DISCOVERY ===")
+        if scout.peers:
+            peers = scout.peers.list_peers()
+            print(f"Scout discovered {len(peers)} peer(s):")
+            for p in peers:
+                print(f" - {p['role']}: {p['capabilities']}")
+
+            # Show cognitive context (what LLM would see)
+            print("\n=== COGNITIVE CONTEXT (for LLM) ===")
+            context = scout.peers.get_cognitive_context(format="text")
+            print(context)
+
+        # Show capability map from coordinator
+        if scout._standalone_p2p:
+            print("\n=== FULL CAPABILITY MAP ===")
+            cap_map = scout._standalone_p2p._capability_map
+            for cap, agents in cap_map.items():
+                print(f" {cap}: {agents}")
+
+        # Run scout's event loop - handles incoming requests
+        print("\n=== SCOUT RUNNING ===")
+        print("Scout is now active and can receive requests from other agents.")
+        print("Press Ctrl+C to leave mesh and exit.")
+
+        # Start scout's run loop
+        scout_task = asyncio.create_task(scout.run())
+
+        try:
+            # Keep running until interrupted
+            while not scout.shutdown_requested:
+                await asyncio.sleep(1)
+        except KeyboardInterrupt:
+            print("\n\nShutdown requested...")
+        finally:
+            scout.request_shutdown()
+            scout_task.cancel()
+            try:
+                await scout_task
+            except asyncio.CancelledError:
+                pass
+
+    except Exception as e:
+        print(f"Error joining mesh: {e}")
+        raise
+    finally:
+        # Gracefully leave the mesh
+        print("\n=== LEAVING MESH ===")
+        await scout.leave_mesh()
+        print("Scout has left the mesh.")
+
+    print("\n" + "=" * 60)
+    print("Standalone scout demo complete!")
+    print("=" * 60)
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# FASTAPI APPLICATION - Minimal code with JarvisLifespan
+# ═══════════════════════════════════════════════════════════════════════════════
+
+def create_app():
+    """Create FastAPI app - 3 lines of JarvisCore integration."""
+    from jarviscore.integrations import JarvisLifespan
+
+    # Create autonomous agents
+    agents = [AssistantAgent(), AnalystAgent(), ResearcherAgent()]
+
+    # ONE LINE: JarvisLifespan handles everything
+    app = FastAPI(
+        title="Autonomous Agents Demo",
+        lifespan=JarvisLifespan(agents, mode="p2p", bind_port=7980)
+    )
+
+    @app.get("/")
+    async def root():
+        return {"status": "ok", "agents": ["assistant", "analyst", "researcher"]}
+
+    @app.get("/agents")
+    async def list_agents(request: Request):
+        """Show what each agent can see about others."""
+        result = {}
+        for role, agent in request.app.state.jarvis_agents.items():
+            if agent.peers:
+                result[role] = {
+                    "can_see": [p["role"] for p in agent.peers.list_peers()],
+                    "cognitive_context": agent.peers.get_cognitive_context(format="text")
+                }
+        return result
+
+    @app.post("/chat")
+    async def chat(request: Request):
+        """
+        Chat endpoint - assistant uses mesh to discover and delegate.
+
+        The assistant's LLM:
+        1. Sees other agents via get_cognitive_context()
+        2. Decides if it needs to delegate
+        3. Uses ask_peer tool to communicate
+        """
+        body = await request.json()
+        message = body.get("message", "")
+
+        assistant = request.app.state.jarvis_agents.get("assistant")
+        if not assistant:
+            return JSONResponse(status_code=503, content={"error": "Assistant not available"})
+
+        result = await assistant.chat(message)
+        return {"message": message, **result}
+
+    @app.post("/ask/{agent_role}")
+    async def ask_agent(agent_role: str, request: Request):
+        """Ask a specific agent directly."""
+        body = await request.json()
+        message = body.get("message", "")
+
+        agent = request.app.state.jarvis_agents.get(agent_role)
+        if not agent:
+            return JSONResponse(status_code=404, content={"error": f"Agent '{agent_role}' not found"})
+
+        result = await agent.chat(message)
+        return {"agent": agent_role, "message": message, **result}
+
+    return app
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# MAIN - Supports both FastAPI server and standalone agent modes
+# ═══════════════════════════════════════════════════════════════════════════════
+
+def main():
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description="Unified DX Example - Autonomous Agents with Mesh Discovery"
+    )
+    parser.add_argument(
+        "--join-as",
+        type=str,
+        choices=["scout"],
+        help="Run as standalone agent that joins existing mesh (e.g., --join-as scout)"
+    )
+    parser.add_argument(
+        "--mesh-endpoint",
+        type=str,
+        default="127.0.0.1:7980",
+        help="Mesh endpoint to join (default: 127.0.0.1:7980)"
+    )
+
+    args = parser.parse_args()
+
+    # MODE 2: Standalone agent joins existing mesh
+    if args.join_as:
+        print(f"Starting {args.join_as} as standalone agent...")
+        asyncio.run(run_standalone_scout(args.mesh_endpoint))
+        return
+
+    # MODE 1: FastAPI server with all agents
+    if not FASTAPI_AVAILABLE:
+        print("Install FastAPI: pip install fastapi uvicorn")
+        return
+
+    print("=" * 60)
+    print("Autonomous Agents with Mesh Discovery")
+    print("=" * 60)
+    print("\n - FastAPI Integration:")
+    print("   - JarvisLifespan for one-line integration")
+    print("   - CustomAgent with on_peer_request handlers")
+    print("   - Cognitive discovery via get_cognitive_context()")
+    print("\n - Cloud Deployment:")
+    print("   - Each agent has MESH as a TOOL")
+    print("   - LLM decides when to delegate autonomously")
+    print("   - Standalone agents can join with --join-as flag")
+    print("\nEndpoints:")
+    print("  GET  /agents     - Show what each agent sees")
+    print("  POST /chat       - Chat with assistant (may delegate)")
+    print("  POST /ask/{role} - Ask specific agent directly")
+    print("\nTest:")
+    print('  curl -X POST http://localhost:8000/chat \\')
+    print('    -H "Content-Type: application/json" \\')
+    print('    -d \'{"message": "Analyze Q4 sales trends"}\'')
+    print("\nCloud Deployment (in another terminal):")
+    print("  python examples/fastapi_integration_example.py --join-as scout")
+    print("=" * 60)
+
+    app = create_app()
+    uvicorn.run(app, host="0.0.0.0", port=8000)
+
+
+if __name__ == "__main__":
+    main()