jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +730 -0
  6. jarviscore/__init__.py +49 -36
  7. jarviscore/adapter/__init__.py +15 -9
  8. jarviscore/adapter/decorator.py +23 -19
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/scaffold.py +1 -1
  11. jarviscore/cli/smoketest.py +3 -2
  12. jarviscore/core/agent.py +44 -1
  13. jarviscore/core/mesh.py +196 -35
  14. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  15. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  16. jarviscore/data/examples/customagent_p2p_example.py +730 -0
  17. jarviscore/docs/API_REFERENCE.md +264 -51
  18. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  19. jarviscore/docs/CONFIGURATION.md +35 -21
  20. jarviscore/docs/CUSTOMAGENT_GUIDE.md +1362 -0
  21. jarviscore/docs/GETTING_STARTED.md +107 -14
  22. jarviscore/docs/TROUBLESHOOTING.md +145 -7
  23. jarviscore/docs/USER_GUIDE.md +138 -361
  24. jarviscore/orchestration/engine.py +20 -8
  25. jarviscore/p2p/__init__.py +10 -0
  26. jarviscore/p2p/coordinator.py +129 -0
  27. jarviscore/p2p/messages.py +87 -0
  28. jarviscore/p2p/peer_client.py +576 -0
  29. jarviscore/p2p/peer_tool.py +268 -0
  30. jarviscore_framework-0.2.1.dist-info/METADATA +144 -0
  31. jarviscore_framework-0.2.1.dist-info/RECORD +132 -0
  32. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +1 -1
  33. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +1 -0
  34. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  35. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  36. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  37. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  38. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  39. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  40. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  41. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  42. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  43. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  44. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  45. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  46. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  47. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  48. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  49. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  50. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  51. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  52. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  53. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  54. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  55. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  56. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  57. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  60. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  61. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  62. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  63. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  64. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  65. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  66. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  67. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  68. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  69. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  70. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  71. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  72. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  73. tests/test_01_analyst_standalone.py +124 -0
  74. tests/test_02_assistant_standalone.py +164 -0
  75. tests/test_03_analyst_with_framework.py +945 -0
  76. tests/test_04_assistant_with_framework.py +1002 -0
  77. tests/test_05_integration.py +1301 -0
  78. tests/test_06_real_llm_integration.py +760 -0
  79. tests/test_07_distributed_single_node.py +578 -0
  80. tests/test_08_distributed_multi_node.py +454 -0
  81. tests/test_09_distributed_autoagent.py +509 -0
  82. tests/test_10_distributed_customagent.py +787 -0
  83. tests/test_mesh.py +35 -4
  84. jarviscore_framework-0.1.1.dist-info/METADATA +0 -137
  85. jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
  86. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
examples/customagent_p2p_example.py
@@ -0,0 +1,730 @@
+ """
+ CustomAgent P2P Mode Example
+ 
+ Demonstrates LLM-DRIVEN PEER COMMUNICATION where:
+ - Agents have their own LLM for reasoning
+ - Peer tools (ask_peer, broadcast) are added to the LLM's toolset
+ - The LLM AUTONOMOUSLY decides when to ask other agents for help
+ 
+ KEY PATTERN:
+ 1. Add peer tools to get_tools() → LLM sees them
+ 2. Route tool execution in execute_tool() → handles peer calls
+ 3. Update system prompt → tells LLM about peer capabilities
+ 4. LLM decides → "I need analysis help, let me ask the analyst"
+ 
+ This is ideal for:
+ - Autonomous agent swarms
+ - Real-time collaborative systems
+ - Agents that intelligently delegate tasks
+ 
+ Usage:
+     python examples/customagent_p2p_example.py
+ 
+ Prerequisites:
+ - .env file with LLM API key (CLAUDE_API_KEY, etc.)
+ """
+ import asyncio
+ import sys
+ from pathlib import Path
+ 
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+ 
+ from jarviscore import Mesh
+ from jarviscore.profiles import CustomAgent
+ 
+ 
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # LLM CLIENT
+ # ═══════════════════════════════════════════════════════════════════════════════
+ 
+ class LLMClient:
+     """
+     LLM client with tool calling support.
+     Replace with your actual LLM client (OpenAI, Anthropic, etc.)
+     """
+ 
+     def __init__(self):
+         self.available = False
+         self.client = None
+         self.model = None
+ 
+         try:
+             from anthropic import Anthropic
+             from jarviscore.config import settings
+ 
+             api_key = settings.claude_api_key
+             if not api_key:
+                 raise RuntimeError("No API key")
+ 
+             endpoint = settings.claude_endpoint
+             if endpoint:
+                 self.client = Anthropic(api_key=api_key, base_url=endpoint)
+             else:
+                 self.client = Anthropic(api_key=api_key)
+ 
+             self.model = settings.claude_model or "claude-sonnet-4-20250514"
+             self.available = True
+             print(f"[LLM] Initialized with model: {self.model}")
+         except Exception as e:
+             print(f"[LLM] Not available: {e} - using mock responses")
+ 
+     def chat_with_tools(
+         self,
+         messages: list,
+         tools: list,
+         system: str = None,
+         max_tokens: int = 1024
+     ) -> dict:
+         """
+         Chat with LLM and tools.
+ 
+         Returns:
+             {"type": "text", "content": "..."} or
+             {"type": "tool_use", "tool_name": "...", "tool_args": {...}, "tool_use_id": "..."}
+         """
+         if not self.available:
+             # Mock response for testing without API key
+             user_msg = ""
+             for msg in messages:
+                 if isinstance(msg.get("content"), str):
+                     user_msg = msg.get("content", "").lower()
+ 
+             if "analyze" in user_msg or "analysis" in user_msg or "trend" in user_msg:
+                 return {
+                     "type": "tool_use",
+                     "tool_name": "ask_peer",
+                     "tool_args": {"role": "analyst", "question": user_msg},
+                     "tool_use_id": "mock_id_001"
+                 }
+             if "search" in user_msg:
+                 return {
+                     "type": "tool_use",
+                     "tool_name": "web_search",
+                     "tool_args": {"query": user_msg},
+                     "tool_use_id": "mock_id_002"
+                 }
+             return {"type": "text", "content": "Hello! How can I help you today?"}
+ 
+         # Build request
+         request_kwargs = {
+             "model": self.model,
+             "max_tokens": max_tokens,
+             "messages": messages,
+         }
+ 
+         if system:
+             request_kwargs["system"] = system
+ 
+         if tools:
+             request_kwargs["tools"] = tools
+ 
+         # Make the API call
+         response = self.client.messages.create(**request_kwargs)
+ 
+         # Parse response - check for tool_use first
+         result = {"stop_reason": response.stop_reason}
+ 
+         for block in response.content:
+             if block.type == "tool_use":
+                 result["type"] = "tool_use"
+                 result["tool_name"] = block.name
+                 result["tool_args"] = block.input
+                 result["tool_use_id"] = block.id
+                 return result  # Return immediately on tool use
+             elif block.type == "text":
+                 result["type"] = "text"
+                 result["content"] = block.text
+ 
+         return result
+ 
+     def continue_with_tool_result(
+         self,
+         messages: list,
+         tool_use_id: str,
+         tool_name: str,
+         tool_args: dict,
+         tool_result: str,
+         tools: list = None,
+         system: str = None
+     ) -> dict:
+         """
+         Continue conversation after tool execution.
+ 
+         This properly formats the assistant's tool use and the tool result.
+         """
+         if not self.available:
+             return {"type": "text", "content": f"Based on the {tool_name} result: {tool_result[:100]}..."}
+ 
+         # Build new messages with tool use and result
+         new_messages = messages + [
+             {
+                 "role": "assistant",
+                 "content": [
+                     {
+                         "type": "tool_use",
+                         "id": tool_use_id,
+                         "name": tool_name,
+                         "input": tool_args
+                     }
+                 ]
+             },
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "tool_result",
+                         "tool_use_id": tool_use_id,
+                         "content": tool_result
+                     }
+                 ]
+             }
+         ]
+ 
+         # Continue the conversation
+         return self.chat_with_tools(new_messages, tools or [], system)
+ 
+ 
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # ANALYST AGENT - Specialist in data analysis
+ # ═══════════════════════════════════════════════════════════════════════════════
+ 
+ class AnalystAgent(CustomAgent):
+     """
+     Analyst agent - specialist in data analysis.
+ 
+     This agent:
+     1. Listens for incoming requests from peers
+     2. Processes requests using its own LLM
+     3. Has local tools (statistical_analysis, trend_detection)
+     4. Can also ask other peers if needed (via peer tools)
+     """
+     role = "analyst"
+     capabilities = ["analysis", "data_interpretation", "reporting"]
+ 
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.llm = None
+         self.requests_received = []
+ 
+     async def setup(self):
+         """Initialize LLM client."""
+         await super().setup()
+         self.llm = LLMClient()
+         self._logger.info(f"[{self.role}] Ready with LLM-powered analysis")
+ 
+     def get_tools(self) -> list:
+         """
+         Tools available to THIS agent's LLM.
+ 
+         Includes local analysis tools AND peer tools.
+         """
+         tools = [
+             {
+                 "name": "statistical_analysis",
+                 "description": "Run statistical analysis on numeric data (mean, std, variance)",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "data": {"type": "string", "description": "Data to analyze"}
+                     },
+                     "required": ["data"]
+                 }
+             },
+             {
+                 "name": "trend_detection",
+                 "description": "Detect trends and patterns in time series data",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "data": {"type": "string", "description": "Time series data"}
+                     },
+                     "required": ["data"]
+                 }
+             }
+         ]
+ 
+         # ADD PEER TOOLS - analyst can ask other agents if needed
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+ 
+         return tools
+ 
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """Execute a tool - routes to peer tools or local tools."""
+         # PEER TOOLS
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             return await self.peers.as_tool().execute(tool_name, args)
+ 
+         # LOCAL TOOLS
+         if tool_name == "statistical_analysis":
+             data = args.get("data", "")
+             return f"Statistical analysis of '{data}': mean=150.3, std=23.4, variance=547.6, trend=positive"
+ 
+         if tool_name == "trend_detection":
+             data = args.get("data", "")
+             return f"Trend analysis of '{data}': Upward trend detected with 92% confidence, growth rate 3.2%"
+ 
+         return f"Unknown tool: {tool_name}"
+ 
+     async def process_with_llm(self, query: str) -> str:
+         """Process request using LLM with tools."""
+         system_prompt = """You are an expert data analyst.
+ You specialize in analyzing data, finding patterns, and providing insights.
+ You have tools for statistical analysis and trend detection.
+ Be concise but thorough in your analysis."""
+ 
+         # Get tools (excluding peer tools to avoid loops in analyst)
+         tools = [t for t in self.get_tools()
+                  if t["name"] not in ["ask_peer", "broadcast_update", "list_peers"]]
+ 
+         messages = [{"role": "user", "content": query}]
+         response = self.llm.chat_with_tools(messages, tools, system_prompt)
+ 
+         # Handle tool use
+         if response.get("type") == "tool_use":
+             tool_name = response["tool_name"]
+             tool_args = response["tool_args"]
+             tool_use_id = response["tool_use_id"]
+ 
+             tool_result = await self.execute_tool(tool_name, tool_args)
+ 
+             response = self.llm.continue_with_tool_result(
+                 messages, tool_use_id, tool_name, tool_args, tool_result, tools, system_prompt
+             )
+ 
+         return response.get("content", "Analysis complete.")
+ 
+     async def run(self):
+         """Main loop - listen for incoming requests."""
+         self._logger.info(f"[{self.role}] Starting run loop...")
+ 
+         while not self.shutdown_requested:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     query = msg.data.get("question", msg.data.get("query", ""))
+                     self.requests_received.append(query)
+ 
+                     # Show receipt
+                     print(f"\n │ ┌─ [ANALYST] Received request from {msg.sender}")
+                     print(f" │ │  Query: {query[:80]}...")
+ 
+                     # Process with LLM
+                     print(" │ │  Processing with LLM...")
+                     result = await self.process_with_llm(query)
+ 
+                     # Show response
+                     print(" │ └─ [ANALYST] Sending response back")
+ 
+                     await self.peers.respond(msg, {"response": result})
+             else:
+                 await asyncio.sleep(0.1)
+ 
+     async def execute_task(self, task: dict) -> dict:
+         """Required by base class."""
+         return {"status": "success", "note": "This agent uses run() for P2P mode"}
+ 
+ 
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # ASSISTANT AGENT - Coordinator that delegates to specialists
+ # ═══════════════════════════════════════════════════════════════════════════════
+ 
+ class AssistantAgent(CustomAgent):
+     """
+     Assistant agent - coordinates with specialist agents.
+ 
+     KEY PATTERN DEMONSTRATED:
+     1. Has its own LLM for reasoning
+     2. Peer tools (ask_peer, broadcast) are in its toolset
+     3. LLM AUTONOMOUSLY decides when to ask other agents
+     4. No manual "if analysis_needed: call_analyst()" logic!
+ 
+     The LLM sees:
+     - web_search (local tool)
+     - ask_peer (peer tool) ← LLM decides when to use this!
+     - broadcast_update (peer tool)
+     - list_peers (peer tool)
+     """
+     role = "assistant"
+     capabilities = ["chat", "coordination", "search"]
+ 
+     def __init__(self, agent_id=None):
+         super().__init__(agent_id)
+         self.llm = None
+         self.tool_calls = []  # Track what tools LLM uses
+ 
+     async def setup(self):
+         """Initialize LLM client."""
+         await super().setup()
+         self.llm = LLMClient()
+         self._logger.info(f"[{self.role}] Ready with LLM + peer tools")
+ 
+     def get_tools(self) -> list:
+         """
+         Tools available to THIS agent's LLM.
+ 
+         IMPORTANT: This includes PEER TOOLS!
+         The LLM sees ask_peer, broadcast_update, list_peers
+         and decides when to use them autonomously.
+         """
+         # Local tools
+         tools = [
+             {
+                 "name": "web_search",
+                 "description": "Search the web for information",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "query": {"type": "string", "description": "Search query"}
+                     },
+                     "required": ["query"]
+                 }
+             }
+         ]
+ 
+         # ═══════════════════════════════════════════════════════════════════
+         # KEY: ADD PEER TOOLS TO LLM'S TOOLSET
+         #
+         # This is the core pattern! After this, LLM will see:
+         #   - ask_peer: Ask another agent by role
+         #   - broadcast_update: Send message to all peers
+         #   - list_peers: See available agents and their capabilities
+         #
+         # The LLM decides when to use these based on the user's request.
+         # ═══════════════════════════════════════════════════════════════════
+         if self.peers:
+             tools.extend(self.peers.as_tool().schema)
+ 
+         return tools
+ 
+     async def execute_tool(self, tool_name: str, args: dict) -> str:
+         """
+         Execute a tool by name.
+ 
+         When LLM decides to use ask_peer, this routes to the peer system.
+         No manual delegation logic - just routing!
+         """
+         self.tool_calls.append({"tool": tool_name, "args": args})
+ 
+         # PEER TOOLS - route to peer system
+         if self.peers and tool_name in self.peers.as_tool().tool_names:
+             return await self.peers.as_tool().execute(tool_name, args)
+ 
+         # LOCAL TOOLS
+         if tool_name == "web_search":
+             query = args.get("query", "")
+             return f"Search results for '{query}': Found 10 relevant articles about {query}."
+ 
+         return f"Unknown tool: {tool_name}"
+ 
+     async def chat(self, user_message: str) -> str:
+         """
+         Complete LLM chat with autonomous tool use.
+ 
+         The LLM sees ALL tools (including peer tools) and decides
+         which to use. If user asks for analysis, LLM will use
+         ask_peer to contact the analyst - we don't hardcode this!
+         """
+         # System prompt tells LLM about its capabilities
+         system_prompt = """You are a helpful assistant with access to specialist agents.
+ 
+ YOUR TOOLS:
+ - web_search: Search the web for information
+ - ask_peer: Ask specialist agents for help. Available specialists:
+   * analyst: Expert in data analysis, statistics, and trends
+ - broadcast_update: Send updates to all connected agents
+ - list_peers: See what other agents are available
+ 
+ IMPORTANT GUIDELINES:
+ - When users ask for DATA ANALYSIS, USE ask_peer to ask the analyst
+ - When users ask for WEB INFORMATION, USE web_search
+ - Be concise and helpful in your responses
+ - Always explain what you found from specialists"""
+ 
+         tools = self.get_tools()
+         messages = [{"role": "user", "content": user_message}]
+ 
+         self._logger.info(f"[{self.role}] Processing: {user_message[:50]}...")
+         self._logger.info(f"[{self.role}] Tools available: {[t['name'] for t in tools]}")
+ 
+         # Call LLM with tools - IT decides which to use
+         response = self.llm.chat_with_tools(messages, tools, system_prompt)
+ 
+         # Handle tool use loop (LLM might use multiple tools)
+         iterations = 0
+         while response.get("type") == "tool_use" and iterations < 3:
+             iterations += 1
+             tool_name = response["tool_name"]
+             tool_args = response["tool_args"]
+             tool_use_id = response["tool_use_id"]
+ 
+             print(f"\n ┌─ [ASSISTANT LLM] Decided to use tool: {tool_name}")
+             print(f" │  Args: {tool_args}")
+ 
+             # Execute the tool (might be ask_peer!)
+             tool_result = await self.execute_tool(tool_name, tool_args)
+ 
+             # Show the result from peer if it was ask_peer
+             if tool_name == "ask_peer":
+                 print(" │")
+                 print(" │  ──► [SENT TO ANALYST]")
+                 print(" │")
+                 print(" │  ◄── [ANALYST RESPONDED]:")
+                 print(f" │  {tool_result[:200]}...")
+             else:
+                 print(f" │  Result: {tool_result[:100]}...")
+ 
+             print(" └─ [ASSISTANT LLM] Processing response...")
+ 
+             # Continue conversation with tool result
+             response = self.llm.continue_with_tool_result(
+                 messages, tool_use_id, tool_name, tool_args, tool_result, tools, system_prompt
+             )
+ 
+             # Record the exchange so a second tool call in this loop
+             # still sees the first call and its result
+             messages = messages + [
+                 {"role": "assistant", "content": [
+                     {"type": "tool_use", "id": tool_use_id,
+                      "name": tool_name, "input": tool_args}
+                 ]},
+                 {"role": "user", "content": [
+                     {"type": "tool_result", "tool_use_id": tool_use_id,
+                      "content": tool_result}
+                 ]},
+             ]
+ 
+         return response.get("content", "I processed your request.")
+ 
+     async def run(self):
+         """Main loop - listen for incoming requests from peers."""
+         self._logger.info(f"[{self.role}] Starting run loop...")
+ 
+         while not self.shutdown_requested:
+             if self.peers:
+                 msg = await self.peers.receive(timeout=0.5)
+                 if msg and msg.is_request:
+                     query = msg.data.get("query", "")
+                     result = await self.chat(query)
+                     await self.peers.respond(msg, {"response": result})
+             else:
+                 await asyncio.sleep(0.1)
+ 
+     async def execute_task(self, task: dict) -> dict:
+         """Required by base class."""
+         return {"status": "success", "note": "This agent uses run() for P2P mode"}
+ 
+ 
+ # ═══════════════════════════════════════════════════════════════════════════════
+ # MAIN EXAMPLE
+ # ═══════════════════════════════════════════════════════════════════════════════
+ 
+ async def main():
+     """Run CustomAgent P2P mode example with LLM-driven peer communication."""
+     print("\n" + "="*70)
+     print("JarvisCore: LLM-DRIVEN PEER COMMUNICATION")
+     print("="*70)
+ 
+     print("""
+     This example demonstrates the KEY P2P PATTERN:
+ 
+     ┌──────────────────────────────────────────────────────────────────┐
+     │  User: "Analyze the Q4 sales data"                               │
+     │                    │                                             │
+     │                    ▼                                             │
+     │  ┌─────────────────────────────────────────┐                     │
+     │  │             ASSISTANT'S LLM             │                     │
+     │  │                                         │                     │
+     │  │  Tools: [web_search, ask_peer, ...]     │                     │
+     │  │                                         │                     │
+     │  │  LLM thinks: "User needs analysis,      │                     │
+     │  │  I should ask the analyst agent"        │                     │
+     │  │                                         │                     │
+     │  │  → Uses ask_peer(role="analyst", ...)   │                     │
+     │  └─────────────────────────────────────────┘                     │
+     │                    │                                             │
+     │                    ▼                                             │
+     │  ┌─────────────────────────────────────────┐                     │
+     │  │              ANALYST AGENT              │                     │
+     │  │  (Processes with its own LLM + tools)   │                     │
+     │  └─────────────────────────────────────────┘                     │
+     │                    │                                             │
+     │                    ▼  Returns analysis                           │
+     │  ┌─────────────────────────────────────────┐                     │
+     │  │             ASSISTANT'S LLM             │                     │
+     │  │  "Based on the analyst's findings..."   │                     │
+     │  └─────────────────────────────────────────┘                     │
+     └──────────────────────────────────────────────────────────────────┘
+ 
+     The LLM DECIDES to use ask_peer - we don't hardcode this!
+     """)
+ 
+     # Create mesh
+     mesh = Mesh(
+         mode="p2p",
+         config={
+             'bind_host': '127.0.0.1',
+             'bind_port': 7960,
+             'node_name': 'p2p-demo-node',
+         }
+     )
+ 
+     # Add agents
+     analyst = mesh.add(AnalystAgent)
+     assistant = mesh.add(AssistantAgent)
+ 
+     try:
+         await mesh.start()
+ 
+         print("\n[SETUP] Mesh started in P2P mode")
+         print(f"        Agents: {[a.role for a in mesh.agents]}")
+ 
+         # Show assistant's tools (including peer tools!)
+         tools = assistant.get_tools()
+         print("\n[TOOLS] Assistant's LLM sees these tools:")
+         for tool in tools:
+             print(f"  - {tool['name']}: {tool['description'][:50]}...")
+ 
+         # Start analyst's run loop in background
+         analyst_task = asyncio.create_task(analyst.run())
+         await asyncio.sleep(0.3)
+ 
+         # ─────────────────────────────────────────────────────────────────
+         # TEST 1: Request that should trigger ask_peer → analyst
+         # ─────────────────────────────────────────────────────────────────
+         print("\n" + "─"*70)
+         print("TEST 1: Analysis request (LLM should use ask_peer → analyst)")
+         print("─"*70)
+ 
+         user_message = "Please analyze the Q4 sales trends and identify any anomalies"
+         print(f"\n[USER] {user_message}")
+ 
+         assistant.tool_calls = []  # Reset tracking
+         response = await assistant.chat(user_message)
+ 
+         print(f"\n[ASSISTANT] {response}")
+         print(f"\n[TOOLS USED] {assistant.tool_calls}")
+ 
+         # Verify LLM used ask_peer
+         peer_calls = [c for c in assistant.tool_calls if c["tool"] == "ask_peer"]
+         if peer_calls:
+             print("✓ LLM autonomously decided to ask the analyst!")
+         else:
+             print("○ LLM responded without asking analyst (might happen with mock)")
+ 
+         # ─────────────────────────────────────────────────────────────────
+         # TEST 2: Request that should use local tool (web_search)
+         # ─────────────────────────────────────────────────────────────────
+         print("\n" + "─"*70)
+         print("TEST 2: Search request (LLM should use web_search)")
+         print("─"*70)
+ 
+         user_message = "Search for the latest Python 3.12 features"
+         print(f"\n[USER] {user_message}")
+ 
+         assistant.tool_calls = []
+         response = await assistant.chat(user_message)
+ 
+         print(f"\n[ASSISTANT] {response}")
+         print(f"\n[TOOLS USED] {assistant.tool_calls}")
+ 
+         search_calls = [c for c in assistant.tool_calls if c["tool"] == "web_search"]
+         if search_calls:
+             print("✓ LLM used local web_search tool!")
+ 
+         # ─────────────────────────────────────────────────────────────────
+         # TEST 3: Simple greeting (no tools needed)
+         # ─────────────────────────────────────────────────────────────────
+         print("\n" + "─"*70)
+         print("TEST 3: Simple greeting (no tools needed)")
+         print("─"*70)
+ 
+         user_message = "Hello! How are you?"
+         print(f"\n[USER] {user_message}")
+ 
+         assistant.tool_calls = []
+         response = await assistant.chat(user_message)
+ 
+         print(f"\n[ASSISTANT] {response}")
+         print(f"\n[TOOLS USED] {assistant.tool_calls}")
+ 
+         if not assistant.tool_calls:
+             print("✓ LLM responded directly without tools!")
+ 
+         # ─────────────────────────────────────────────────────────────────
+         # TEST 4: Analysis with REAL DATA (full bidirectional flow)
+         # ─────────────────────────────────────────────────────────────────
+         print("\n" + "─"*70)
+         print("TEST 4: Analysis with REAL DATA (full flow demonstration)")
+         print("─"*70)
+ 
+         # Actual Q4 sales data with clear anomalies
+         q4_sales_data = """
+ Here is our Q4 2024 monthly sales data:
+ 
+ | Month     | Revenue    | Units Sold | Avg Order Value |
+ |-----------|------------|------------|-----------------|
+ | October   | $142,500   | 2,850      | $50.00          |
+ | November  | $168,300   | 3,366      | $50.00          |
+ | December  | $312,750   | 4,170      | $75.00          |
+ 
+ Weekly breakdown for December:
+ - Week 1: $45,200 (normal)
+ - Week 2: $52,100 (normal)
+ - Week 3: $185,450 (BLACK FRIDAY + CYBER MONDAY spillover)
+ - Week 4: $30,000 (post-holiday drop)
+ 
+ Please analyze this data and identify:
+ 1. Key trends
+ 2. Any anomalies
+ 3. Recommendations
+ """
+         user_message = f"Analyze this Q4 sales data:\n{q4_sales_data}"
+         print("\n[USER] Providing actual Q4 sales data for analysis...")
+         print(q4_sales_data)
+ 
+         assistant.tool_calls = []
+         response = await assistant.chat(user_message)
+ 
+         print(f"\n[ASSISTANT] {response}")
+         print(f"\n[TOOLS USED] {assistant.tool_calls}")
+ 
+         peer_calls = [c for c in assistant.tool_calls if c["tool"] == "ask_peer"]
+         if peer_calls:
+             print("✓ Full bidirectional flow completed with real data!")
+             print("✓ Analyst processed actual sales figures and provided insights!")
+ 
+         # ─────────────────────────────────────────────────────────────────
+         # Summary
+         # ─────────────────────────────────────────────────────────────────
+         print("\n" + "="*70)
+         print("EXAMPLE COMPLETE")
+         print("="*70)
+         print(f"""
+     KEY TAKEAWAYS:
+ 
+     1. PEER TOOLS IN TOOLSET
+        tools.extend(self.peers.as_tool().schema)
+ 
+     2. LLM DECIDES AUTONOMOUSLY
+        - Analysis request → LLM uses ask_peer → analyst
+        - Search request → LLM uses web_search
+        - Greeting → LLM responds directly
+        - Real data analysis → Full bidirectional flow
+ 
+     3. NO HARDCODED DELEGATION
+        We don't write: if "analyze" in msg: call_analyst()
+        The LLM figures it out from the system prompt!
+ 
+     4. ANALYST RECEIVED: {len(analyst.requests_received)} requests
+ 
+     5. REAL DATA FLOW
+        User provides data → Assistant delegates → Analyst analyzes →
+        Analyst responds with insights → Assistant presents to user
+     """)
+ 
+         # Cleanup
+         analyst.request_shutdown()
+         analyst_task.cancel()
+         try:
+             await analyst_task
+         except asyncio.CancelledError:
+             pass
+ 
+         await mesh.stop()
+ 
+     except Exception as e:
+         print(f"\nError: {e}")
+         import traceback
+         traceback.print_exc()
+ 
+ 
+ if __name__ == "__main__":
+     asyncio.run(main())
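
For readers skimming this diff, the peer-tool pattern the example demonstrates reduces to three touch points. The sketch below is a condensed, illustrative version, not code shipped in the package: it assumes the jarviscore APIs used above (CustomAgent, and self.peers.as_tool() exposing .schema, .tool_names, and .execute()) and elides the LLM loop entirely.

    # Condensed sketch of the pattern above (illustrative; assumes the
    # jarviscore APIs shown in this diff; the LLM loop is elided).
    from jarviscore.profiles import CustomAgent


    class MinimalPeerAgent(CustomAgent):
        role = "minimal"
        capabilities = ["demo"]

        def get_tools(self) -> list:
            tools = []  # 1. add your local tool schemas here
            if self.peers:
                # 2. expose ask_peer / broadcast_update / list_peers to the LLM
                tools.extend(self.peers.as_tool().schema)
            return tools

        async def execute_tool(self, tool_name: str, args: dict) -> str:
            # 3. route peer-tool calls back into the peer system
            if self.peers and tool_name in self.peers.as_tool().tool_names:
                return await self.peers.as_tool().execute(tool_name, args)
            return f"Unknown tool: {tool_name}"

        async def execute_task(self, task: dict) -> dict:
            return {"status": "success"}

Everything else in the 730-line example is the LLM plumbing around these three methods plus demo scaffolding.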