jarviscore-framework 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- examples/customagent_p2p_example.py +566 -183
- jarviscore/__init__.py +1 -1
- jarviscore/data/examples/customagent_p2p_example.py +566 -183
- jarviscore/docs/API_REFERENCE.md +2 -2
- jarviscore/docs/CONFIGURATION.md +2 -2
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +1156 -209
- jarviscore/docs/GETTING_STARTED.md +1 -1
- jarviscore/docs/TROUBLESHOOTING.md +3 -3
- jarviscore/docs/USER_GUIDE.md +2 -2
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/METADATA +7 -6
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/RECORD +14 -14
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +0 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +0 -0
@@ -1,17 +1,21 @@
 """
 CustomAgent P2P Mode Example
 
-Demonstrates
-- Agents
--
--
-
+Demonstrates LLM-DRIVEN PEER COMMUNICATION where:
+- Agents have their own LLM for reasoning
+- Peer tools (ask_peer, broadcast) are added to the LLM's toolset
+- The LLM AUTONOMOUSLY decides when to ask other agents for help
+
+KEY PATTERN:
+1. Add peer tools to get_tools() → LLM sees them
+2. Route tool execution in execute_tool() → handles peer calls
+3. Update system prompt → tells LLM about peer capabilities
+4. LLM decides → "I need analysis help, let me ask the analyst"
 
 This is ideal for:
 - Autonomous agent swarms
 - Real-time collaborative systems
--
-- Agents that need to run indefinitely
+- Agents that intelligently delegate tasks
 
 Usage:
     python examples/customagent_p2p_example.py
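The four-step pattern in the new docstring maps to very little code. The sketch below is illustrative only (it is not part of the package) and shows steps 1 and 2, assuming nothing beyond the peer-tool surface this file itself uses: `self.peers.as_tool()` exposing `.schema`, `.tool_names`, and `.execute()`. Steps 3 and 4 happen at chat time, via the system prompt and the LLM's own decisions.

```python
# Minimal sketch of steps 1-2 from the pattern above. SketchAgent is a
# hypothetical agent; the peer-tool calls mirror the ones used later in
# this diff.
from jarviscore.profiles import CustomAgent


class SketchAgent(CustomAgent):
    role = "sketch"
    capabilities = ["demo"]

    def get_tools(self) -> list:
        # Step 1: expose local tools plus peer tools, so the LLM sees both.
        tools = [{
            "name": "echo",
            "description": "Echo the input back",
            "input_schema": {
                "type": "object",
                "properties": {"text": {"type": "string"}},
                "required": ["text"],
            },
        }]
        if self.peers:
            tools.extend(self.peers.as_tool().schema)  # ask_peer, broadcast_update, list_peers
        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        # Step 2: route peer-tool calls to the peer system, the rest locally.
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)
        if tool_name == "echo":
            return args.get("text", "")
        return f"Unknown tool: {tool_name}"

    async def execute_task(self, task: dict) -> dict:
        return {"status": "success"}
```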
@@ -30,13 +34,20 @@ from jarviscore.profiles import CustomAgent
 
 
 # ═══════════════════════════════════════════════════════════════════════════════
-# LLM CLIENT
+# LLM CLIENT
 # ═══════════════════════════════════════════════════════════════════════════════
 
-class SimpleLLMClient:
-    """
+class LLMClient:
+    """
+    LLM client with tool calling support.
+    Replace with your actual LLM client (OpenAI, Anthropic, etc.)
+    """
 
     def __init__(self):
+        self.available = False
+        self.client = None
+        self.model = None
+
         try:
             from anthropic import Anthropic
             from jarviscore.config import settings
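The rewritten client (next hunk) normalizes every LLM response into a plain dict: `{"type": "text", "content": ...}` or `{"type": "tool_use", "tool_name": ..., "tool_args": ..., "tool_use_id": ...}`. As a hedged illustration of how a caller consumes that contract, with `agent` and `llm` standing in for any agent/`LLMClient` pair from this file:

```python
# Illustrative consumer of the LLMClient contract shown in the next hunk.
# `answer` is a hypothetical helper, not part of the example itself.
async def answer(agent, llm, question: str) -> str:
    messages = [{"role": "user", "content": question}]
    response = llm.chat_with_tools(messages, agent.get_tools())

    if response.get("type") == "tool_use":
        # Execute whichever tool the LLM picked, then hand the result back.
        result = await agent.execute_tool(response["tool_name"], response["tool_args"])
        response = llm.continue_with_tool_result(
            messages,
            response["tool_use_id"],
            response["tool_name"],
            response["tool_args"],
            result,
            agent.get_tools(),
        )
    return response.get("content", "")
```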
@@ -53,185 +64,442 @@ class SimpleLLMClient:
 
             self.model = settings.claude_model or "claude-sonnet-4-20250514"
             self.available = True
+            print(f"[LLM] Initialized with model: {self.model}")
         except Exception as e:
-            print(f"[LLM] Not available: {e}")
-
+            print(f"[LLM] Not available: {e} - using mock responses")
+
+    def chat_with_tools(
+        self,
+        messages: list,
+        tools: list,
+        system: str = None,
+        max_tokens: int = 1024
+    ) -> dict:
+        """
+        Chat with LLM and tools.
 
-
-
+        Returns:
+            {"type": "text", "content": "..."} or
+            {"type": "tool_use", "tool_name": "...", "tool_args": {...}, "tool_use_id": "..."}
+        """
         if not self.available:
-
-
-
+            # Mock response for testing without API key
+            user_msg = ""
+            for msg in messages:
+                if isinstance(msg.get("content"), str):
+                    user_msg = msg.get("content", "").lower()
+
+            if "analyze" in user_msg or "analysis" in user_msg or "trend" in user_msg:
+                return {
+                    "type": "tool_use",
+                    "tool_name": "ask_peer",
+                    "tool_args": {"role": "analyst", "question": user_msg},
+                    "tool_use_id": "mock_id_001"
+                }
+            if "search" in user_msg:
+                return {
+                    "type": "tool_use",
+                    "tool_name": "web_search",
+                    "tool_args": {"query": user_msg},
+                    "tool_use_id": "mock_id_002"
+                }
+            return {"type": "text", "content": f"Hello! How can I help you today?"}
+
+        # Build request
+        request_kwargs = {
             "model": self.model,
-            "max_tokens":
-            "messages":
+            "max_tokens": max_tokens,
+            "messages": messages,
         }
+
         if system:
-
+            request_kwargs["system"] = system
+
+        if tools:
+            request_kwargs["tools"] = tools
 
-
-
+        # Make the API call
+        response = self.client.messages.create(**request_kwargs)
+
+        # Parse response - check for tool_use first
+        result = {"stop_reason": response.stop_reason}
+
+        for block in response.content:
+            if block.type == "tool_use":
+                result["type"] = "tool_use"
+                result["tool_name"] = block.name
+                result["tool_args"] = block.input
+                result["tool_use_id"] = block.id
+                return result  # Return immediately on tool use
+            elif block.type == "text":
+                result["type"] = "text"
+                result["content"] = block.text
+
+        return result
+
+    def continue_with_tool_result(
+        self,
+        messages: list,
+        tool_use_id: str,
+        tool_name: str,
+        tool_args: dict,
+        tool_result: str,
+        tools: list = None,
+        system: str = None
+    ) -> dict:
+        """
+        Continue conversation after tool execution.
+
+        This properly formats the assistant's tool use and the tool result.
+        """
+        if not self.available:
+            return {"type": "text", "content": f"Based on the {tool_name} result: {tool_result[:100]}..."}
+
+        # Build new messages with tool use and result
+        new_messages = messages + [
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "tool_use",
+                        "id": tool_use_id,
+                        "name": tool_name,
+                        "input": tool_args
+                    }
+                ]
+            },
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "tool_result",
+                        "tool_use_id": tool_use_id,
+                        "content": tool_result
+                    }
+                ]
+            }
+        ]
+
+        # Continue the conversation
+        return self.chat_with_tools(new_messages, tools or [], system)
 
 
 # ═══════════════════════════════════════════════════════════════════════════════
-#
+# ANALYST AGENT - Specialist in data analysis
 # ═══════════════════════════════════════════════════════════════════════════════
 
-class
+class AnalystAgent(CustomAgent):
     """
-
+    Analyst agent - specialist in data analysis.
 
-
-    1.
-    2.
-    3.
-    4.
+    This agent:
+    1. Listens for incoming requests from peers
+    2. Processes requests using its own LLM
+    3. Has local tools (statistical_analysis, trend_detection)
+    4. Can also ask other peers if needed (via peer tools)
     """
-    role = "
-    capabilities = ["
+    role = "analyst"
+    capabilities = ["analysis", "data_interpretation", "reporting"]
 
     def __init__(self, agent_id=None):
         super().__init__(agent_id)
         self.llm = None
-        self.
+        self.requests_received = []
 
     async def setup(self):
         """Initialize LLM client."""
         await super().setup()
-        self.llm =
-        self._logger.info(f"[{self.role}] Ready
+        self.llm = LLMClient()
+        self._logger.info(f"[{self.role}] Ready with LLM-powered analysis")
 
-
+    def get_tools(self) -> list:
         """
-
+        Tools available to THIS agent's LLM.
 
-
-        The agent runs indefinitely, processing incoming messages.
+        Includes local analysis tools AND peer tools.
         """
-
+        tools = [
+            {
+                "name": "statistical_analysis",
+                "description": "Run statistical analysis on numeric data (mean, std, variance)",
+                "input_schema": {
+                    "type": "object",
+                    "properties": {
+                        "data": {"type": "string", "description": "Data to analyze"}
+                    },
+                    "required": ["data"]
+                }
+            },
+            {
+                "name": "trend_detection",
+                "description": "Detect trends and patterns in time series data",
+                "input_schema": {
+                    "type": "object",
+                    "properties": {
+                        "data": {"type": "string", "description": "Time series data"}
+                    },
+                    "required": ["data"]
+                }
+            }
+        ]
+
+        # ADD PEER TOOLS - analyst can ask other agents if needed
+        if self.peers:
+            tools.extend(self.peers.as_tool().schema)
+
+        return tools
+
+    async def execute_tool(self, tool_name: str, args: dict) -> str:
+        """Execute a tool - routes to peer tools or local tools."""
+        # PEER TOOLS
+        if self.peers and tool_name in self.peers.as_tool().tool_names:
+            return await self.peers.as_tool().execute(tool_name, args)
+
+        # LOCAL TOOLS
+        if tool_name == "statistical_analysis":
+            data = args.get("data", "")
+            return f"Statistical analysis of '{data}': mean=150.3, std=23.4, variance=547.6, trend=positive"
+
+        if tool_name == "trend_detection":
+            data = args.get("data", "")
+            return f"Trend analysis of '{data}': Upward trend detected with 92% confidence, growth rate 3.2%"
+
+        return f"Unknown tool: {tool_name}"
+
+    async def process_with_llm(self, query: str) -> str:
+        """Process request using LLM with tools."""
+        system_prompt = """You are an expert data analyst.
+You specialize in analyzing data, finding patterns, and providing insights.
+You have tools for statistical analysis and trend detection.
+Be concise but thorough in your analysis."""
+
+        # Get tools (excluding peer tools to avoid loops in analyst)
+        tools = [t for t in self.get_tools()
+                 if t["name"] not in ["ask_peer", "broadcast_update", "list_peers"]]
+
+        messages = [{"role": "user", "content": query}]
+        response = self.llm.chat_with_tools(messages, tools, system_prompt)
+
+        # Handle tool use
+        if response.get("type") == "tool_use":
+            tool_name = response["tool_name"]
+            tool_args = response["tool_args"]
+            tool_use_id = response["tool_use_id"]
+
+            tool_result = await self.execute_tool(tool_name, tool_args)
+
+            response = self.llm.continue_with_tool_result(
+                messages, tool_use_id, tool_name, tool_args, tool_result, tools, system_prompt
+            )
+
+        return response.get("content", "Analysis complete.")
+
+    async def run(self):
+        """Main loop - listen for incoming requests."""
+        self._logger.info(f"[{self.role}] Starting run loop...")
 
         while not self.shutdown_requested:
-            # Check for incoming peer messages
             if self.peers:
                 msg = await self.peers.receive(timeout=0.5)
-
                 if msg and msg.is_request:
-                    # Process the research query
                     query = msg.data.get("question", msg.data.get("query", ""))
-                    self.
-
-                    #
-
-
-
-
-
-
-
-
+                    self.requests_received.append(query)
+
+                    # Show receipt
+                    print(f"\n │ ┌─ [ANALYST] Received request from {msg.sender}")
+                    print(f" │ │ Query: {query[:80]}...")
+
+                    # Process with LLM
+                    result = await self.process_with_llm(query)
+
+                    # Show response
+                    print(f" │ │ Processing with LLM...")
+                    print(f" │ └─ [ANALYST] Sending response back")
+
+                    await self.peers.respond(msg, {"response": result})
             else:
                 await asyncio.sleep(0.1)
 
-    async def execute_task(self, task):
-        """
-        return {"status": "success", "note": "
+    async def execute_task(self, task: dict) -> dict:
+        """Required by base class."""
+        return {"status": "success", "note": "This agent uses run() for P2P mode"}
 
 
+# ═══════════════════════════════════════════════════════════════════════════════
+# ASSISTANT AGENT - Coordinator that delegates to specialists
+# ═══════════════════════════════════════════════════════════════════════════════
+
 class AssistantAgent(CustomAgent):
     """
-    Assistant agent
-
-
-    1.
-    2.
-    3.
+    Assistant agent - coordinates with specialist agents.
+
+    KEY PATTERN DEMONSTRATED:
+    1. Has its own LLM for reasoning
+    2. Peer tools (ask_peer, broadcast) are in its toolset
+    3. LLM AUTONOMOUSLY decides when to ask other agents
+    4. No manual "if analysis_needed: call_analyst()" logic!
+
+    The LLM sees:
+    - web_search (local tool)
+    - ask_peer (peer tool) ← LLM decides when to use this!
+    - broadcast_update (peer tool)
+    - list_peers (peer tool)
     """
     role = "assistant"
-    capabilities = ["
+    capabilities = ["chat", "coordination", "search"]
 
     def __init__(self, agent_id=None):
         super().__init__(agent_id)
         self.llm = None
-        self.
+        self.tool_calls = []  # Track what tools LLM uses
 
     async def setup(self):
         """Initialize LLM client."""
         await super().setup()
-        self.llm =
-        self._logger.info(f"[{self.role}] Ready
-
-
-        """
-
-
-
-
-
-
-
-
+        self.llm = LLMClient()
+        self._logger.info(f"[{self.role}] Ready with LLM + peer tools")
+
+    def get_tools(self) -> list:
+        """
+        Tools available to THIS agent's LLM.
+
+        IMPORTANT: This includes PEER TOOLS!
+        The LLM sees ask_peer, broadcast_update, list_peers
+        and decides when to use them autonomously.
+        """
+        # Local tools
+        tools = [
+            {
+                "name": "web_search",
+                "description": "Search the web for information",
+                "input_schema": {
+                    "type": "object",
+                    "properties": {
+                        "query": {"type": "string", "description": "Search query"}
+                    },
+                    "required": ["query"]
+                }
+            }
+        ]
 
-
+        # ═══════════════════════════════════════════════════════════════════════
+        # KEY: ADD PEER TOOLS TO LLM'S TOOLSET
+        #
+        # This is the core pattern! After this, LLM will see:
+        # - ask_peer: Ask another agent by role
+        # - broadcast_update: Send message to all peers
+        # - list_peers: See available agents and their capabilities
+        #
+        # The LLM decides when to use these based on the user's request.
+        # ═══════════════════════════════════════════════════════════════════════
+        if self.peers:
+            tools.extend(self.peers.as_tool().schema)
+
+        return tools
+
+    async def execute_tool(self, tool_name: str, args: dict) -> str:
         """
-
+        Execute a tool by name.
 
-
+        When LLM decides to use ask_peer, this routes to the peer system.
+        No manual delegation logic - just routing!
         """
-        self.
+        self.tool_calls.append({"tool": tool_name, "args": args})
 
-        #
-
-
+        # PEER TOOLS - route to peer system
+        if self.peers and tool_name in self.peers.as_tool().tool_names:
+            return await self.peers.as_tool().execute(tool_name, args)
 
-
-
-
+        # LOCAL TOOLS
+        if tool_name == "web_search":
+            query = args.get("query", "")
+            return f"Search results for '{query}': Found 10 relevant articles about {query}."
 
-
-            final_response = self.llm.chat(
-                f"Based on this research: {research_result}\n\nProvide a helpful summary.",
-                system="You are a helpful assistant. Summarize research findings clearly."
-            )
-            return final_response
-        else:
-            # Handle directly
-            return self.llm.chat(
-                user_input,
-                system="You are a helpful assistant. Be concise and friendly."
-            )
+        return f"Unknown tool: {tool_name}"
 
-    async def
+    async def chat(self, user_message: str) -> str:
         """
-
+        Complete LLM chat with autonomous tool use.
 
-
-
-        -
-        - Message queue events
-        - Other peer requests
+        The LLM sees ALL tools (including peer tools) and decides
+        which to use. If user asks for analysis, LLM will use
+        ask_peer to contact the analyst - we don't hardcode this!
         """
-
+        # System prompt tells LLM about its capabilities
+        system_prompt = """You are a helpful assistant with access to specialist agents.
+
+YOUR TOOLS:
+- web_search: Search the web for information
+- ask_peer: Ask specialist agents for help. Available specialists:
+  * analyst: Expert in data analysis, statistics, and trends
+- broadcast_update: Send updates to all connected agents
+- list_peers: See what other agents are available
+
+IMPORTANT GUIDELINES:
+- When users ask for DATA ANALYSIS, USE ask_peer to ask the analyst
+- When users ask for WEB INFORMATION, USE web_search
+- Be concise and helpful in your responses
+- Always explain what you found from specialists"""
+
+        tools = self.get_tools()
+        messages = [{"role": "user", "content": user_message}]
+
+        self._logger.info(f"[{self.role}] Processing: {user_message[:50]}...")
+        self._logger.info(f"[{self.role}] Tools available: {[t['name'] for t in tools]}")
+
+        # Call LLM with tools - IT decides which to use
+        response = self.llm.chat_with_tools(messages, tools, system_prompt)
+
+        # Handle tool use loop (LLM might use multiple tools)
+        iterations = 0
+        while response.get("type") == "tool_use" and iterations < 3:
+            iterations += 1
+            tool_name = response["tool_name"]
+            tool_args = response["tool_args"]
+            tool_use_id = response["tool_use_id"]
+
+            print(f"\n ┌─ [ASSISTANT LLM] Decided to use tool: {tool_name}")
+            print(f" │ Args: {tool_args}")
+
+            # Execute the tool (might be ask_peer!)
+            tool_result = await self.execute_tool(tool_name, tool_args)
+
+            # Show the result from peer if it was ask_peer
+            if tool_name == "ask_peer":
+                print(f" │")
+                print(f" │ ──► [SENT TO ANALYST]")
+                print(f" │")
+                print(f" │ ◄── [ANALYST RESPONDED]:")
+                print(f" │ {tool_result[:200]}...")
+            else:
+                print(f" │ Result: {tool_result[:100]}...")
 
-
-
-            #
-
-
+            print(f" └─ [ASSISTANT LLM] Processing response...")
+
+            # Continue conversation with tool result
+            response = self.llm.continue_with_tool_result(
+                messages, tool_use_id, tool_name, tool_args, tool_result, tools, system_prompt
+            )
+
+        return response.get("content", "I processed your request.")
 
+    async def run(self):
+        """Main loop - listen for incoming requests from peers."""
+        self._logger.info(f"[{self.role}] Starting run loop...")
+
+        while not self.shutdown_requested:
             if self.peers:
                 msg = await self.peers.receive(timeout=0.5)
                 if msg and msg.is_request:
                     query = msg.data.get("query", "")
-
-                    await self.peers.respond(msg, {"response":
+                    result = await self.chat(query)
+                    await self.peers.respond(msg, {"response": result})
             else:
                 await asyncio.sleep(0.1)
 
-    async def execute_task(self, task):
-        """
-        return {"status": "success", "note": "
+    async def execute_task(self, task: dict) -> dict:
+        """Required by base class."""
+        return {"status": "success", "note": "This agent uses run() for P2P mode"}
 
 
 # ═══════════════════════════════════════════════════════════════════════════════
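Everything the assistant does through its LLM can also be done imperatively. As a sketch of the same peer call made directly (a hypothetical helper; the tool name and the `{"role": ..., "question": ...}` argument shape match the mock client above):

```python
# Hypothetical direct peer call, bypassing the LLM entirely.
async def ask_analyst(agent, question: str) -> str:
    if not agent.peers:
        return "No peers connected"
    peer_tool = agent.peers.as_tool()
    if "ask_peer" not in peer_tool.tool_names:
        return "ask_peer not available"
    return await peer_tool.execute("ask_peer", {"role": "analyst", "question": question})
```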
@@ -239,16 +507,48 @@ class AssistantAgent(CustomAgent):
 # ═══════════════════════════════════════════════════════════════════════════════
 
 async def main():
-    """Run CustomAgent P2P mode example."""
+    """Run CustomAgent P2P mode example with LLM-driven peer communication."""
     print("\n" + "="*70)
-    print("JarvisCore:
+    print("JarvisCore: LLM-DRIVEN PEER COMMUNICATION")
     print("="*70)
 
-
-
-
+    print("""
+    This example demonstrates the KEY P2P PATTERN:
+
+    ┌─────────────────────────────────────────────────────────────────┐
+    │  User: "Analyze the Q4 sales data"                              │
+    │                  │                                              │
+    │                  ▼                                              │
+    │     ┌─────────────────────────────────────────┐                 │
+    │     │  ASSISTANT'S LLM                        │                 │
+    │     │                                         │                 │
+    │     │  Tools: [web_search, ask_peer, ...]     │                 │
+    │     │                                         │                 │
+    │     │  LLM thinks: "User needs analysis,      │                 │
+    │     │   I should ask the analyst agent"       │                 │
+    │     │                                         │                 │
+    │     │  → Uses ask_peer(role="analyst", ...)   │                 │
+    │     └─────────────────────────────────────────┘                 │
+    │                  │                                              │
+    │                  ▼                                              │
+    │     ┌─────────────────────────────────────────┐                 │
+    │     │  ANALYST AGENT                          │                 │
+    │     │  (Processes with its own LLM + tools)   │                 │
+    │     └─────────────────────────────────────────┘                 │
+    │                  │                                              │
+    │                  ▼  Returns analysis                            │
+    │     ┌─────────────────────────────────────────┐                 │
+    │     │  ASSISTANT'S LLM                        │                 │
+    │     │  "Based on the analyst's findings..."   │                 │
+    │     └─────────────────────────────────────────┘                 │
+    └─────────────────────────────────────────────────────────────────┘
+
+    The LLM DECIDES to use ask_peer - we don't hardcode this!
+    """)
+
+    # Create mesh
     mesh = Mesh(
-        mode="p2p",
+        mode="p2p",
         config={
             'bind_host': '127.0.0.1',
             'bind_port': 7960,
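`main()` above wires the mesh together. Below is a condensed sketch of the same lifecycle; the `Mesh` import path is an assumption (the example's own import lines fall outside these hunks), and `AnalystAgent`/`AssistantAgent` are the classes defined in this diff.

```python
import asyncio

from jarviscore import Mesh  # assumed import path; not shown in these hunks


async def demo():
    # Same construction as main() above.
    mesh = Mesh(mode="p2p", config={"bind_host": "127.0.0.1", "bind_port": 7960})
    analyst = mesh.add(AnalystAgent)       # classes as defined in this diff
    assistant = mesh.add(AssistantAgent)
    await mesh.start()
    try:
        worker = asyncio.create_task(analyst.run())  # listener in the background
        print(await assistant.chat("Analyze last month's numbers"))
        analyst.request_shutdown()
        worker.cancel()
    finally:
        await mesh.stop()
```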
@@ -256,61 +556,169 @@ async def main():
         }
     )
 
-
+    # Add agents
+    analyst = mesh.add(AnalystAgent)
     assistant = mesh.add(AssistantAgent)
 
     try:
         await mesh.start()
 
-        print("\n[
-        print(f"
-        print(f"  - Workflow Engine: NOT available (use run_forever instead)")
-        print(f"  - Agents: {len(mesh.agents)}")
+        print("\n[SETUP] Mesh started in P2P mode")
+        print(f"  Agents: {[a.role for a in mesh.agents]}")
 
-        #
-
+        # Show assistant's tools (including peer tools!)
+        tools = assistant.get_tools()
+        print(f"\n[TOOLS] Assistant's LLM sees these tools:")
+        for tool in tools:
+            print(f"  - {tool['name']}: {tool['description'][:50]}...")
 
-
-
-
+        # Start analyst's run loop in background
+        analyst_task = asyncio.create_task(analyst.run())
+        await asyncio.sleep(0.3)
 
-        #
-
+        # ─────────────────────────────────────────────────────────────────
+        # TEST 1: Request that should trigger ask_peer → analyst
+        # ─────────────────────────────────────────────────────────────────
+        print("\n" + "─"*70)
+        print("TEST 1: Analysis request (LLM should use ask_peer → analyst)")
+        print("─"*70)
 
-
-
+        user_message = "Please analyze the Q4 sales trends and identify any anomalies"
+        print(f"\n[USER] {user_message}")
 
-
-        await
+        assistant.tool_calls = []  # Reset tracking
+        response = await assistant.chat(user_message)
 
-
-
-        "Research the benefits of renewable energy",
-        "Hello, how are you?",  # This won't be delegated
-        "Analyze the latest trends in AI development",
-    ]
+        print(f"\n[ASSISTANT] {response}")
+        print(f"\n[TOOLS USED] {assistant.tool_calls}")
 
-
-
-
-        print(
+        # Verify LLM used ask_peer
+        peer_calls = [c for c in assistant.tool_calls if c["tool"] == "ask_peer"]
+        if peer_calls:
+            print("✓ LLM autonomously decided to ask the analyst!")
+        else:
+            print("○ LLM responded without asking analyst (might happen with mock)")
+
+        # ─────────────────────────────────────────────────────────────────
+        # TEST 2: Request that should use local tool (web_search)
+        # ─────────────────────────────────────────────────────────────────
+        print("\n" + "─"*70)
+        print("TEST 2: Search request (LLM should use web_search)")
+        print("─"*70)
+
+        user_message = "Search for the latest Python 3.12 features"
+        print(f"\n[USER] {user_message}")
+
+        assistant.tool_calls = []
+        response = await assistant.chat(user_message)
+
+        print(f"\n[ASSISTANT] {response}")
+        print(f"\n[TOOLS USED] {assistant.tool_calls}")
+
+        search_calls = [c for c in assistant.tool_calls if c["tool"] == "web_search"]
+        if search_calls:
+            print("✓ LLM used local web_search tool!")
+
+        # ─────────────────────────────────────────────────────────────────
+        # TEST 3: Simple greeting (no tools needed)
+        # ─────────────────────────────────────────────────────────────────
+        print("\n" + "─"*70)
+        print("TEST 3: Simple greeting (no tools needed)")
+        print("─"*70)
+
+        user_message = "Hello! How are you?"
+        print(f"\n[USER] {user_message}")
+
+        assistant.tool_calls = []
+        response = await assistant.chat(user_message)
+
+        print(f"\n[ASSISTANT] {response}")
+        print(f"\n[TOOLS USED] {assistant.tool_calls}")
+
+        if not assistant.tool_calls:
+            print("✓ LLM responded directly without tools!")
+
+        # ─────────────────────────────────────────────────────────────────
+        # TEST 4: Analysis with REAL DATA (full bidirectional flow)
+        # ─────────────────────────────────────────────────────────────────
+        print("\n" + "─"*70)
+        print("TEST 4: Analysis with REAL DATA (full flow demonstration)")
+        print("─"*70)
+
+        # Actual Q4 sales data with clear anomalies
+        q4_sales_data = """
+        Here is our Q4 2024 monthly sales data:
+
+        | Month     | Revenue    | Units Sold | Avg Order Value |
+        |-----------|------------|------------|-----------------|
+        | October   | $142,500   | 2,850      | $50.00          |
+        | November  | $168,300   | 3,366      | $50.00          |
+        | December  | $312,750   | 4,170      | $75.00          |
+
+        Weekly breakdown for December:
+        - Week 1: $45,200 (normal)
+        - Week 2: $52,100 (normal)
+        - Week 3: $185,450 (BLACK FRIDAY + CYBER MONDAY spillover)
+        - Week 4: $30,000 (post-holiday drop)
+
+        Please analyze this data and identify:
+        1. Key trends
+        2. Any anomalies
+        3. Recommendations
+        """
+        user_message = f"Analyze this Q4 sales data:\n{q4_sales_data}"
+        print(f"\n[USER] Providing actual Q4 sales data for analysis...")
+        print(q4_sales_data)
+
+        assistant.tool_calls = []
+        response = await assistant.chat(user_message)
 
-
+        print(f"\n[ASSISTANT] {response}")
+        print(f"\n[TOOLS USED] {assistant.tool_calls}")
+
+        peer_calls = [c for c in assistant.tool_calls if c["tool"] == "ask_peer"]
+        if peer_calls:
+            print("✓ Full bidirectional flow completed with real data!")
+            print(f"✓ Analyst processed actual sales figures and provided insights!")
+
+        # ─────────────────────────────────────────────────────────────────
+        # Summary
+        # ─────────────────────────────────────────────────────────────────
         print("\n" + "="*70)
-        print("
+        print("EXAMPLE COMPLETE")
         print("="*70)
-        print(f"
+        print(f"""
+    KEY TAKEAWAYS:
+
+    1. PEER TOOLS IN TOOLSET
+       tools.extend(self.peers.as_tool().schema)
+
+    2. LLM DECIDES AUTONOMOUSLY
+       - Analysis request → LLM uses ask_peer → analyst
+       - Search request → LLM uses web_search
+       - Greeting → LLM responds directly
+       - Real data analysis → Full bidirectional flow
+
+    3. NO HARDCODED DELEGATION
+       We don't write: if "analyze" in msg: call_analyst()
+       The LLM figures it out from the system prompt!
+
+    4. ANALYST RECEIVED: {len(analyst.requests_received)} requests
+
+    5. REAL DATA FLOW
+       User provides data → Assistant delegates → Analyst analyzes →
+       Analyst responds with insights → Assistant presents to user
+    """)
 
         # Cleanup
-
-
+        analyst.request_shutdown()
+        analyst_task.cancel()
         try:
-            await
+            await analyst_task
         except asyncio.CancelledError:
             pass
 
         await mesh.stop()
-        print("\n[INFO] P2P mesh stopped")
 
     except Exception as e:
         print(f"\nError: {e}")
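The cleanup block above follows a deliberate order: signal the run loop first, then cancel the task and swallow the cancellation. As a small sketch of the same pattern extracted into a reusable helper (hypothetical name, not part of the package):

```python
import asyncio


async def stop_agent(agent, task: asyncio.Task) -> None:
    """Hypothetical helper mirroring the cleanup sequence above."""
    agent.request_shutdown()  # lets run() exit at its next receive() timeout
    task.cancel()             # fallback in case the loop is blocked elsewhere
    try:
        await task
    except asyncio.CancelledError:
        pass
```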
@@ -318,30 +726,5 @@ async def main():
         traceback.print_exc()
 
 
-# ═══════════════════════════════════════════════════════════════════════════════
-# LONG-RUNNING P2P EXAMPLE
-# ═══════════════════════════════════════════════════════════════════════════════
-
-async def run_forever_example():
-    """
-    Example: Running P2P agents indefinitely.
-
-    Use mesh.run_forever() to keep all agents running:
-
-        mesh = Mesh(mode="p2p", config={...})
-        mesh.add(ResearcherAgent)
-        mesh.add(AssistantAgent)
-
-        await mesh.start()
-        await mesh.run_forever()  # Blocks until shutdown signal
-
-    Agents will run their run() loops continuously until:
-    - SIGINT (Ctrl+C)
-    - SIGTERM
-    - Programmatic shutdown
-    """
-    pass
-
-
 if __name__ == "__main__":
     asyncio.run(main())