jarviscore-framework 0.1.1 → 0.2.0 (py3-none-any.whl)
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- examples/autoagent_distributed_example.py +211 -0
- examples/custom_profile_decorator.py +134 -0
- examples/custom_profile_wrap.py +168 -0
- examples/customagent_distributed_example.py +362 -0
- examples/customagent_p2p_example.py +347 -0
- jarviscore/__init__.py +49 -36
- jarviscore/adapter/__init__.py +15 -9
- jarviscore/adapter/decorator.py +23 -19
- jarviscore/adapter/wrapper.py +303 -0
- jarviscore/cli/scaffold.py +1 -1
- jarviscore/cli/smoketest.py +3 -2
- jarviscore/core/agent.py +44 -1
- jarviscore/core/mesh.py +196 -35
- jarviscore/data/examples/autoagent_distributed_example.py +211 -0
- jarviscore/data/examples/customagent_distributed_example.py +362 -0
- jarviscore/data/examples/customagent_p2p_example.py +347 -0
- jarviscore/docs/API_REFERENCE.md +264 -51
- jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
- jarviscore/docs/CONFIGURATION.md +35 -21
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
- jarviscore/docs/GETTING_STARTED.md +106 -13
- jarviscore/docs/TROUBLESHOOTING.md +144 -6
- jarviscore/docs/USER_GUIDE.md +138 -361
- jarviscore/orchestration/engine.py +20 -8
- jarviscore/p2p/__init__.py +10 -0
- jarviscore/p2p/coordinator.py +129 -0
- jarviscore/p2p/messages.py +87 -0
- jarviscore/p2p/peer_client.py +576 -0
- jarviscore/p2p/peer_tool.py +268 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/METADATA +60 -54
- jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
- test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
- test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
- test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
- test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
- test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
- test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
- test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
- test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
- test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
- test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
- test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
- test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
- test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
- test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
- test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
- test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
- tests/test_01_analyst_standalone.py +124 -0
- tests/test_02_assistant_standalone.py +164 -0
- tests/test_03_analyst_with_framework.py +945 -0
- tests/test_04_assistant_with_framework.py +1002 -0
- tests/test_05_integration.py +1301 -0
- tests/test_06_real_llm_integration.py +760 -0
- tests/test_07_distributed_single_node.py +578 -0
- tests/test_08_distributed_multi_node.py +454 -0
- tests/test_09_distributed_autoagent.py +509 -0
- tests/test_10_distributed_customagent.py +787 -0
- tests/test_mesh.py +35 -4
- jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
- {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
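The largest addition in 0.2.0 is the new jarviscore.p2p package (coordinator, messages, peer_client, peer_tool) plus the distributed/P2P examples and tests listed above. As orientation before the diff itself, the sketch below distills the single-node integration pattern that the added tests exercise. Every name in it (Mesh, Agent, PeerClient, the coordinator=None / node_id="local" in-process wiring) is taken from the diff that follows, but the sketch is only an illustration of that pattern, not official jarviscore documentation.

# Hypothetical minimal example, distilled from tests/test_03_analyst_with_framework.py
# below; treat the signatures and wiring as illustrative only.
import asyncio

from jarviscore.core.agent import Agent
from jarviscore.core.mesh import Mesh
from jarviscore.p2p.peer_client import PeerClient


class Echo(Agent):
    role = "echo"
    capabilities = ["echo"]

    async def execute_task(self, task):  # required by the Agent base class
        return {}

    async def run(self):
        # Answer incoming peer requests until shutdown is requested.
        while not self.shutdown_requested:
            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                await self.peers.respond(msg, {"response": msg.data.get("query")})


class Caller(Agent):
    role = "caller"
    capabilities = ["calling"]

    async def execute_task(self, task):
        return {}


async def main():
    mesh = Mesh(mode="p2p")
    echo, caller = mesh.add(Echo), mesh.add(Caller)
    for agent in (echo, caller):
        # Single-node, in-process wiring, exactly as the added tests do it.
        agent.peers = PeerClient(
            coordinator=None,
            agent_id=agent.agent_id,
            agent_role=agent.role,
            agent_registry=mesh._agent_registry,
            node_id="local",
        )

    echo_task = asyncio.create_task(echo.run())
    try:
        reply = await caller.peers.request("echo", {"query": "ping"}, timeout=5.0)
        print(reply)  # expected: {'response': 'ping'}
    finally:
        echo.request_shutdown()
        echo_task.cancel()
        try:
            await echo_task
        except asyncio.CancelledError:
            pass


asyncio.run(main())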
tests/test_03_analyst_with_framework.py
@@ -0,0 +1,945 @@
+"""
+Test 3: Analyst WITH JarvisCore Framework
+
+Demonstrates an LLM-POWERED AGENT that can BOTH send AND receive.
+
+KEY CONCEPT - All agents are equal participants:
+- Every agent has an LLM for reasoning
+- Every agent can SEND requests to peers (via ask_peer tool)
+- Every agent can RECEIVE requests from peers (via run() loop)
+- The "role" defines what they're GOOD at, not communication direction
+
+BEFORE (Standalone):
+- Analyst has analyze() capability
+- Analyst has get_tools() for its LLM
+- Cannot communicate with other agents
+
+AFTER (With Framework):
+- Same analyze() capability
+- get_tools() NOW includes peer tools (ask_peer, broadcast_update, list_peers)
+- Can RECEIVE requests and process them with LLM
+- Can SEND requests to other peers when processing
+- Full mesh participant
+
+DEVELOPER CHANGES REQUIRED:
+1. Inherit from Agent
+2. Add `role` and `capabilities` class attributes
+3. Modify get_tools() to include self.peers.as_tool().schema
+4. Modify execute_tool() to dispatch peer tools
+5. Add async def run() loop for incoming requests
+6. Add async def execute_task() (required by base class)
+"""
+import asyncio
+import sys
+import pytest
+sys.path.insert(0, '.')
+
+from jarviscore.core.agent import Agent
+from jarviscore.core.mesh import Mesh
+from jarviscore.p2p.peer_client import PeerClient
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# FIXTURES
+# ═══════════════════════════════════════════════════════════════════════════════
+
+@pytest.fixture
+def mesh():
+    """Create a fresh mesh for each test."""
+    return Mesh(mode="p2p")
+
+
+@pytest.fixture
+def analyst(mesh):
+    """Create an analyst added to mesh."""
+    return mesh.add(ConnectedAnalyst)
+
+
+@pytest.fixture
+def analyst_with_peers(mesh, analyst):
+    """Create analyst with peer client injected."""
+    analyst.peers = PeerClient(
+        coordinator=None,
+        agent_id=analyst.agent_id,
+        agent_role=analyst.role,
+        agent_registry=mesh._agent_registry,
+        node_id="local"
+    )
+    return analyst
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# THE AGENT - LLM-powered agent that can BOTH send AND receive
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class ConnectedAnalyst(Agent):
+    """
+    Analyst agent - AFTER installing jarviscore.
+
+    This is a FULL LLM-POWERED AGENT that can:
+    - Use its own tools (analyze)
+    - Ask other peers for help (ask_peer)
+    - Receive and process requests from other agents
+    - Broadcast updates to all peers
+
+    The LLM decides what to do - it might receive a request and
+    then ask another peer for additional data before responding.
+    """
+    # Identity for mesh registration
+    role = "analyst"
+    capabilities = ["analysis", "synthesis", "reporting"]
+
+    def __init__(self, agent_id=None):
+        super().__init__(agent_id)
+        self.analyses_count = 0
+        self.requests_processed = []
+        self.received_broadcasts = []
+
+    # ─────────────────────────────────────────────────────────────────
+    # CORE CAPABILITIES - What this agent is good at
+    # ─────────────────────────────────────────────────────────────────
+
+    def analyze(self, data: str) -> dict:
+        """Analyze data and return insights. (Core capability)"""
+        self.analyses_count += 1
+        return {
+            "response": f"Analysis #{self.analyses_count}: '{data}' shows positive trends",
+            "confidence": 0.85,
+            "recommendation": "Proceed with caution"
+        }
+
+    def generate_report(self, analysis: dict) -> str:
+        """Generate a text report from analysis. (Core capability)"""
+        return (
+            f"Report\n"
+            f"Summary: {analysis['response']}\n"
+            f"Confidence: {analysis['confidence']}"
+        )
+
+    # ─────────────────────────────────────────────────────────────────
+    # LLM TOOL INTERFACE - What LLM can use
+    # ─────────────────────────────────────────────────────────────────
+
+    def get_tools(self) -> list:
+        """
+        Return tool definitions for THIS AGENT'S LLM.
+
+        Includes:
+        - Local tools (analyze, generate_report)
+        - Peer tools (ask_peer, broadcast_update, list_peers)
+
+        The LLM decides which tools to use based on the task.
+        """
+        tools = [
+            {
+                "name": "analyze",
+                "description": "Analyze data and return insights with confidence score",
+                "input_schema": {
+                    "type": "object",
+                    "properties": {
+                        "data": {"type": "string", "description": "Data to analyze"}
+                    },
+                    "required": ["data"]
+                }
+            },
+            {
+                "name": "generate_report",
+                "description": "Generate a formatted report from analysis results",
+                "input_schema": {
+                    "type": "object",
+                    "properties": {
+                        "analysis": {"type": "object", "description": "Analysis result"}
+                    },
+                    "required": ["analysis"]
+                }
+            }
+        ]
+
+        # Add peer tools if connected to mesh
+        if self.peers:
+            tools.extend(self.peers.as_tool().schema)
+
+        return tools
+
+    async def execute_tool(self, tool_name: str, args: dict) -> str:
+        """
+        Execute a tool by name.
+
+        This is called when the LLM decides to use a tool.
+        Routes to local tools or peer tools as appropriate.
+        """
+        # Peer tools
+        if self.peers and tool_name in self.peers.as_tool().tool_names:
+            return await self.peers.as_tool().execute(tool_name, args)
+
+        # Local tools
+        if tool_name == "analyze":
+            result = self.analyze(args.get("data", ""))
+            return str(result)
+        elif tool_name == "generate_report":
+            result = self.generate_report(args.get("analysis", {}))
+            return result
+
+        return f"Unknown tool: {tool_name}"
+
+    # ─────────────────────────────────────────────────────────────────
+    # MESSAGE HANDLING - Process incoming requests with LLM
+    # ─────────────────────────────────────────────────────────────────
+
+    async def run(self):
+        """
+        Main loop - receive and process requests.
+
+        When a request comes in, the LLM decides how to handle it.
+        The LLM might:
+        - Use local tools (analyze)
+        - Ask other peers for help (ask_peer)
+        - Combine multiple tool calls
+        """
+        self._logger.info(f"Analyst {self.agent_id} listening...")
+
+        while not self.shutdown_requested:
+            msg = await self.peers.receive(timeout=0.5)
+            if msg is None:
+                continue
+
+            if msg.is_request:
+                # Process request with LLM
+                query = msg.data.get("query", "")
+                self._logger.info(f"Request from {msg.sender}: {query}")
+
+                # Simulate LLM deciding to use analyze tool
+                # In real code: response = await self.llm.chat(query, tools=self.get_tools())
+                result = self.analyze(query)
+                self.requests_processed.append({"from": msg.sender, "query": query})
+
+                await self.peers.respond(msg, result)
+
+            elif msg.is_notify:
+                self._logger.info(f"Broadcast: {msg.data}")
+                self.received_broadcasts.append(msg.data)
+
+    async def execute_task(self, task: dict) -> dict:
+        """Required by Agent base class."""
+        result = self.analyze(task.get("task", ""))
+        return {"status": "success", "output": result}
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# TESTS - Organized by what they verify
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class TestFrameworkIntegration:
+    """Tests that verify the agent integrates correctly with jarviscore."""
+
+    def test_inherits_from_agent(self):
+        """Agent must inherit from jarviscore.Agent."""
+        assert issubclass(ConnectedAnalyst, Agent)
+
+    def test_has_required_attributes(self):
+        """Agent must declare role and capabilities."""
+        assert ConnectedAnalyst.role == "analyst"
+        assert len(ConnectedAnalyst.capabilities) > 0
+
+    def test_can_be_added_to_mesh(self, mesh):
+        """Agent can be registered with the mesh."""
+        analyst = mesh.add(ConnectedAnalyst)
+        assert analyst in mesh.agents
+        assert analyst.agent_id is not None
+
+
+class TestLocalTools:
+    """Tests for the agent's local tools (before peer tools)."""
+
+    def test_analyze_works(self, analyst):
+        """Core analyze capability should work."""
+        result = analyst.analyze("Q4 sales data")
+        assert "Q4 sales data" in result["response"]
+        assert result["confidence"] == 0.85
+
+    def test_generate_report_works(self, analyst):
+        """Core report generation should work."""
+        analysis = analyst.analyze("test data")
+        report = analyst.generate_report(analysis)
+        assert "Report" in report
+        assert "Summary:" in report
+
+    def test_get_tools_returns_local_tools(self, analyst):
+        """get_tools() should return local tools (before peers injected)."""
+        tools = analyst.get_tools()
+        tool_names = [t["name"] for t in tools]
+
+        assert "analyze" in tool_names
+        assert "generate_report" in tool_names
+
+
+class TestPeerToolsIntegration:
+    """Tests for peer tools being added to the agent's toolset."""
+
+    def test_get_tools_includes_peer_tools_when_connected(self, analyst_with_peers, mesh):
+        """After peer injection, get_tools() should include peer tools."""
+        # Add another agent so there's someone to talk to
+        other = mesh.add(ConnectedAnalyst, agent_id="other-analyst")
+        other.peers = PeerClient(
+            coordinator=None,
+            agent_id=other.agent_id,
+            agent_role=other.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+        tools = analyst_with_peers.get_tools()
+        tool_names = [t["name"] for t in tools]
+
+        # Local tools
+        assert "analyze" in tool_names
+        assert "generate_report" in tool_names
+
+        # Peer tools
+        assert "ask_peer" in tool_names
+        assert "broadcast_update" in tool_names
+        assert "list_peers" in tool_names
+
+    def test_ask_peer_schema_shows_other_analysts(self, analyst_with_peers, mesh):
+        """ask_peer tool should show other agents in the enum."""
+        # Add a researcher
+        class Researcher(Agent):
+            role = "researcher"
+            capabilities = ["research"]
+            async def execute_task(self, task): return {}
+
+        researcher = mesh.add(Researcher)
+        researcher.peers = PeerClient(
+            coordinator=None,
+            agent_id=researcher.agent_id,
+            agent_role=researcher.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+        tools = analyst_with_peers.get_tools()
+        ask_peer = next(t for t in tools if t["name"] == "ask_peer")
+
+        role_enum = ask_peer["input_schema"]["properties"]["role"]["enum"]
+        assert "researcher" in role_enum
+
+    @pytest.mark.asyncio
+    async def test_analyst_can_ask_other_peers(self, analyst_with_peers, mesh):
+        """Analyst should be able to ask other peers for help."""
+        # Add a researcher that responds
+        class Researcher(Agent):
+            role = "researcher"
+            capabilities = ["research"]
+            async def execute_task(self, task): return {}
+            async def run(self):
+                while not self.shutdown_requested:
+                    msg = await self.peers.receive(timeout=0.5)
+                    if msg and msg.is_request:
+                        await self.peers.respond(msg, {
+                            "response": f"Research results for: {msg.data.get('query')}"
+                        })
+
+        researcher = mesh.add(Researcher)
+        researcher.peers = PeerClient(
+            coordinator=None,
+            agent_id=researcher.agent_id,
+            agent_role=researcher.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+        researcher_task = asyncio.create_task(researcher.run())
+        await asyncio.sleep(0.1)
+
+        try:
+            # Analyst asks researcher for help
+            result = await analyst_with_peers.execute_tool("ask_peer", {
+                "role": "researcher",
+                "question": "Find papers on market trends"
+            })
+
+            assert "Research results" in result
+
+        finally:
+            researcher.request_shutdown()
+            researcher_task.cancel()
+            try:
+                await researcher_task
+            except asyncio.CancelledError:
+                pass
+
+
+class TestReceivingRequests:
+    """Tests for the agent receiving and processing requests."""
+
+    @pytest.fixture
+    def requester(self, mesh):
+        """Create another agent to send requests."""
+        class Requester(Agent):
+            role = "requester"
+            capabilities = ["requesting"]
+            async def execute_task(self, task): return {}
+
+        req = mesh.add(Requester)
+        req.peers = PeerClient(
+            coordinator=None,
+            agent_id=req.agent_id,
+            agent_role=req.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+        return req
+
+    @pytest.mark.asyncio
+    async def test_analyst_receives_and_responds(self, analyst_with_peers, requester):
+        """Analyst should receive request, process with LLM, and respond."""
+        analyst_task = asyncio.create_task(analyst_with_peers.run())
+        await asyncio.sleep(0.1)
+
+        try:
+            # Requester asks analyst
+            response = await requester.peers.request("analyst", {
+                "query": "Analyze market data"
+            }, timeout=5.0)
+
+            assert response is not None
+            assert "Analysis" in response["response"]
+            assert analyst_with_peers.analyses_count == 1
+            assert len(analyst_with_peers.requests_processed) == 1
+
+        finally:
+            analyst_with_peers.request_shutdown()
+            analyst_task.cancel()
+            try:
+                await analyst_task
+            except asyncio.CancelledError:
+                pass
+
+    @pytest.mark.asyncio
+    async def test_analyst_receives_broadcasts(self, analyst_with_peers, requester):
+        """Analyst should receive broadcast notifications."""
+        analyst_task = asyncio.create_task(analyst_with_peers.run())
+        await asyncio.sleep(0.1)
+
+        try:
+            await requester.peers.broadcast({"message": "System update!"})
+            await asyncio.sleep(0.2)
+
+            assert len(analyst_with_peers.received_broadcasts) == 1
+
+        finally:
+            analyst_with_peers.request_shutdown()
+            analyst_task.cancel()
+            try:
+                await analyst_task
+            except asyncio.CancelledError:
+                pass
+
+
+class TestBidirectionalCommunication:
+    """Tests proving the agent can BOTH send AND receive."""
+
+    @pytest.mark.asyncio
+    async def test_analyst_sends_while_receiving(self, mesh):
+        """
+        Analyst receives a request AND asks another peer for help.
+
+        This proves the agent is a full mesh participant.
+        """
+        # Create analyst
+        analyst = mesh.add(ConnectedAnalyst)
+        analyst.peers = PeerClient(
+            coordinator=None,
+            agent_id=analyst.agent_id,
+            agent_role=analyst.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+        # Create researcher that analyst can ask
+        class Researcher(Agent):
+            role = "researcher"
+            capabilities = ["research"]
+            requests_received = []
+            async def execute_task(self, task): return {}
+            async def run(self):
+                while not self.shutdown_requested:
+                    msg = await self.peers.receive(timeout=0.5)
+                    if msg and msg.is_request:
+                        self.requests_received.append(msg.data)
+                        await self.peers.respond(msg, {"response": "Research data"})
+
+        researcher = mesh.add(Researcher)
+        researcher.peers = PeerClient(
+            coordinator=None,
+            agent_id=researcher.agent_id,
+            agent_role=researcher.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+        # Create requester
+        class Requester(Agent):
+            role = "requester"
+            capabilities = ["requesting"]
+            async def execute_task(self, task): return {}
+
+        requester = mesh.add(Requester)
+        requester.peers = PeerClient(
+            coordinator=None,
+            agent_id=requester.agent_id,
+            agent_role=requester.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+        # Start agents
+        researcher_task = asyncio.create_task(researcher.run())
+        await asyncio.sleep(0.1)
+
+        try:
+            # Analyst asks researcher (SENDING)
+            result = await analyst.execute_tool("ask_peer", {
+                "role": "researcher",
+                "question": "Get market research"
+            })
+            assert "Research data" in result
+
+            # At the same time, analyst can receive requests
+            # (In a real scenario, analyst.run() would be running)
+
+        finally:
+            researcher.request_shutdown()
+            researcher_task.cancel()
+            try:
+                await researcher_task
+            except asyncio.CancelledError:
+                pass
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# MANUAL RUN
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def _run_integration_test():
+    """Full integration showing analyst as a complete mesh participant."""
+    print("\n[Integration: Analyst as full participant]")
+
+    mesh = Mesh(mode="p2p")
+
+    # Create analyst
+    analyst = mesh.add(ConnectedAnalyst)
+
+    # Create researcher for analyst to talk to
+    class Researcher(Agent):
+        role = "researcher"
+        capabilities = ["research"]
+        async def execute_task(self, task): return {}
+        async def run(self):
+            while not self.shutdown_requested:
+                msg = await self.peers.receive(timeout=0.5)
+                if msg and msg.is_request:
+                    await self.peers.respond(msg, {
+                        "response": f"Research: {msg.data.get('query')}"
+                    })
+
+    researcher = mesh.add(Researcher)
+
+    # Create requester that will ask analyst
+    class Requester(Agent):
+        role = "requester"
+        capabilities = ["requesting"]
+        async def execute_task(self, task): return {}
+
+    requester = mesh.add(Requester)
+
+    # Inject peers
+    for agent in mesh.agents:
+        agent.peers = PeerClient(
+            coordinator=None,
+            agent_id=agent.agent_id,
+            agent_role=agent.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+    # Start agents
+    analyst_task = asyncio.create_task(analyst.run())
+    researcher_task = asyncio.create_task(researcher.run())
+    await asyncio.sleep(0.1)
+
+    try:
+        # Show analyst's tools (includes peer tools)
+        tools = analyst.get_tools()
+        print(f"  Analyst tools: {[t['name'] for t in tools]}")
+        assert "ask_peer" in [t['name'] for t in tools]
+        print("  ✓ Analyst has peer tools")
+
+        # Analyst SENDS to researcher
+        result = await analyst.execute_tool("ask_peer", {
+            "role": "researcher",
+            "question": "Get market data"
+        })
+        print(f"  ✓ Analyst asked researcher: {result}")
+
+        # Analyst RECEIVES from requester
+        response = await requester.peers.request("analyst", {
+            "query": "Analyze sales"
+        }, timeout=5.0)
+        print(f"  ✓ Analyst received and responded: {response['response'][:40]}...")
+
+        # Analyst broadcasts
+        result = await analyst.execute_tool("broadcast_update", {
+            "message": "Analysis complete!"
+        })
+        print(f"  ✓ Analyst broadcast: {result}")
+
+        print("\n  PROVED: Analyst can SEND, RECEIVE, and BROADCAST")
+
+    finally:
+        analyst.request_shutdown()
+        researcher.request_shutdown()
+        analyst_task.cancel()
+        researcher_task.cancel()
+        for t in [analyst_task, researcher_task]:
+            try:
+                await t
+            except asyncio.CancelledError:
+                pass
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# LLM TOOL-USE DEMO - Shows how Analyst's LLM decides to use tools
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class AnalystMockLLM:
+    """
+    Simulates Analyst's LLM decision-making.
+
+    The Analyst's LLM might decide to:
+    - Use LOCAL tool (analyze) for analysis requests
+    - Use PEER tool (ask_peer → researcher) when it needs data
+    - Respond directly for simple queries
+    """
+
+    def __init__(self):
+        self.calls = []
+
+    def chat(self, messages: list, tools: list) -> dict:
+        """Simulate LLM deciding what tool to use."""
+        self.calls.append({"messages": messages, "tools": tools})
+
+        user_msg = ""
+        for msg in reversed(messages):
+            if msg.get("role") == "user":
+                user_msg = msg.get("content", "").lower()
+                break
+
+        tool_names = [t["name"] for t in tools]
+
+        # Analyst's decision logic
+        if "analyze" in user_msg and "analyze" in tool_names:
+            # Use LOCAL analyze tool
+            return {
+                "type": "tool_use",
+                "tool": "analyze",
+                "args": {"data": user_msg}
+            }
+        elif "research" in user_msg or "data" in user_msg or "find" in user_msg:
+            if "ask_peer" in tool_names:
+                # Need data - ask researcher
+                return {
+                    "type": "tool_use",
+                    "tool": "ask_peer",
+                    "args": {"role": "researcher", "question": user_msg}
+                }
+        elif "report" in user_msg and "generate_report" in tool_names:
+            # Generate report
+            return {
+                "type": "tool_use",
+                "tool": "generate_report",
+                "args": {"analysis": {"response": "Previous analysis", "confidence": 0.85}}
+            }
+
+        # Default: respond directly
+        return {
+            "type": "text",
+            "content": f"[analyst] I understand: {user_msg}"
+        }
+
+    def incorporate_tool_result(self, tool_name: str, result: str) -> str:
+        return f"Based on {tool_name}: {result}"
+
+
+class LLMPoweredAnalyst(ConnectedAnalyst):
+    """Analyst with full LLM simulation for demo."""
+
+    def __init__(self, agent_id=None):
+        super().__init__(agent_id)
+        self.llm = AnalystMockLLM()
+        self.conversation_history = []
+        self.tool_calls_made = []
+
+    async def chat(self, user_message: str) -> str:
+        """Complete LLM chat loop."""
+        self.conversation_history.append({"role": "user", "content": user_message})
+
+        tools = self.get_tools()
+        llm_response = self.llm.chat(self.conversation_history, tools)
+
+        if llm_response["type"] == "tool_use":
+            tool_name = llm_response["tool"]
+            tool_args = llm_response["args"]
+            self.tool_calls_made.append({"tool": tool_name, "args": tool_args})
+
+            tool_result = await self.execute_tool(tool_name, tool_args)
+
+            self.conversation_history.append({"role": "assistant", "content": f"[Tool: {tool_name}]"})
+            self.conversation_history.append({"role": "tool", "content": tool_result})
+
+            final = self.llm.incorporate_tool_result(tool_name, tool_result)
+        else:
+            final = llm_response["content"]
+
+        self.conversation_history.append({"role": "assistant", "content": final})
+        return final
+
+
+async def demo_analyst_llm_flow():
+    """Demo showing how Analyst's LLM decides to use tools."""
+    print("\n" + "="*70)
+    print("ANALYST LLM TOOL-USE FLOW")
+    print("="*70)
+
+    print("""
+    This shows how the ANALYST's LLM decides which tools to use:
+
+    ┌─────────────────────────────────────────────────────────────────┐
+    │  "analyze X"        → LLM uses LOCAL tool (analyze)             │
+    │  "find data on X"   → LLM uses PEER tool (ask_peer→researcher)  │
+    │  "generate report"  → LLM uses LOCAL tool (generate_report)     │
+    │  "hello"            → LLM responds DIRECTLY                     │
+    └─────────────────────────────────────────────────────────────────┘
+    """)
+
+    mesh = Mesh(mode="p2p")
+
+    analyst = LLMPoweredAnalyst()
+    mesh._agent_registry["analyst"] = [analyst]
+
+    # Add researcher for analyst to delegate to
+    class Researcher(Agent):
+        role = "researcher"
+        capabilities = ["research"]
+        async def execute_task(self, task): return {}
+        async def run(self):
+            while not self.shutdown_requested:
+                msg = await self.peers.receive(timeout=0.5)
+                if msg and msg.is_request:
+                    await self.peers.respond(msg, {
+                        "response": f"[researcher] Found 5 papers on: {msg.data.get('query')}"
+                    })
+
+    researcher = Researcher()
+    mesh._agent_registry["researcher"] = [researcher]
+    mesh.agents = [analyst, researcher]
+
+    for agent in mesh.agents:
+        agent.peers = PeerClient(
+            coordinator=None,
+            agent_id=agent.agent_id,
+            agent_role=agent.role,
+            agent_registry=mesh._agent_registry,
+            node_id="local"
+        )
+
+    researcher_task = asyncio.create_task(researcher.run())
+    await asyncio.sleep(0.1)
+
+    # ─────────────────────────────────────────────────────────────────
+    # SCENARIO 1: Analyst uses LOCAL analyze tool
+    # ─────────────────────────────────────────────────────────────────
+    print("\n" + "─"*70)
+    print("SCENARIO 1: Request to ANALYZE (Analyst uses LOCAL tool)")
+    print("─"*70)
+    print(f"\n[USER] → Analyst: \"Please analyze Q4 sales performance\"")
+    print(f"\n[ANALYST LLM FLOW]")
+
+    tools = analyst.get_tools()
+    print(f"  │")
+    print(f"  ├─→ [LLM RECEIVES] Message: \"Please analyze Q4 sales performance\"")
+    print(f"  │   Tools available: {[t['name'] for t in tools]}")
+
+    response = await analyst.chat("Please analyze Q4 sales performance")
+
+    print(f"  │")
+    print(f"  ├─→ [LLM DECIDES] Use LOCAL tool: analyze")
+    print(f"  │   Args: {analyst.tool_calls_made[-1]['args']}")
+    print(f"  │")
+    print(f"  ├─→ [EXECUTE LOCAL TOOL] analyze")
+    print(f"  │")
+    print(f"  └─→ [LLM RESPONDS] \"{response[:60]}...\"")
+
+    print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+    print(f"\n✓ Analyst's LLM used LOCAL tool (analyze)")
+
+    analyst.tool_calls_made = []
+    analyst.conversation_history = []
+
+    # ─────────────────────────────────────────────────────────────────
+    # SCENARIO 2: Analyst delegates to researcher
+    # ─────────────────────────────────────────────────────────────────
+    print("\n" + "─"*70)
+    print("SCENARIO 2: Request needs DATA (Analyst delegates to RESEARCHER)")
+    print("─"*70)
+    print(f"\n[USER] → Analyst: \"Find research data on market trends\"")
+    print(f"\n[ANALYST LLM FLOW]")
+
+    print(f"  │")
+    print(f"  ├─→ [LLM RECEIVES] Message: \"Find research data on market trends\"")
+    print(f"  │   Tools available: {[t['name'] for t in tools]}")
+
+    response = await analyst.chat("Find research data on market trends")
+
+    print(f"  │")
+    print(f"  ├─→ [LLM DECIDES] Use PEER tool: ask_peer → researcher")
+    print(f"  │   Args: {analyst.tool_calls_made[-1]['args']}")
+    print(f"  │")
+    print(f"  ├─→ [EXECUTE PEER TOOL] ask_peer")
+    print(f"  │   Sending to: researcher")
+    print(f"  │   Researcher responds: \"Found 5 papers...\"")
+    print(f"  │")
+    print(f"  └─→ [LLM RESPONDS] \"{response[:60]}...\"")
+
+    print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+    print(f"\n✓ Analyst's LLM delegated to PEER (researcher)")
+
+    analyst.tool_calls_made = []
+    analyst.conversation_history = []
+
+    # ─────────────────────────────────────────────────────────────────
+    # SCENARIO 3: Analyst responds directly
+    # ─────────────────────────────────────────────────────────────────
+    print("\n" + "─"*70)
+    print("SCENARIO 3: Simple greeting (Analyst responds DIRECTLY)")
+    print("─"*70)
+    print(f"\n[USER] → Analyst: \"Hello, what can you do?\"")
+    print(f"\n[ANALYST LLM FLOW]")
+
+    print(f"  │")
+    print(f"  ├─→ [LLM RECEIVES] Message: \"Hello, what can you do?\"")
+    print(f"  │   Tools available: {[t['name'] for t in tools]}")
+
+    response = await analyst.chat("Hello, what can you do?")
+
+    print(f"  │")
+    print(f"  └─→ [LLM DECIDES] Respond directly (no tool needed)")
+
+    print(f"\n[FINAL RESPONSE] → User: \"{response}\"")
+    print(f"\n✓ Analyst's LLM responded DIRECTLY (no tools)")
+
+    # Cleanup
+    researcher.request_shutdown()
+    researcher_task.cancel()
+    try: await researcher_task
+    except asyncio.CancelledError: pass
+
+    print("\n" + "="*70)
+    print("ANALYST LLM DEMO COMPLETE!")
+    print("="*70)
+
+
+if __name__ == "__main__":
+    print("\n" + "="*60)
+    print("TEST 3: ANALYST AS FULL LLM-POWERED MESH PARTICIPANT")
+    print("="*60)
+
+    print("\n[Framework Integration]")
+    t = TestFrameworkIntegration()
+    t.test_inherits_from_agent()
+    print("✓ Inherits from Agent")
+    t.test_has_required_attributes()
+    print("✓ Has role and capabilities")
+
+    mesh1 = Mesh(mode="p2p")
+    t.test_can_be_added_to_mesh(mesh1)
+    print("✓ Can be added to mesh")
+
+    print("\n[Local Tools]")
+    mesh2 = Mesh(mode="p2p")
+    analyst = mesh2.add(ConnectedAnalyst)
+    t2 = TestLocalTools()
+    t2.test_analyze_works(analyst)
+    print("✓ analyze() works")
+    t2.test_generate_report_works(analyst)
+    print("✓ generate_report() works")
+    t2.test_get_tools_returns_local_tools(analyst)
+    print("✓ get_tools() returns local tools")
+
+    print("\n[Peer Tools Integration]")
+    mesh3 = Mesh(mode="p2p")
+    analyst3 = mesh3.add(ConnectedAnalyst)
+    analyst3.peers = PeerClient(
+        coordinator=None,
+        agent_id=analyst3.agent_id,
+        agent_role=analyst3.role,
+        agent_registry=mesh3._agent_registry,
+        node_id="local"
+    )
+
+    # Add another agent
+    other = mesh3.add(ConnectedAnalyst, agent_id="other")
+    other.peers = PeerClient(
+        coordinator=None,
+        agent_id=other.agent_id,
+        agent_role=other.role,
+        agent_registry=mesh3._agent_registry,
+        node_id="local"
+    )
+
+    tools = analyst3.get_tools()
+    tool_names = [t["name"] for t in tools]
+    assert "ask_peer" in tool_names
+    assert "broadcast_update" in tool_names
+    assert "list_peers" in tool_names
+    print(f"✓ get_tools() includes peer tools: {tool_names}")
+
+    print("\n[Bidirectional Communication]")
+    asyncio.run(_run_integration_test())
+
+    print("\n" + "="*60)
+    print("ALL TESTS PASSED!")
+    print("="*60)
+    print("""
+    KEY INSIGHT: Every agent is a FULL MESH PARTICIPANT
+
+    The analyst can:
+    ├── Use LOCAL tools (analyze, generate_report)
+    ├── SEND to peers (ask_peer → researcher)
+    ├── RECEIVE from peers (requester → analyst)
+    └── BROADCAST to all (broadcast_update)
+
+    The role ("analyst") defines what it's GOOD at,
+    NOT whether it sends or receives.
+    """)
+
+    # Run LLM tool-use demo
+    asyncio.run(demo_analyst_llm_flow())
+
+    print("""
+    ═══════════════════════════════════════════════════════════════════════════
+    KEY INSIGHT: ANALYST LLM TOOL-USE DECISIONS
+    ═══════════════════════════════════════════════════════════════════════════
+
+    The Analyst's LLM sees tools and DECIDES:
+
+    ┌─────────────────────────────────────────────────────────────────────────┐
+    │  "analyze X"        → Use LOCAL tool (analyze)                          │
+    │  "find data on X"   → Use PEER tool (ask_peer → researcher)             │
+    │  "generate report"  → Use LOCAL tool (generate_report)                  │
+    │  "hello"            → Respond DIRECTLY (no tool needed)                 │
+    └─────────────────────────────────────────────────────────────────────────┘
+
+    The Analyst is GOOD at analysis, but can delegate data gathering!
+    ═══════════════════════════════════════════════════════════════════════════
+    """)
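Note on running the added file: it works two ways. Under pytest, the @pytest.mark.asyncio markers imply that a pytest-asyncio-style plugin is expected in the test environment (an assumption; the diff does not show the test dependencies). Run directly as `python tests/test_03_analyst_with_framework.py`, the `__main__` block above drives the integration run and the LLM tool-use demo, with AnalystMockLLM standing in for a real model.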