jarviscore-framework 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/cloud_deployment_example.py +162 -0
- examples/customagent_p2p_example.py +566 -183
- examples/fastapi_integration_example.py +570 -0
- examples/listeneragent_cognitive_discovery_example.py +343 -0
- jarviscore/__init__.py +22 -5
- jarviscore/cli/smoketest.py +8 -4
- jarviscore/core/agent.py +227 -0
- jarviscore/data/examples/cloud_deployment_example.py +162 -0
- jarviscore/data/examples/customagent_p2p_example.py +566 -183
- jarviscore/data/examples/fastapi_integration_example.py +570 -0
- jarviscore/data/examples/listeneragent_cognitive_discovery_example.py +343 -0
- jarviscore/docs/API_REFERENCE.md +296 -3
- jarviscore/docs/CHANGELOG.md +97 -0
- jarviscore/docs/CONFIGURATION.md +2 -2
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +2021 -255
- jarviscore/docs/GETTING_STARTED.md +112 -8
- jarviscore/docs/TROUBLESHOOTING.md +3 -3
- jarviscore/docs/USER_GUIDE.md +152 -6
- jarviscore/integrations/__init__.py +16 -0
- jarviscore/integrations/fastapi.py +247 -0
- jarviscore/p2p/broadcaster.py +10 -3
- jarviscore/p2p/coordinator.py +310 -14
- jarviscore/p2p/keepalive.py +45 -23
- jarviscore/p2p/peer_client.py +282 -10
- jarviscore/p2p/swim_manager.py +9 -4
- jarviscore/profiles/__init__.py +10 -2
- jarviscore/profiles/listeneragent.py +292 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/METADATA +42 -8
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/RECORD +36 -22
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/WHEEL +1 -1
- tests/test_13_dx_improvements.py +554 -0
- tests/test_14_cloud_deployment.py +403 -0
- tests/test_15_llm_cognitive_discovery.py +684 -0
- tests/test_16_unified_dx_flow.py +947 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.0.dist-info → jarviscore_framework-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,947 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Test 16: Unified DX Flow - Autonomous Agents with Mesh as Tool
|
|
3
|
+
|
|
4
|
+
Tests the COMPLETE real-world flow combining all DX improvements:
|
|
5
|
+
1. FastAPI Integration (JarvisLifespan)
|
|
6
|
+
2. ListenerAgent Profile
|
|
7
|
+
3. Cognitive Discovery (get_cognitive_context)
|
|
8
|
+
4. LLM Autonomous Delegation - Each agent has mesh as a TOOL
|
|
9
|
+
5. Peer-to-Peer Communication - No coordinator, any agent can talk to any agent
|
|
10
|
+
|
|
11
|
+
Key Pattern Tested:
|
|
12
|
+
Each agent has the MESH as a TOOL
|
|
13
|
+
Agent A (LLM) → discovers peers via get_cognitive_context()
|
|
14
|
+
Agent A → delegates to Agent B via ask_peer tool
|
|
15
|
+
Agent B responds → Agent A synthesizes response
|
|
16
|
+
|
|
17
|
+
Run with: pytest tests/test_16_unified_dx_flow.py -v -s
|
|
18
|
+
"""
|
|
19
|
+
import asyncio
|
|
20
|
+
import os
|
|
21
|
+
import sys
|
|
22
|
+
import pytest
|
|
23
|
+
import logging
|
|
24
|
+
from unittest.mock import MagicMock
|
|
25
|
+
|
|
26
|
+
sys.path.insert(0, '.')
|
|
27
|
+
|
|
28
|
+
logging.basicConfig(level=logging.INFO)
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
33
|
+
# LLM CLIENT HELPER
|
|
34
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
35
|
+
|
|
36
|
+
def get_llm_client():
    """Build an Anthropic client from jarviscore settings / environment.

    Returns:
        tuple: ``(client, model, error)`` — on success ``error`` is None;
        on any failure (missing key, import error, bad config) ``client``
        and ``model`` are None and ``error`` carries a readable reason.
    """
    try:
        from jarviscore.config import settings
        from anthropic import Anthropic

        key = settings.claude_api_key or os.environ.get("CLAUDE_API_KEY")
        if not key:
            return None, None, "No API key"

        base_url = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
        model_name = (
            settings.claude_model
            or os.environ.get("CLAUDE_MODEL")
            or "claude-sonnet-4-20250514"
        )

        # Only pass base_url when an endpoint override is configured.
        kwargs = {"api_key": key}
        if base_url:
            kwargs["base_url"] = base_url

        return Anthropic(**kwargs), model_name, None
    except Exception as exc:
        # Deliberate best-effort: callers treat any failure as "no LLM".
        return None, None, str(exc)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def has_valid_llm():
    """Check if a working LLM backend is reachable.

    Performs a tiny real API call so that an invalid key or unreachable
    endpoint is detected up front rather than mid-test.

    Returns:
        bool: True only when a client could be built AND a probe request
        succeeded.
    """
    try:
        client, model, error = get_llm_client()
        if error:
            return False
        # Minimal one-message probe; any auth/network/API problem raises.
        client.messages.create(
            model=model,
            max_tokens=10,
            messages=[{"role": "user", "content": "Hi"}],
        )
        return True
    except Exception:
        # Fixed: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit during test collection.
        return False
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
_llm_available = None


def llm_is_available():
    """Return whether an LLM backend is usable, probing at most once.

    The outcome of the first has_valid_llm() call is cached in the
    module-level ``_llm_available`` flag so later callers (including the
    skipif marker) never trigger another network probe.
    """
    global _llm_available
    if _llm_available is not None:
        return _llm_available
    _llm_available = has_valid_llm()
    return _llm_available
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
requires_llm = pytest.mark.skipif(not llm_is_available(), reason="No valid LLM API key")
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
83
|
+
# AUTONOMOUS AGENT BASE - Each agent has mesh as a tool
|
|
84
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
85
|
+
|
|
86
|
+
def create_llm_agent_class():
    """Create the LLMAgent base class.

    Defined inside a factory so the jarviscore import runs lazily at call
    time (after sys.path setup at the top of this module) rather than at
    module import.
    """
    from jarviscore.profiles import ListenerAgent

    class LLMAgent(ListenerAgent):
        """
        Base for LLM-powered agents that can discover and delegate to peers.

        KEY PATTERN: The mesh is a TOOL for the LLM.
        - get_cognitive_context() tells LLM who's available
        - ask_peer tool lets LLM delegate to specialists
        - Each agent is autonomous - no central coordinator needed
        """
        # Short poll interval so listener loops shut down quickly in tests.
        listen_timeout = 0.1
        # Subclasses override with a role-specific prompt.
        system_prompt = "You are a helpful agent."

        async def setup(self):
            """Resolve the LLM client once at startup; None enables mock mode."""
            await super().setup()
            self.llm_client, self.llm_model, _ = get_llm_client()

        def _get_tools(self):
            """Get tools for LLM - includes ask_peer for mesh communication.

            Returns the Anthropic tools list: a single ask_peer tool whose
            schema takes the target role and the question to forward.
            """
            return [{
                "name": "ask_peer",
                "description": "Ask another agent in the mesh for help. Use this to delegate tasks to specialists.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "role": {"type": "string", "description": "Role of the agent to ask (e.g., 'analyst', 'researcher')"},
                        "question": {"type": "string", "description": "The question or task for that agent"}
                    },
                    "required": ["role", "question"]
                }
            }]

        async def _ask_peer(self, role: str, question: str) -> dict:
            """Execute ask_peer tool - send request to another agent.

            Blocks (up to 30s) for the peer's on_peer_request() result.
            """
            response = await self.peers.request(role, {"question": question}, timeout=30)
            return response

        async def chat(self, message: str) -> dict:
            """
            Process a message with LLM that can discover and delegate to peers.

            This is the CORE PATTERN:
            1. Build system prompt with WHO I AM + WHO ELSE IS AVAILABLE
            2. LLM sees available peers as potential helpers
            3. LLM decides whether to handle directly or delegate

            Returns a dict with "response", "delegated_to" (role or None),
            and — when delegation occurred — "peer_response".
            """
            if not self.llm_client:
                # No API key/client: fall back to keyword-based mock routing.
                return await self._chat_mock(message)

            # DYNAMIC DISCOVERY: Tell LLM who it is and who else is available
            peer_context = self.peers.get_cognitive_context() if self.peers else ""

            system = f"""{self.system_prompt}

{peer_context}"""

            response = self.llm_client.messages.create(
                model=self.llm_model,
                max_tokens=1024,
                system=system,
                messages=[{"role": "user", "content": message}],
                tools=self._get_tools()
            )

            # Handle tool use - check for tool_use FIRST (prioritize over text)
            tool_use_block = None
            text_content = None

            for block in response.content:
                if block.type == "tool_use" and block.name == "ask_peer":
                    tool_use_block = block
                elif hasattr(block, 'text'):
                    text_content = block.text

            if tool_use_block:
                role = tool_use_block.input.get("role")
                question = tool_use_block.input.get("question")

                # Delegate over the mesh, then feed the result back to the
                # LLM as a tool_result so it can synthesize a final answer.
                peer_response = await self._ask_peer(role, question)

                # Continue with tool result
                messages = [{"role": "user", "content": message}]
                messages.append({"role": "assistant", "content": response.content})
                messages.append({
                    "role": "user",
                    "content": [{"type": "tool_result", "tool_use_id": tool_use_block.id, "content": str(peer_response)}]
                })

                final = self.llm_client.messages.create(
                    model=self.llm_model,
                    max_tokens=1024,
                    system=system,
                    messages=messages
                )

                # Return the first text block of the synthesis turn.
                for block in final.content:
                    if hasattr(block, 'text'):
                        return {"response": block.text, "delegated_to": role, "peer_response": peer_response}

            # No delegation (or no text in the synthesis turn): answer directly.
            return {"response": text_content or "Processed.", "delegated_to": None}

        async def _chat_mock(self, message: str) -> dict:
            """Mock when LLM unavailable - for testing basic flow."""
            # Determine if we should delegate based on keywords
            msg_lower = message.lower()
            if any(w in msg_lower for w in ["analyze", "analysis", "data", "statistics", "insights"]):
                response = await self._ask_peer("analyst", message)
                return {"response": "Analysis complete", "delegated_to": "analyst", "peer_response": response}
            if any(w in msg_lower for w in ["research", "search", "find", "investigate"]):
                response = await self._ask_peer("researcher", message)
                return {"response": "Research complete", "delegated_to": "researcher", "peer_response": response}
            return {"response": f"[{self.role}] Processed: {message}", "delegated_to": None}

        async def on_peer_request(self, msg):
            """Handle requests from other agents.

            Inbound delegation goes through the same chat() pipeline, so a
            peer answering a request may itself delegate onward.
            """
            return await self.chat(msg.data.get("question", ""))

    return LLMAgent
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def create_autonomous_agents():
    """Create autonomous agents where each has the mesh as a tool.

    Returns:
        tuple: ``(AssistantAgent, AnalystAgent, ResearcherAgent)`` classes.
        Built in a factory so the lazy jarviscore import inside
        create_llm_agent_class() happens per call, not at module import.
    """
    LLMAgent = create_llm_agent_class()

    class AssistantAgent(LLMAgent):
        """General assistant that can delegate to specialists."""
        role = "assistant"
        capabilities = ["chat", "general_help", "delegation"]
        description = "General assistant that delegates specialized tasks to experts"

        system_prompt = """You are a helpful assistant. You can answer general questions directly.

For specialized tasks, you have access to other agents via the ask_peer tool:
- For data analysis, statistics, or insights → ask the "analyst" agent
- For research or information gathering → ask the "researcher" agent

Use ask_peer when the task requires specialized expertise. Be helpful and concise."""

    class AnalystAgent(LLMAgent):
        """Data analysis specialist with LLM."""
        role = "analyst"
        capabilities = ["data_analysis", "statistics", "insights", "reporting"]
        description = "Expert data analyst for statistics and insights"

        system_prompt = """You are an expert data analyst. You specialize in:
- Statistical analysis
- Data insights and trends
- Business metrics and KPIs
- Data visualization recommendations

Provide clear, actionable insights. If you need research data, you can ask the "researcher" agent."""

        async def on_peer_request(self, msg):
            """Handle analysis requests - return structured analysis.

            Overrides LLMAgent.on_peer_request: answers directly with a
            single LLM call (no further delegation loop).
            """
            question = msg.data.get("question", "")

            # Real LLM path: return the first text block as the analysis.
            if self.llm_client:
                response = self.llm_client.messages.create(
                    model=self.llm_model,
                    max_tokens=512,
                    system=self.system_prompt,
                    messages=[{"role": "user", "content": f"Analyze this request: {question}"}]
                )
                for block in response.content:
                    if hasattr(block, 'text'):
                        return {"analysis": block.text, "confidence": 0.9}

            # Mock response for testing
            return {
                "analysis": f"Analysis of: {question}",
                "findings": ["Revenue up 15%", "Costs down 8%", "Growth trend positive"],
                "confidence": 0.85
            }

    class ResearcherAgent(LLMAgent):
        """Research specialist with LLM."""
        role = "researcher"
        capabilities = ["research", "web_search", "fact_checking", "information_gathering"]
        description = "Research specialist for gathering and verifying information"

        system_prompt = """You are an expert researcher. You specialize in:
- Information gathering
- Fact checking
- Market research
- Competitive analysis

Provide well-sourced, accurate information. If you need data analysis, you can ask the "analyst" agent."""

        async def on_peer_request(self, msg):
            """Handle research requests - return structured research.

            Mirrors AnalystAgent.on_peer_request: one direct LLM call,
            with a canned fallback when no client is configured.
            """
            question = msg.data.get("question", "")

            if self.llm_client:
                response = self.llm_client.messages.create(
                    model=self.llm_model,
                    max_tokens=512,
                    system=self.system_prompt,
                    messages=[{"role": "user", "content": f"Research this topic: {question}"}]
                )
                for block in response.content:
                    if hasattr(block, 'text'):
                        return {"research": block.text, "sources": ["Internal analysis"]}

            # Mock response for testing
            return {
                "research": f"Research on: {question}",
                "sources": ["Industry Report 2024", "Market Analysis"],
                "summary": "Research findings compiled"
            }

    return AssistantAgent, AnalystAgent, ResearcherAgent
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
303
|
+
# TEST: AUTONOMOUS DISCOVERY - Each agent discovers peers
|
|
304
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
305
|
+
|
|
306
|
+
class TestAutonomousDiscovery:
    """Test that each agent autonomously discovers peers."""

    @pytest.mark.asyncio
    async def test_assistant_discovers_all_specialists(self):
        """Test assistant agent sees all specialist peers."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7990})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        try:
            # The assistant's peer view must contain both specialists.
            visible_roles = {peer["role"] for peer in assistant.peers.list_peers()}

            assert "analyst" in visible_roles, "Assistant should see analyst"
            assert "researcher" in visible_roles, "Assistant should see researcher"

            # The cognitive context handed to the LLM must mention both
            # roles and their headline capabilities.
            context = assistant.peers.get_cognitive_context()
            for fragment in ("analyst", "researcher", "data_analysis", "research"):
                assert fragment in context

        finally:
            await mesh.stop()

    @pytest.mark.asyncio
    async def test_analyst_can_see_other_agents(self):
        """Test analyst agent can see assistant and researcher."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7991})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        try:
            # Discovery is symmetric: a specialist sees its peers too.
            visible_roles = {peer["role"] for peer in analyst.peers.list_peers()}

            assert "assistant" in visible_roles, "Analyst should see assistant"
            assert "researcher" in visible_roles, "Analyst should see researcher"

        finally:
            await mesh.stop()

    @pytest.mark.asyncio
    async def test_all_agents_have_bidirectional_visibility(self):
        """Test all agents can see all other agents (true peer-to-peer)."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7992})
        agents = [
            mesh.add(AssistantAgent()),
            mesh.add(AnalystAgent()),
            mesh.add(ResearcherAgent()),
        ]

        await mesh.start()

        try:
            # With three agents in the mesh, every one must see the other two.
            for agent in agents:
                other_roles = [peer["role"] for peer in agent.peers.list_peers()]
                assert len(other_roles) == 2, f"{agent.role} should see 2 peers, got {len(other_roles)}"

        finally:
            await mesh.stop()
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
394
|
+
# TEST: AUTONOMOUS DELEGATION - Any agent can delegate to any other
|
|
395
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
396
|
+
|
|
397
|
+
class TestAutonomousDelegation:
    """Test autonomous delegation between agents.

    Both tests share one shape: bring up a 3-agent mesh, run the
    specialists' listen loops as background tasks, send the assistant a
    prompt that should trigger delegation, then tear everything down.
    The teardown sequence was duplicated verbatim in each test; it is
    now factored into _teardown() with the exact same ordering.
    """

    @staticmethod
    async def _teardown(mesh, analyst, researcher, analyst_task, researcher_task):
        """Stop listener agents and the mesh in the required order.

        Order matters: request shutdown on both agents, cancel both
        run() tasks, await each task (swallowing the expected
        CancelledError), and only then stop the mesh.
        """
        analyst.request_shutdown()
        researcher.request_shutdown()
        analyst_task.cancel()
        researcher_task.cancel()
        try:
            await analyst_task
        except asyncio.CancelledError:
            pass
        try:
            await researcher_task
        except asyncio.CancelledError:
            pass
        await mesh.stop()

    @requires_llm
    @pytest.mark.asyncio
    async def test_assistant_delegates_to_analyst(self):
        """Test assistant autonomously delegates analysis to analyst."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7993})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        # Start listeners
        analyst_task = asyncio.create_task(analyst.run())
        researcher_task = asyncio.create_task(researcher.run())

        try:
            await asyncio.sleep(0.3)  # let listener loops come up

            # Query that should be delegated to analyst - be explicit
            result = await assistant.chat(
                "Please analyze the Q4 2024 sales data and provide statistical insights on revenue trends"
            )

            assert result["delegated_to"] == "analyst", f"Expected delegation to analyst, got {result.get('delegated_to')}"
            assert "peer_response" in result
            assert "findings" in result["peer_response"] or "analysis" in result["peer_response"]

        finally:
            await self._teardown(mesh, analyst, researcher, analyst_task, researcher_task)

    @requires_llm
    @pytest.mark.asyncio
    async def test_assistant_delegates_to_researcher(self):
        """Test assistant autonomously delegates research to researcher."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7994})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        analyst_task = asyncio.create_task(analyst.run())
        researcher_task = asyncio.create_task(researcher.run())

        try:
            await asyncio.sleep(0.3)

            # Query that should be delegated to researcher - be explicit
            result = await assistant.chat(
                "Research our main competitors and gather information about their market positioning"
            )

            assert result["delegated_to"] == "researcher", f"Expected delegation to researcher, got {result.get('delegated_to')}"
            assert "peer_response" in result
            assert "sources" in result["peer_response"] or "research" in result["peer_response"]

        finally:
            await self._teardown(mesh, analyst, researcher, analyst_task, researcher_task)
|
|
490
|
+
|
|
491
|
+
|
|
492
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
493
|
+
# TEST: FASTAPI INTEGRATION - JarvisLifespan with autonomous agents
|
|
494
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
495
|
+
|
|
496
|
+
class TestFastAPIIntegration:
    """Test FastAPI + JarvisLifespan with autonomous agents."""

    @pytest.mark.asyncio
    async def test_jarvis_lifespan_starts_all_agents(self):
        """Test JarvisLifespan correctly initializes all agents."""
        from jarviscore.integrations import JarvisLifespan

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        agents = [AssistantAgent(), AnalystAgent(), ResearcherAgent()]
        lifespan = JarvisLifespan(agents, mode="p2p", bind_port=7995)

        # Stand-in for a FastAPI app; the lifespan only touches app.state.
        mock_app = MagicMock()
        mock_app.state = MagicMock()

        async with lifespan(mock_app):
            # All agents should be registered
            assert "assistant" in mock_app.state.jarvis_agents
            assert "analyst" in mock_app.state.jarvis_agents
            assert "researcher" in mock_app.state.jarvis_agents

            # Mesh should be started
            # NOTE(review): asserts on the private _started flag — assumes
            # JarvisLifespan exposes its Mesh and that Mesh sets _started.
            assert lifespan.mesh is not None
            assert lifespan.mesh._started is True

            # Each agent should see others
            assistant = mock_app.state.jarvis_agents["assistant"]
            peers = assistant.peers.list_peers()
            roles = [p['role'] for p in peers]

            assert "analyst" in roles
            assert "researcher" in roles

    @requires_llm
    @pytest.mark.asyncio
    async def test_fastapi_flow_assistant_to_analyst(self):
        """Test complete HTTP → Assistant → Analyst → Response flow."""
        from jarviscore.integrations import JarvisLifespan

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        agents = [AssistantAgent(), AnalystAgent(), ResearcherAgent()]
        lifespan = JarvisLifespan(agents, mode="p2p", bind_port=7996)

        mock_app = MagicMock()
        mock_app.state = MagicMock()

        async with lifespan(mock_app):
            assistant = mock_app.state.jarvis_agents["assistant"]
            analyst = mock_app.state.jarvis_agents["analyst"]
            researcher = mock_app.state.jarvis_agents["researcher"]

            # Start listeners
            analyst_task = asyncio.create_task(analyst.run())
            researcher_task = asyncio.create_task(researcher.run())

            try:
                await asyncio.sleep(0.3)  # let listener loops come up

                # Simulate HTTP request → assistant processing - be explicit
                result = await assistant.chat(
                    "Please analyze the Q4 2024 revenue data and provide detailed statistical insights on trends"
                )

                # Should have delegated to analyst
                assert result["delegated_to"] == "analyst"
                assert result["peer_response"] is not None

            finally:
                # No mesh.stop() here: the lifespan context manager owns
                # the mesh and stops it when the `async with` exits.
                analyst.request_shutdown()
                researcher.request_shutdown()
                analyst_task.cancel()
                researcher_task.cancel()
                try:
                    await analyst_task
                except asyncio.CancelledError:
                    pass
                try:
                    await researcher_task
                except asyncio.CancelledError:
                    pass
|
|
579
|
+
|
|
580
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
581
|
+
# TEST: REAL LLM INTEGRATION - Autonomous delegation with actual LLM
|
|
582
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
583
|
+
|
|
584
|
+
class TestRealLLMAutonomousDelegation:
    """Test the complete flow with real LLM integration."""

    @requires_llm
    @pytest.mark.asyncio
    async def test_llm_autonomously_delegates_to_analyst(self):
        """Test LLM-powered assistant autonomously delegates to analyst."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7997})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        # Run specialists' listen loops in the background so they can
        # answer the assistant's ask_peer requests.
        analyst_task = asyncio.create_task(analyst.run())
        researcher_task = asyncio.create_task(researcher.run())

        try:
            await asyncio.sleep(0.3)  # let listener loops come up

            # Verify LLM is available
            assert assistant.llm_client is not None, "LLM client should be available"

            # Verify assistant sees peers
            context = assistant.peers.get_cognitive_context()
            assert "analyst" in context, "Assistant should see analyst in context"

            # Query that should trigger delegation
            result = await assistant.chat(
                "Please analyze the Q4 2024 sales data and provide key insights"
            )

            # LLM should have delegated to analyst
            assert result["delegated_to"] == "analyst", \
                f"LLM should delegate to analyst, got: {result.get('delegated_to')}"
            assert result["peer_response"] is not None

        finally:
            # Teardown order matters: shut down agents, cancel their run()
            # tasks, await each (swallowing CancelledError), then stop mesh.
            analyst.request_shutdown()
            researcher.request_shutdown()
            analyst_task.cancel()
            researcher_task.cancel()
            try:
                await analyst_task
            except asyncio.CancelledError:
                pass
            try:
                await researcher_task
            except asyncio.CancelledError:
                pass
            await mesh.stop()

    @requires_llm
    @pytest.mark.asyncio
    async def test_llm_autonomously_delegates_to_researcher(self):
        """Test LLM-powered assistant autonomously delegates to researcher."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7998})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        analyst_task = asyncio.create_task(analyst.run())
        researcher_task = asyncio.create_task(researcher.run())

        try:
            await asyncio.sleep(0.3)

            # Query that should trigger research delegation
            result = await assistant.chat(
                "Research our main competitors and their market positioning"
            )

            # LLM should have delegated to researcher
            assert result["delegated_to"] == "researcher", \
                f"LLM should delegate to researcher, got: {result.get('delegated_to')}"
            assert result["peer_response"] is not None

        finally:
            analyst.request_shutdown()
            researcher.request_shutdown()
            analyst_task.cancel()
            researcher_task.cancel()
            try:
                await analyst_task
            except asyncio.CancelledError:
                pass
            try:
                await researcher_task
            except asyncio.CancelledError:
                pass
            await mesh.stop()

    @requires_llm
    @pytest.mark.asyncio
    async def test_llm_responds_directly_without_delegation(self):
        """Test LLM responds directly for general questions without delegation."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        mesh = Mesh(mode="p2p", config={"bind_port": 7999})
        assistant = mesh.add(AssistantAgent())
        analyst = mesh.add(AnalystAgent())
        researcher = mesh.add(ResearcherAgent())

        await mesh.start()

        try:
            await asyncio.sleep(0.3)

            # General question that doesn't need delegation.
            # No listener tasks are started: if the LLM wrongly delegated,
            # the peer request would time out rather than succeed.
            result = await assistant.chat("What is 2 + 2?")

            # Should NOT delegate
            assert result["delegated_to"] is None, \
                f"LLM should not delegate for simple questions, got: {result.get('delegated_to')}"
            assert "response" in result

        finally:
            await mesh.stop()
|
|
715
|
+
|
|
716
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
717
|
+
# TEST: COGNITIVE CONTEXT ACCURACY
|
|
718
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
719
|
+
|
|
720
|
+
class TestCognitiveContextAccuracy:
    """Test that cognitive context accurately represents the mesh."""

    @pytest.mark.asyncio
    async def test_cognitive_context_includes_all_peer_details(self):
        """Cognitive context should expose peer roles, capabilities, and delegation hints."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        p2p = Mesh(mode="p2p", config={"bind_port": 8000})
        assistant = p2p.add(AssistantAgent())
        p2p.add(AnalystAgent())
        p2p.add(ResearcherAgent())

        await p2p.start()
        try:
            context = assistant.peers.get_cognitive_context(format="markdown")

            # Peer roles must be listed.
            assert "analyst" in context
            assert "researcher" in context

            # At least one advertised capability for each peer must appear.
            assert "data_analysis" in context or "statistics" in context
            assert "research" in context or "web_search" in context

            # The delegation tool must be explained to the LLM.
            assert "ask_peer" in context
        finally:
            await p2p.stop()

    @pytest.mark.asyncio
    async def test_build_system_prompt_combines_base_and_context(self):
        """build_system_prompt should append peer context to the caller's base prompt."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()

        p2p = Mesh(mode="p2p", config={"bind_port": 8001})
        assistant = p2p.add(AssistantAgent())
        p2p.add(AnalystAgent())

        await p2p.start()
        try:
            base_prompt = "You are a helpful assistant."
            combined = assistant.peers.build_system_prompt(base_prompt)

            # The caller-supplied base prompt survives verbatim.
            assert "You are a helpful assistant" in combined

            # The generated peer section is appended.
            assert "AVAILABLE MESH PEERS" in combined
            assert "analyst" in combined
        finally:
            await p2p.stop()
|
781
|
+
|
|
782
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
783
|
+
# TEST: CLOUD DEPLOYMENT - Standalone agent joining mesh (Day 2)
|
|
784
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
785
|
+
|
|
786
|
+
def create_standalone_scout_agent():
    """Create a standalone scout agent for cloud deployment testing."""
    base_cls = create_llm_agent_class()

    class ScoutAgent(base_cls):
        """Standalone agent that joins an existing mesh."""
        role = "scout"
        capabilities = ["scouting", "reconnaissance", "market_intel"]
        description = "Scout agent for market intelligence"

        system_prompt = """You are a scout agent. You gather market intelligence."""

        async def on_peer_request(self, msg):
            # Answer any peer request with a canned report keyed on the question.
            topic = msg.data.get("question", "")
            report = {
                "intel": f"Scouting report on: {topic}",
                "signals": ["Emerging trend detected"],
                "confidence": 0.8,
            }
            return report

    return ScoutAgent
|
|
808
|
+
|
|
809
|
+
class TestCloudDeployment:
    """Test Day 2 cloud deployment patterns - standalone agents joining mesh."""

    @pytest.mark.asyncio
    async def test_agent_has_join_mesh_method(self):
        """Agents must expose the standalone join/leave/connection-state API."""
        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()
        agent = AssistantAgent()

        for attr, why in (
            ('join_mesh', "Agent should have join_mesh method"),
            ('leave_mesh', "Agent should have leave_mesh method"),
            ('is_mesh_connected', "Agent should have is_mesh_connected property"),
        ):
            assert hasattr(agent, attr), why

    @pytest.mark.asyncio
    async def test_agent_is_mesh_connected_initially_false(self):
        """A freshly constructed agent reports itself as not mesh-connected."""
        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()
        fresh = AssistantAgent()

        assert fresh.is_mesh_connected is False, "Agent should not be connected initially"

    @pytest.mark.asyncio
    async def test_standalone_agent_visibility_after_mesh_add(self):
        """All agents added before mesh.start() must see each other as peers."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()
        ScoutAgent = create_standalone_scout_agent()

        p2p = Mesh(mode="p2p", config={"bind_port": 8002})

        # Standard pattern: register every agent up front, then start once.
        assistant = p2p.add(AssistantAgent())
        analyst = p2p.add(AnalystAgent())
        scout = p2p.add(ScoutAgent())

        await p2p.start()
        try:
            def roles_of(agent):
                # Project each peer record down to its advertised role.
                return [peer['role'] for peer in agent.peers.list_peers()]

            # Assistant sees the other two.
            seen = roles_of(assistant)
            assert "analyst" in seen
            assert "scout" in seen, "Assistant should see scout"

            # Scout sees the other two.
            seen = roles_of(scout)
            assert "assistant" in seen
            assert "analyst" in seen

            # Analyst sees the other two.
            seen = roles_of(analyst)
            assert "assistant" in seen
            assert "scout" in seen
        finally:
            await p2p.stop()

    @pytest.mark.asyncio
    async def test_cognitive_context_updates_with_new_agent(self):
        """Cognitive context must pick up agents added after the mesh started."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()
        ScoutAgent = create_standalone_scout_agent()

        p2p = Mesh(mode="p2p", config={"bind_port": 8003})
        assistant = p2p.add(AssistantAgent())
        p2p.add(AnalystAgent())

        await p2p.start()
        try:
            # Baseline: no scout registered yet.
            before = assistant.peers.get_cognitive_context()
            assert "scout" not in before

            # Late-join the scout.
            p2p.add(ScoutAgent())

            # The refreshed context now advertises the scout and its skills.
            after = assistant.peers.get_cognitive_context()
            assert "scout" in after, "Cognitive context should include scout"
            assert "scouting" in after or "reconnaissance" in after
        finally:
            await p2p.stop()

    @pytest.mark.asyncio
    async def test_existing_agents_can_communicate_with_new_agent(self):
        """An existing agent can round-trip a request to a newly added agent."""
        from jarviscore import Mesh

        AssistantAgent, AnalystAgent, ResearcherAgent = create_autonomous_agents()
        ScoutAgent = create_standalone_scout_agent()

        p2p = Mesh(mode="p2p", config={"bind_port": 8004})
        assistant = p2p.add(AssistantAgent())
        p2p.add(AnalystAgent())
        scout = p2p.add(ScoutAgent())

        await p2p.start()

        # The scout needs its run loop listening before it can answer requests.
        scout_task = asyncio.create_task(scout.run())

        try:
            await asyncio.sleep(0.2)

            # Assistant addresses the scout directly by role.
            reply = await assistant.peers.request(
                "scout",
                {"question": "What are the market trends?"},
                timeout=5,
            )

            assert reply is not None
            assert "intel" in reply or "signals" in reply
        finally:
            # Graceful shutdown first, then hard-cancel the listener task.
            scout.request_shutdown()
            scout_task.cancel()
            try:
                await scout_task
            except asyncio.CancelledError:
                pass
            await p2p.stop()
|
|
941
|
+
|
|
942
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
943
|
+
# RUN TESTS
|
|
944
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
945
|
+
|
|
946
|
+
# Allow running this test module directly: verbose output, stdout capture disabled.
if __name__ == "__main__":
    pytest.main([__file__, "-v", "-s"])
|