jarviscore-framework 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +347 -0
  6. jarviscore/__init__.py +60 -15
  7. jarviscore/adapter/__init__.py +40 -0
  8. jarviscore/adapter/decorator.py +336 -0
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/check.py +18 -13
  11. jarviscore/cli/scaffold.py +178 -0
  12. jarviscore/cli/smoketest.py +3 -2
  13. jarviscore/context/__init__.py +40 -0
  14. jarviscore/context/dependency.py +160 -0
  15. jarviscore/context/jarvis_context.py +207 -0
  16. jarviscore/context/memory.py +155 -0
  17. jarviscore/core/agent.py +44 -1
  18. jarviscore/core/mesh.py +196 -35
  19. jarviscore/data/.env.example +146 -0
  20. jarviscore/data/__init__.py +7 -0
  21. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  22. jarviscore/data/examples/calculator_agent_example.py +77 -0
  23. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  24. jarviscore/data/examples/customagent_p2p_example.py +347 -0
  25. jarviscore/data/examples/multi_agent_workflow.py +132 -0
  26. jarviscore/data/examples/research_agent_example.py +76 -0
  27. jarviscore/docs/API_REFERENCE.md +264 -51
  28. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  29. jarviscore/docs/CONFIGURATION.md +41 -23
  30. jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
  31. jarviscore/docs/GETTING_STARTED.md +113 -17
  32. jarviscore/docs/TROUBLESHOOTING.md +155 -13
  33. jarviscore/docs/USER_GUIDE.md +144 -363
  34. jarviscore/execution/llm.py +23 -16
  35. jarviscore/orchestration/engine.py +20 -8
  36. jarviscore/p2p/__init__.py +10 -0
  37. jarviscore/p2p/coordinator.py +129 -0
  38. jarviscore/p2p/messages.py +87 -0
  39. jarviscore/p2p/peer_client.py +576 -0
  40. jarviscore/p2p/peer_tool.py +268 -0
  41. jarviscore_framework-0.2.0.dist-info/METADATA +143 -0
  42. jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
  43. {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
  44. {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
  45. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  46. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  47. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  48. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  49. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  50. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  51. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  52. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  53. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  54. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  55. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  56. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  57. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  60. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  61. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  62. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  63. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  64. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  65. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  66. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  67. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  68. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  69. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  70. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  71. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  72. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  73. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  74. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  75. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  76. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  77. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  78. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  79. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  80. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  81. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  82. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  83. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  84. tests/test_01_analyst_standalone.py +124 -0
  85. tests/test_02_assistant_standalone.py +164 -0
  86. tests/test_03_analyst_with_framework.py +945 -0
  87. tests/test_04_assistant_with_framework.py +1002 -0
  88. tests/test_05_integration.py +1301 -0
  89. tests/test_06_real_llm_integration.py +760 -0
  90. tests/test_07_distributed_single_node.py +578 -0
  91. tests/test_08_distributed_multi_node.py +454 -0
  92. tests/test_09_distributed_autoagent.py +509 -0
  93. tests/test_10_distributed_customagent.py +787 -0
  94. tests/test_context.py +467 -0
  95. tests/test_decorator.py +622 -0
  96. tests/test_mesh.py +35 -4
  97. jarviscore_framework-0.1.0.dist-info/METADATA +0 -136
  98. jarviscore_framework-0.1.0.dist-info/RECORD +0 -55
  99. {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,787 @@
1
+ """
2
+ Test 10: Distributed Mode - CustomAgent Profile with Real LLM
3
+
4
+ Tests the CustomAgent profile in distributed execution mode:
5
+ - CustomAgent with user-controlled LLM integration
6
+ - CustomAgent with peer tools for agent communication
7
+ - Workflow execution with CustomAgents
8
+ - Multi-agent collaboration via peer tools
9
+
10
+ This file uses REAL LLM API calls (not mocks).
11
+
12
+ Run with: pytest tests/test_10_distributed_customagent.py -v -s
13
+ """
14
+ import asyncio
15
+ import sys
16
+ import pytest
17
+ import logging
18
+
19
+ sys.path.insert(0, '.')
20
+
21
+ from jarviscore import Mesh
22
+ from jarviscore.profiles.customagent import CustomAgent
23
+ from jarviscore.p2p.peer_client import PeerClient
24
+
25
+ # Setup logging
26
+ logging.basicConfig(level=logging.INFO)
27
+ logger = logging.getLogger(__name__)
28
+
29
# Skip the whole module when no LLM API key is configured: every test
# here performs real API calls.
try:
    from jarviscore.config import settings
    HAS_API_KEY = any((
        settings.claude_api_key,
        settings.azure_api_key,
        settings.gemini_api_key,
    ))
except Exception:
    # Config import (or attribute access) failed -> treat as "no key".
    HAS_API_KEY = False

pytestmark = pytest.mark.skipif(
    not HAS_API_KEY,
    reason="No LLM API key configured in .env"
)
44
+
45
+
46
+ # ═══════════════════════════════════════════════════════════════════════════════
47
+ # REAL LLM CLIENT
48
+ # ═══════════════════════════════════════════════════════════════════════════════
49
+
50
class RealLLMClient:
    """Real LLM client with tool calling support.

    Thin synchronous wrapper around the Anthropic SDK exposing three entry
    points: plain chat, chat with tool schemas, and continuing a
    conversation after a tool result. Credentials, endpoint and model name
    come from jarviscore settings at construction time.
    """

    def __init__(self):
        # Imported lazily so the module can still be collected when the
        # anthropic package / jarviscore config is unavailable.
        from anthropic import Anthropic
        from jarviscore.config import settings

        api_key = settings.claude_api_key
        endpoint = settings.claude_endpoint

        if not api_key:
            raise RuntimeError("No Claude API key found")

        # Only pass base_url when a custom endpoint is configured;
        # otherwise the SDK's default endpoint is used.
        if endpoint:
            self.client = Anthropic(api_key=api_key, base_url=endpoint)
        else:
            self.client = Anthropic(api_key=api_key)

        # Model may be overridden via settings; default pinned here.
        self.model = settings.claude_model or "claude-sonnet-4-20250514"

    def chat(self, messages: list, system: str | None = None, max_tokens: int = 1024) -> str:
        """Simple chat without tools.

        Args:
            messages: Anthropic-format message dicts.
            system: Optional system prompt; omitted from the request when falsy.
            max_tokens: Response token budget.

        Returns:
            The text of the FIRST content block of the response.
        """
        request_kwargs = {
            "model": self.model,
            "max_tokens": max_tokens,
            "messages": messages,
        }
        if system:
            request_kwargs["system"] = system

        response = self.client.messages.create(**request_kwargs)
        return response.content[0].text

    def chat_with_tools(
        self,
        messages: list,
        tools: list,
        system: str | None = None,
        max_tokens: int = 1024
    ) -> dict:
        """Chat with tool support.

        Returns a flat summary dict holding "stop_reason" plus, depending
        on the response blocks, either text fields ("type"/"content") or
        tool-call fields ("type"/"tool_name"/"tool_args"/"tool_use_id").

        NOTE: blocks are folded into one dict in order, so when the model
        emits both a text block and a tool_use block, the later block's
        "type" wins (tool_use typically follows text).
        """
        request_kwargs = {
            "model": self.model,
            "max_tokens": max_tokens,
            "messages": messages,
        }
        if system:
            request_kwargs["system"] = system
        if tools:
            request_kwargs["tools"] = tools

        response = self.client.messages.create(**request_kwargs)

        result = {"stop_reason": response.stop_reason}
        for block in response.content:
            if block.type == "text":
                result["type"] = "text"
                result["content"] = block.text
            elif block.type == "tool_use":
                result["type"] = "tool_use"
                result["tool_name"] = block.name
                result["tool_args"] = block.input
                result["tool_use_id"] = block.id

        return result

    def continue_with_tool_result(
        self,
        messages: list,
        tool_use_id: str,
        tool_result: str,
        tools: list | None = None,
        system: str | None = None,
        max_tokens: int = 1024
    ) -> dict:
        """Continue with tool result.

        Builds a NEW message list (the caller's ``messages`` is NOT
        mutated) with the tool_result appended as a user turn, then
        re-invokes chat_with_tools.
        """
        messages = messages + [
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use_id,
                        "content": tool_result
                    }
                ]
            }
        ]
        return self.chat_with_tools(messages, tools or [], system, max_tokens)
139
+
140
+
141
+ # ═══════════════════════════════════════════════════════════════════════════════
142
+ # TEST CUSTOMAGENTS WITH LLM
143
+ # ═══════════════════════════════════════════════════════════════════════════════
144
+
145
class LLMResearchAgent(CustomAgent):
    """CustomAgent that uses LLM for research and reasoning.

    Exposes a local ``search_knowledge`` tool plus any peer tools, and
    answers workflow tasks via RealLLMClient with at most one tool
    round-trip.
    """
    role = "researcher"
    capabilities = ["research", "analysis", "summarization"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Created in setup(); None until the mesh starts the agent.
        self.llm = None
        # Peer queries seen by this agent (inspected by tests).
        self.requests_received = []

    async def setup(self):
        await super().setup()
        self.llm = RealLLMClient()
        self._logger.info(f"[{self.role}] LLM initialized")

    def get_tools(self) -> list:
        """Return tools including peer tools if available."""
        tools = [
            {
                "name": "search_knowledge",
                "description": "Search internal knowledge base for information",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Search query"}
                    },
                    "required": ["query"]
                }
            }
        ]
        # Peer tools are appended only when a PeerClient has been wired
        # onto this agent (see the distributed_mesh_with_peers fixture).
        if self.peers:
            tools.extend(self.peers.as_tool().schema)
        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        """Execute tool.

        Peer tools are delegated to the PeerClient; the local
        search_knowledge tool returns a canned response (test fixture,
        not a real knowledge base).
        """
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)
        if tool_name == "search_knowledge":
            return f"Knowledge base results for '{args.get('query')}': Found relevant information on the topic."
        return f"Unknown tool: {tool_name}"

    async def execute_task(self, task):
        """Execute research task using LLM.

        Handles at most ONE tool round-trip: if the first response is a
        tool call, the tool is executed and its result fed back for a
        final, tool-free completion.
        """
        task_desc = task.get("task", "")
        self._logger.info(f"[{self.role}] Executing: {task_desc[:50]}...")

        system_prompt = (
            "You are an expert researcher. Analyze the given topic and provide "
            "a concise but thorough response. Be factual and precise."
        )

        messages = [{"role": "user", "content": task_desc}]
        tools = self.get_tools()
        # Remove peer tools to avoid complexity in basic tests
        tools = [t for t in tools if t["name"] not in ["ask_peer", "broadcast_update", "list_peers"]]

        response = self.llm.chat_with_tools(messages, tools, system_prompt)

        # Handle tool use if needed
        if response.get("type") == "tool_use":
            tool_name = response["tool_name"]
            tool_args = response["tool_args"]
            tool_use_id = response["tool_use_id"]

            tool_result = await self.execute_tool(tool_name, tool_args)

            # Replay the assistant tool call and its result so the follow-up
            # request satisfies the Anthropic tool_use/tool_result pairing.
            messages.append({
                "role": "assistant",
                "content": [{"type": "tool_use", "id": tool_use_id, "name": tool_name, "input": tool_args}]
            })
            messages.append({
                "role": "user",
                "content": [{"type": "tool_result", "tool_use_id": tool_use_id, "content": tool_result}]
            })

            # Second call passes no tools, forcing a plain text answer.
            response = self.llm.chat_with_tools(messages, [], system_prompt)

        output = response.get("content", "Research complete.")

        return {
            "status": "success",
            "output": output,
            "agent_id": self.agent_id,
            "role": self.role
        }
231
+
232
+
233
class LLMWriterAgent(CustomAgent):
    """CustomAgent that uses LLM for writing tasks."""
    role = "writer"
    capabilities = ["writing", "editing", "formatting"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Attached during setup() once the mesh starts the agent.
        self.llm = None

    async def setup(self):
        await super().setup()
        self.llm = RealLLMClient()
        self._logger.info(f"[{self.role}] LLM initialized")

    async def execute_task(self, task):
        """Run a writing task through the LLM and return a result dict."""
        task_desc = task.get("task", "")
        context = task.get("context", {})

        self._logger.info(f"[{self.role}] Writing: {task_desc[:50]}...")

        system_prompt = (
            "You are an expert writer. Create clear, engaging content. "
            "Be concise but thorough. Format appropriately for the request."
        )

        # Fold upstream pipeline output into the prompt when present.
        full_prompt = (
            f"Context from previous steps: {context}\n\nTask: {task_desc}"
            if context
            else task_desc
        )

        output = self.llm.chat([{"role": "user", "content": full_prompt}], system_prompt)

        return {
            "status": "success",
            "output": output,
            "agent_id": self.agent_id,
            "role": self.role
        }
273
+
274
+
275
class LLMReviewerAgent(CustomAgent):
    """CustomAgent that reviews and provides feedback using LLM."""
    role = "reviewer"
    capabilities = ["review", "feedback", "quality_check"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Attached during setup() once the mesh starts the agent.
        self.llm = None

    async def setup(self):
        await super().setup()
        self.llm = RealLLMClient()
        self._logger.info(f"[{self.role}] LLM initialized")

    async def execute_task(self, task):
        """Run a review task through the LLM and return a result dict."""
        task_desc = task.get("task", "")
        context = task.get("context", {})

        self._logger.info(f"[{self.role}] Reviewing: {task_desc[:50]}...")

        system_prompt = (
            "You are an expert reviewer. Provide constructive feedback. "
            "Be specific about what works well and what could be improved. "
            "Keep feedback concise and actionable."
        )

        # Fold the content under review into the prompt when present.
        full_prompt = (
            f"Content to review: {context}\n\nReview task: {task_desc}"
            if context
            else task_desc
        )

        output = self.llm.chat([{"role": "user", "content": full_prompt}], system_prompt)

        return {
            "status": "success",
            "output": output,
            "agent_id": self.agent_id,
            "role": self.role
        }
315
+
316
+
317
class PeerAwareAgent(CustomAgent):
    """CustomAgent that can communicate with peers via peer tools.

    The coordinator exposes ONLY peer tools (ask_peer, broadcast_update,
    list_peers) to the LLM and records every tool invocation in
    ``self.tool_calls`` so tests can assert on delegation behavior.
    """
    role = "coordinator"
    capabilities = ["coordination", "delegation"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # RealLLMClient, created in setup().
        self.llm = None
        # Every executed tool call, as {"tool": name, "args": dict}.
        self.tool_calls = []

    async def setup(self):
        await super().setup()
        self.llm = RealLLMClient()
        self._logger.info(f"[{self.role}] LLM initialized with peer awareness")

    def get_tools(self) -> list:
        """Return tools including peer tools (empty when no PeerClient is wired)."""
        tools = []
        if self.peers:
            tools.extend(self.peers.as_tool().schema)
        return tools

    async def execute_tool(self, tool_name: str, args: dict) -> str:
        """Execute tool including peer tools, recording the call for tests."""
        self.tool_calls.append({"tool": tool_name, "args": args})
        if self.peers and tool_name in self.peers.as_tool().tool_names:
            return await self.peers.as_tool().execute(tool_name, args)
        return f"Unknown tool: {tool_name}"

    async def execute_task(self, task):
        """Execute task, potentially delegating to peers.

        Runs a bounded tool-use loop (max 3 rounds). Each round executes
        the requested tool and feeds its result back to the LLM with the
        full, correctly paired message history.
        """
        task_desc = task.get("task", "")
        self._logger.info(f"[{self.role}] Coordinating: {task_desc[:50]}...")

        system_prompt = (
            "You are a coordinator agent. You can delegate tasks to specialist peers. "
            "Use the ask_peer tool to get help from other agents when needed. "
            "Available peers include: researcher (for research tasks), writer (for writing), "
            "and reviewer (for reviews). Coordinate effectively."
        )

        tools = self.get_tools()
        if not tools:
            # No peers, just respond directly
            output = self.llm.chat([{"role": "user", "content": task_desc}], system_prompt)
            return {"status": "success", "output": output}

        messages = [{"role": "user", "content": task_desc}]
        response = self.llm.chat_with_tools(messages, tools, system_prompt)

        # Handle tool use loop
        iterations = 0
        while response.get("type") == "tool_use" and iterations < 3:
            iterations += 1

            tool_name = response["tool_name"]
            tool_args = response["tool_args"]
            tool_use_id = response["tool_use_id"]

            self._logger.info(f"[{self.role}] Using tool: {tool_name}")

            tool_result = await self.execute_tool(tool_name, tool_args)

            messages.append({
                "role": "assistant",
                "content": [{"type": "tool_use", "id": tool_use_id, "name": tool_name, "input": tool_args}]
            })
            # FIX: record the tool_result in the local history as well.
            # Previously only continue_with_tool_result's private copy held
            # it, so a second tool round sent an assistant tool_use with no
            # matching tool_result turn -- an invalid Anthropic message
            # sequence that fails the follow-up API call.
            messages.append({
                "role": "user",
                "content": [{"type": "tool_result", "tool_use_id": tool_use_id, "content": tool_result}]
            })

            response = self.llm.chat_with_tools(messages, tools, system_prompt)

        output = response.get("content", "Coordination complete.")

        return {
            "status": "success",
            "output": output,
            "tool_calls": self.tool_calls,
            "agent_id": self.agent_id,
            "role": self.role
        }
398
+
399
+
400
+ # ═══════════════════════════════════════════════════════════════════════════════
401
+ # FIXTURES
402
+ # ═══════════════════════════════════════════════════════════════════════════════
403
+
404
@pytest.fixture
async def distributed_mesh_single():
    """Distributed-mode mesh hosting a single LLMResearchAgent."""
    mesh = Mesh(mode="distributed", config={'bind_port': 7990})
    researcher = mesh.add(LLMResearchAgent)

    await mesh.start()
    yield mesh, researcher
    await mesh.stop()
415
+
416
+
417
@pytest.fixture
async def distributed_mesh_pipeline():
    """Distributed mesh with researcher, writer and reviewer CustomAgents."""
    mesh = Mesh(mode="distributed", config={'bind_port': 7991})

    # Registration order mirrors the content pipeline: research -> write -> review.
    research_agent = mesh.add(LLMResearchAgent)
    writing_agent = mesh.add(LLMWriterAgent)
    review_agent = mesh.add(LLMReviewerAgent)

    await mesh.start()
    yield mesh, research_agent, writing_agent, review_agent
    await mesh.stop()
431
+
432
+
433
@pytest.fixture
async def distributed_mesh_with_peers():
    """Distributed mesh whose agents are wired with PeerClient instances."""
    mesh = Mesh(mode="distributed", config={'bind_port': 7992})

    coordinator = mesh.add(PeerAwareAgent)
    researcher = mesh.add(LLMResearchAgent)
    writer = mesh.add(LLMWriterAgent)

    await mesh.start()

    # Every agent gets its own PeerClient so peer tools can route messages.
    for member in mesh.agents:
        member.peers = PeerClient(
            coordinator=mesh._p2p_coordinator,
            agent_id=member.agent_id,
            agent_role=member.role,
            agent_registry=mesh._agent_registry,
            node_id="local"
        )

    yield mesh, coordinator, researcher, writer

    await mesh.stop()
457
+
458
+
459
+ # ═══════════════════════════════════════════════════════════════════════════════
460
+ # TEST CLASS: CustomAgent Setup in Distributed Mode
461
+ # ═══════════════════════════════════════════════════════════════════════════════
462
+
463
class TestCustomAgentDistributedSetup:
    """Tests for CustomAgent initialization in distributed mode."""

    @pytest.mark.asyncio
    async def test_customagent_inherits_from_profile(self, distributed_mesh_single):
        """CustomAgent should inherit from Profile."""
        _mesh, agent = distributed_mesh_single

        from jarviscore.core.profile import Profile
        from jarviscore.core.agent import Agent

        # The agent must sit in the full Profile -> Agent hierarchy.
        for expected_base in (Profile, Agent, CustomAgent):
            assert isinstance(agent, expected_base)

    @pytest.mark.asyncio
    async def test_customagent_has_required_attributes(self, distributed_mesh_single):
        """CustomAgent should have role and capabilities."""
        _mesh, agent = distributed_mesh_single

        assert agent.role == "researcher"
        for capability in ("research", "analysis"):
            assert capability in agent.capabilities

    @pytest.mark.asyncio
    async def test_customagent_setup_initializes_llm(self, distributed_mesh_single):
        """CustomAgent setup should initialize LLM client."""
        _mesh, agent = distributed_mesh_single

        assert agent.llm is not None, "LLM should be initialized"
        assert isinstance(agent.llm, RealLLMClient)

    @pytest.mark.asyncio
    async def test_customagent_joins_distributed_mesh(self, distributed_mesh_single):
        """CustomAgent should be registered in distributed mesh."""
        mesh, agent = distributed_mesh_single

        # Registered under its role, with distributed machinery running.
        assert mesh.get_agent("researcher") == agent
        assert mesh._p2p_coordinator is not None
        assert mesh._workflow_engine is not None
503
+
504
+
505
+ # ═══════════════════════════════════════════════════════════════════════════════
506
+ # TEST CLASS: CustomAgent Workflow Execution
507
+ # ═══════════════════════════════════════════════════════════════════════════════
508
+
509
class TestCustomAgentWorkflowExecution:
    """Tests for CustomAgent executing workflow steps with real LLM."""

    @pytest.mark.asyncio
    async def test_customagent_executes_single_step(self, distributed_mesh_single):
        """CustomAgent should execute a single workflow step."""
        mesh, _agent = distributed_mesh_single

        print("\n" + "="*60)
        print("TEST: CustomAgent executes single step")
        print("="*60)

        results = await mesh.workflow("single-step", [
            {"agent": "researcher", "task": "What are the three laws of thermodynamics? Summarize briefly."}
        ])

        print(f"\nResults: {results}")

        assert len(results) == 1
        result, = results

        assert result["status"] == "success", f"Task failed: {result.get('error')}"
        assert "output" in result
        assert len(result["output"]) > 0

        print(f"\nOutput: {result['output'][:300]}...")

    @pytest.mark.asyncio
    async def test_customagent_uses_llm_for_reasoning(self, distributed_mesh_single):
        """CustomAgent should use LLM for complex reasoning."""
        mesh, _agent = distributed_mesh_single

        print("\n" + "="*60)
        print("TEST: CustomAgent uses LLM for reasoning")
        print("="*60)

        results = await mesh.workflow("reasoning-test", [
            {
                "agent": "researcher",
                "task": "Compare and contrast renewable vs non-renewable energy sources. List 3 pros and cons of each."
            }
        ])

        result = results[0]
        assert result["status"] == "success"

        output = result["output"]
        print(f"\nOutput: {output[:500]}...")

        # Should contain substantive content
        assert len(output) > 100, "Should have detailed response"

    @pytest.mark.asyncio
    async def test_customagent_result_includes_metadata(self, distributed_mesh_single):
        """CustomAgent result should include agent metadata."""
        mesh, _agent = distributed_mesh_single

        results = await mesh.workflow("metadata-test", [
            {"agent": "researcher", "task": "Define photosynthesis in one sentence."}
        ])

        result = results[0]
        assert result["status"] == "success"
        # Metadata comes from the agent's execute_task return dict.
        for key in ("agent_id", "role"):
            assert key in result
        assert result["role"] == "researcher"
575
+
576
+
577
+ # ═══════════════════════════════════════════════════════════════════════════════
578
+ # TEST CLASS: Multi-Step Pipeline with CustomAgents
579
+ # ═══════════════════════════════════════════════════════════════════════════════
580
+
581
class TestCustomAgentMultiStepWorkflow:
    """Tests for multi-step workflows with multiple CustomAgents."""

    @pytest.mark.asyncio
    async def test_two_customagents_sequential(self, distributed_mesh_pipeline):
        """Two CustomAgents should execute in sequence."""
        mesh, _researcher, _writer, _reviewer = distributed_mesh_pipeline

        print("\n" + "="*60)
        print("TEST: Two CustomAgents in sequence")
        print("="*60)

        results = await mesh.workflow("two-agent-test", [
            {
                "agent": "researcher",
                "task": "List 3 key facts about the Python programming language."
            },
            {
                "agent": "writer",
                "task": "Write a short promotional paragraph about Python using these facts: Python is versatile, has a large community, and is easy to learn."
            }
        ])

        print(f"\nResults count: {len(results)}")
        for i, r in enumerate(results):
            print(f"Step {i+1}: {r['status']} - {str(r.get('output', ''))[:100]}...")

        assert len(results) == 2
        # Both steps of the sequential workflow must complete successfully.
        for step_result in results:
            assert step_result["status"] == "success"

    @pytest.mark.asyncio
    async def test_three_agent_pipeline(self, distributed_mesh_pipeline):
        """Three CustomAgents should work in a pipeline."""
        mesh, _researcher, _writer, _reviewer = distributed_mesh_pipeline

        print("\n" + "="*60)
        print("TEST: Three agent content pipeline")
        print("="*60)

        results = await mesh.workflow("content-pipeline", [
            {
                "agent": "researcher",
                "task": "Provide 3 interesting facts about machine learning."
            },
            {
                "agent": "writer",
                "task": "Write a brief blog introduction about machine learning mentioning these facts: ML learns from data, it improves over time, and it powers many modern applications."
            },
            {
                "agent": "reviewer",
                "task": "Review this blog intro and suggest one improvement: 'Machine learning is transforming how we interact with technology every day.'"
            }
        ])

        print(f"\nPipeline results:")
        for i, r in enumerate(results):
            print(f" Step {i+1} [{r['status']}]: {str(r.get('output', ''))[:80]}...")

        assert len(results) == 3
        # Real-LLM runs can be flaky; tolerate a single failed step.
        successes = sum(1 for r in results if r["status"] == "success")
        assert successes >= 2, "At least 2 steps should succeed"
643
+
644
+
645
+ # ═══════════════════════════════════════════════════════════════════════════════
646
+ # TEST CLASS: CustomAgent with Peer Tools
647
+ # ═══════════════════════════════════════════════════════════════════════════════
648
+
649
class TestCustomAgentWithPeerTools:
    """Tests for CustomAgent using peer tools in distributed mode."""

    @pytest.mark.asyncio
    async def test_customagent_has_peer_tools(self, distributed_mesh_with_peers):
        """CustomAgent should have access to peer tools."""
        mesh, coordinator, researcher, writer = distributed_mesh_with_peers

        tools = coordinator.get_tools()
        tool_names = [t["name"] for t in tools]

        print(f"\nCoordinator tools: {tool_names}")

        # The PeerClient wired by the fixture contributes these three tools.
        assert "ask_peer" in tool_names
        assert "list_peers" in tool_names
        assert "broadcast_update" in tool_names

    @pytest.mark.asyncio
    async def test_customagent_can_list_peers(self, distributed_mesh_with_peers):
        """CustomAgent should be able to list available peers."""
        mesh, coordinator, researcher, writer = distributed_mesh_with_peers

        peers = coordinator.peers.list_peers()
        roles = [p["role"] for p in peers]

        print(f"\nAvailable peers: {roles}")

        assert "researcher" in roles
        assert "writer" in roles

    @pytest.mark.asyncio
    async def test_customagent_peer_communication(self, distributed_mesh_with_peers):
        """CustomAgent should communicate with peers via peer tools."""
        mesh, coordinator, researcher, writer = distributed_mesh_with_peers

        print("\n" + "="*60)
        print("TEST: Peer communication")
        print("="*60)

        # Start researcher listening
        async def researcher_listener():
            # Poll for peer requests until the agent is asked to shut down.
            while not researcher.shutdown_requested:
                if researcher.peers:
                    # Short timeout keeps the loop responsive to shutdown.
                    msg = await researcher.peers.receive(timeout=0.5)
                    if msg and msg.is_request:
                        query = msg.data.get("query", "")
                        researcher.requests_received.append(query)
                        # Respond using LLM
                        result = await researcher.execute_task({"task": query})
                        await researcher.peers.respond(msg, {"response": result["output"]})
                else:
                    await asyncio.sleep(0.1)

        listener_task = asyncio.create_task(researcher_listener())
        # Give the listener a moment to start before sending the request.
        await asyncio.sleep(0.3)

        try:
            # Coordinator asks researcher for help
            result = await coordinator.peers.as_tool().execute(
                "ask_peer",
                {"role": "researcher", "question": "What is the speed of light?"}
            )

            print(f"\nPeer response: {result[:200]}...")

            assert len(result) > 0
            assert len(researcher.requests_received) > 0

            print("\nPASSED: Peer communication works!")

        finally:
            # Always tear down the background listener, even on failure.
            researcher.request_shutdown()
            listener_task.cancel()
            try:
                await listener_task
            except asyncio.CancelledError:
                pass
726
+
727
+
728
+ # ═══════════════════════════════════════════════════════════════════════════════
729
+ # MANUAL DEMONSTRATION
730
+ # ═══════════════════════════════════════════════════════════════════════════════
731
+
732
async def run_customagent_demo():
    """Demonstrate CustomAgent in distributed mode with real LLM."""
    print("\n" + "="*70)
    print("CUSTOMAGENT DISTRIBUTED MODE DEMONSTRATION")
    print("="*70)

    mesh = Mesh(mode="distributed", config={'bind_port': 7995})

    # Register the three specialist agents of the content pipeline.
    for profile_cls in (LLMResearchAgent, LLMWriterAgent, LLMReviewerAgent):
        mesh.add(profile_cls)

    print("\n[SETUP] Created distributed mesh with CustomAgents:")
    for member in mesh.agents:
        print(f" - {member.role}: {member.capabilities}")

    await mesh.start()
    print("\n[STARTED] Mesh running in distributed mode")

    try:
        # Demo: Content creation pipeline
        print("\n" + "-"*60)
        print("DEMO: Content Creation Pipeline")
        print("-"*60)

        results = await mesh.workflow("demo-pipeline", [
            {
                "agent": "researcher",
                "task": "Provide 3 key benefits of remote work."
            },
            {
                "agent": "writer",
                "task": "Write a short paragraph promoting remote work using these benefits: flexibility, no commute, work-life balance."
            },
            {
                "agent": "reviewer",
                "task": "Review this paragraph and rate it 1-5: 'Remote work offers unprecedented flexibility, eliminates stressful commutes, and enables better work-life balance.'"
            }
        ])

        for step_no, step_result in enumerate(results, start=1):
            print(f"\nStep {step_no} - {step_result.get('role', 'unknown')}:")
            print(f" Status: {step_result['status']}")
            preview = str(step_result.get('output', ''))[:200]
            print(f" Output: {preview}...")

    finally:
        await mesh.stop()

    print("\n" + "="*70)
    print("DEMONSTRATION COMPLETE")
    print("="*70)
784
+
785
+
786
if __name__ == "__main__":
    # Run the interactive demonstration when executed directly
    # (pytest ignores this path and runs the Test* classes instead).
    asyncio.run(run_customagent_demo())