jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +730 -0
  6. jarviscore/__init__.py +49 -36
  7. jarviscore/adapter/__init__.py +15 -9
  8. jarviscore/adapter/decorator.py +23 -19
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/scaffold.py +1 -1
  11. jarviscore/cli/smoketest.py +3 -2
  12. jarviscore/core/agent.py +44 -1
  13. jarviscore/core/mesh.py +196 -35
  14. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  15. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  16. jarviscore/data/examples/customagent_p2p_example.py +730 -0
  17. jarviscore/docs/API_REFERENCE.md +264 -51
  18. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  19. jarviscore/docs/CONFIGURATION.md +35 -21
  20. jarviscore/docs/CUSTOMAGENT_GUIDE.md +1362 -0
  21. jarviscore/docs/GETTING_STARTED.md +107 -14
  22. jarviscore/docs/TROUBLESHOOTING.md +145 -7
  23. jarviscore/docs/USER_GUIDE.md +138 -361
  24. jarviscore/orchestration/engine.py +20 -8
  25. jarviscore/p2p/__init__.py +10 -0
  26. jarviscore/p2p/coordinator.py +129 -0
  27. jarviscore/p2p/messages.py +87 -0
  28. jarviscore/p2p/peer_client.py +576 -0
  29. jarviscore/p2p/peer_tool.py +268 -0
  30. jarviscore_framework-0.2.1.dist-info/METADATA +144 -0
  31. jarviscore_framework-0.2.1.dist-info/RECORD +132 -0
  32. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/WHEEL +1 -1
  33. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/top_level.txt +1 -0
  34. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  35. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  36. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  37. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  38. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  39. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  40. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  41. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  42. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  43. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  44. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  45. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  46. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  47. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  48. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  49. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  50. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  51. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  52. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  53. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  54. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  55. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  56. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  57. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  60. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  61. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  62. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  63. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  64. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  65. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  66. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  67. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  68. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  69. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  70. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  71. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  72. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  73. tests/test_01_analyst_standalone.py +124 -0
  74. tests/test_02_assistant_standalone.py +164 -0
  75. tests/test_03_analyst_with_framework.py +945 -0
  76. tests/test_04_assistant_with_framework.py +1002 -0
  77. tests/test_05_integration.py +1301 -0
  78. tests/test_06_real_llm_integration.py +760 -0
  79. tests/test_07_distributed_single_node.py +578 -0
  80. tests/test_08_distributed_multi_node.py +454 -0
  81. tests/test_09_distributed_autoagent.py +509 -0
  82. tests/test_10_distributed_customagent.py +787 -0
  83. tests/test_mesh.py +35 -4
  84. jarviscore_framework-0.1.1.dist-info/METADATA +0 -137
  85. jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
  86. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,362 @@
1
+ """
2
+ CustomAgent Distributed Mode Example
3
+
4
+ Demonstrates CustomAgent in distributed mode, which combines:
5
+ - P2P network layer (SWIM protocol, ZMQ messaging)
6
+ - Workflow orchestration (step execution, dependencies)
7
+ - User-controlled execution logic (you write execute_task)
8
+
9
+ This is ideal for:
10
+ - Multi-node deployments with custom logic
11
+ - Integrating external frameworks (LangChain, CrewAI, etc.)
12
+ - Complex business logic that needs workflow coordination
13
+ - Agents that need both peer communication AND workflow support
14
+
15
+ Usage:
16
+ python examples/customagent_distributed_example.py
17
+
18
+ Prerequisites:
19
+ - .env file with LLM API key (CLAUDE_API_KEY, etc.)
20
+ """
21
import asyncio
import sys
from pathlib import Path
from typing import Optional

sys.path.insert(0, str(Path(__file__).parent.parent))

from jarviscore import Mesh
from jarviscore.profiles import CustomAgent
29
+
30
+
31
+ # ═══════════════════════════════════════════════════════════════════════════════
32
+ # LLM CLIENT
33
+ # ═══════════════════════════════════════════════════════════════════════════════
34
+
35
class LLMClient:
    """LLM client with tool support for CustomAgent.

    Tries to build an Anthropic client from jarviscore settings; if the SDK,
    config, or API key is unavailable it degrades to deterministic mock
    responses so the example still runs without credentials.
    """

    def __init__(self):
        try:
            from anthropic import Anthropic
            from jarviscore.config import settings

            api_key = settings.claude_api_key
            if not api_key:
                raise RuntimeError("No API key")

            endpoint = settings.claude_endpoint
            if endpoint:
                # Custom endpoint (e.g. proxy or regional gateway).
                self.client = Anthropic(api_key=api_key, base_url=endpoint)
            else:
                self.client = Anthropic(api_key=api_key)

            self.model = settings.claude_model or "claude-sonnet-4-20250514"
            self.available = True
        except Exception as e:
            # Best-effort init: fall back to mock mode rather than crash.
            print(f"[LLM] Not available: {e}")
            self.available = False

    def chat(self, message: str, system: Optional[str] = None) -> str:
        """Send a single-turn message and return the reply text.

        Args:
            message: User prompt to send to the model.
            system: Optional system prompt steering the model's behavior.

        Returns:
            The model's reply text, or a mock string (echoing the first 50
            characters of the prompt) when no real client is available.
        """
        if not self.available:
            return f"[Mock response to: {message[:50]}...]"

        kwargs = {
            "model": self.model,
            "max_tokens": 1024,
            "messages": [{"role": "user", "content": message}]
        }
        if system:
            kwargs["system"] = system

        response = self.client.messages.create(**kwargs)
        return response.content[0].text
74
+
75
+
76
+ # ═══════════════════════════════════════════════════════════════════════════════
77
+ # CUSTOMAGENT DEFINITIONS FOR DISTRIBUTED MODE
78
+ # ═══════════════════════════════════════════════════════════════════════════════
79
+
80
class ContentResearcherAgent(CustomAgent):
    """
    Researcher that finds information using LLM.

    In distributed mode, this agent:
    - Executes tasks via execute_task() (called by workflow engine)
    - Can also communicate with peers via self.peers
    - Benefits from workflow dependencies and orchestration
    """
    role = "content_researcher"
    capabilities = ["research", "information_gathering", "fact_finding"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Filled in by setup() once the mesh starts this agent.
        self.llm = None

    async def setup(self):
        """Create the LLM client; invoked automatically by mesh.start()."""
        await super().setup()
        self.llm = LLMClient()
        self._logger.info(f"[{self.role}] Initialized with LLM")

    async def execute_task(self, task):
        """
        REQUIRED: Called by workflow engine for each step.

        Args:
            task: Dict with 'task' key (description) and optional 'context'

        Returns:
            Dict with 'status', 'output', and optionally 'error'
        """
        description = task.get("task", "")
        ctx = task.get("context", {})

        self._logger.info(f"[{self.role}] Researching: {description[:50]}...")

        # Fold output from earlier workflow steps into the prompt, if any.
        if ctx:
            prompt = f"Previous context: {ctx}\n\nTask: {description}"
        else:
            prompt = description

        answer = self.llm.chat(
            prompt,
            system="You are an expert researcher. Provide thorough, factual information. Be concise but comprehensive."
        )

        return {
            "status": "success",
            "output": answer,
            "agent_id": self.agent_id,
            "role": self.role,
        }
134
+
135
+
136
class ContentWriterAgent(CustomAgent):
    """
    Writer that creates content using LLM.

    Demonstrates:
    - Using context from previous workflow steps
    - Custom execution logic
    - Integration with external LLM
    """
    role = "content_writer"
    capabilities = ["writing", "content_creation", "editing"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # LLM client is created lazily in setup(), not here.
        self.llm = None

    async def setup(self):
        """Attach the LLM client; runs during mesh startup."""
        await super().setup()
        self.llm = LLMClient()
        self._logger.info(f"[{self.role}] Initialized with LLM")

    async def execute_task(self, task):
        """Write content based on task and context."""
        description = task.get("task", "")
        ctx = task.get("context", {})

        self._logger.info(f"[{self.role}] Writing: {description[:50]}...")

        # Prepend upstream research output when the workflow supplies it.
        prompt = (
            f"Use this research:\n{ctx}\n\nWriting task: {description}"
            if ctx
            else description
        )

        draft = self.llm.chat(
            prompt,
            system="You are a professional writer. Create engaging, well-structured content. Use clear language and logical flow."
        )

        return {
            "status": "success",
            "output": draft,
            "agent_id": self.agent_id,
            "role": self.role,
        }
180
+
181
+
182
class ContentReviewerAgent(CustomAgent):
    """
    Reviewer that provides feedback using LLM.

    Demonstrates:
    - Quality control in workflows
    - Peer communication capability (can ask other agents)
    """
    role = "content_reviewer"
    capabilities = ["review", "feedback", "quality_assurance"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        # Populated by setup() once the mesh boots this agent.
        self.llm = None

    async def setup(self):
        """Create the LLM client; called automatically by mesh.start()."""
        await super().setup()
        self.llm = LLMClient()
        self._logger.info(f"[{self.role}] Initialized with LLM")

    async def execute_task(self, task):
        """Review content and provide feedback."""
        request = task.get("task", "")
        ctx = task.get("context", {})

        self._logger.info(f"[{self.role}] Reviewing: {request[:50]}...")

        # Wrap upstream content (e.g. the writer's draft) into the prompt.
        prompt = (
            f"Content to review:\n{ctx}\n\nReview task: {request}"
            if ctx
            else request
        )

        feedback = self.llm.chat(
            prompt,
            system="You are a content reviewer. Provide constructive feedback. Highlight strengths and suggest specific improvements."
        )

        return {
            "status": "success",
            "output": feedback,
            "agent_id": self.agent_id,
            "role": self.role,
        }
224
+
225
+
226
+ # ═══════════════════════════════════════════════════════════════════════════════
227
+ # MAIN EXAMPLE
228
+ # ═══════════════════════════════════════════════════════════════════════════════
229
+
230
async def main():
    """Run CustomAgent distributed mode example."""
    banner = "=" * 70
    print("\n" + banner)
    print("JarvisCore: CustomAgent in Distributed Mode")
    print(banner)

    # mode="distributed" enables BOTH the P2P layer AND workflow orchestration.
    mesh = Mesh(
        mode="distributed",
        config={
            # P2P Network Configuration
            'bind_host': '127.0.0.1',
            'bind_port': 7965,
            'node_name': 'content-team-node',

            # For multi-node deployment:
            # 'seed_nodes': '192.168.1.10:7965',
        }
    )

    # Register the three CustomAgents that form the content pipeline.
    for agent_cls in (ContentResearcherAgent, ContentWriterAgent, ContentReviewerAgent):
        mesh.add(agent_cls)

    try:
        await mesh.start()

        print("\n[INFO] Mesh started in DISTRIBUTED mode")
        print(f" - P2P Coordinator: Active (for cross-node communication)")
        print(f" - Workflow Engine: Active (for orchestrated execution)")
        print(f" - Agents: {len(mesh.agents)}")

        # Orchestrated workflow: each step feeds its output to dependents.
        section = "-" * 70
        print("\n" + section)
        print("Content Pipeline: Research → Write → Review")
        print(section)

        results = await mesh.workflow("content-pipeline", [
            {
                "id": "research",
                "agent": "content_researcher",
                "task": "Research the key benefits of microservices architecture for modern applications"
            },
            {
                "id": "write",
                "agent": "content_writer",
                "task": "Write a concise blog post introduction about microservices (2-3 paragraphs)",
                "depends_on": ["research"]
            },
            {
                "id": "review",
                "agent": "content_reviewer",
                "task": "Review the blog post and provide 3 specific improvement suggestions",
                "depends_on": ["write"]
            }
        ])

        # Display results
        print("\n" + banner)
        print("PIPELINE RESULTS")
        print(banner)

        step_names = ["Research", "Writing", "Review"]
        rule = "─" * 70
        for idx, res in enumerate(results):
            print(f"\n{rule}")
            print(f"Step {idx+1}: {step_names[idx]}")
            print(f"{rule}")
            print(f"Status: {res['status']}")
            if res['status'] == 'success':
                text = res.get('output', '')
                # Truncate long outputs for display
                shown = f"{text[:500]}...\n[truncated]" if len(text) > 500 else text
                print(f"Output:\n{shown}")
            else:
                print(f"Error: {res.get('error')}")

        # Summary
        ok = sum(1 for r in results if r['status'] == 'success')
        print(f"\n{banner}")
        print(f"Pipeline Complete: {ok}/{len(results)} steps successful")
        print(f"{banner}")

        await mesh.stop()

    except Exception as e:
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
325
+
326
+
327
+ # ═══════════════════════════════════════════════════════════════════════════════
328
+ # PEER COMMUNICATION IN DISTRIBUTED MODE
329
+ # ═══════════════════════════════════════════════════════════════════════════════
330
+
331
async def peer_communication_example():
    """
    Example: Using peer communication in distributed mode.

    CustomAgents in distributed mode can ALSO use peer tools
    for direct agent-to-agent communication:

        class SmartWriterAgent(CustomAgent):
            role = "smart_writer"
            capabilities = ["writing"]

            async def execute_task(self, task):
                # Can ask other agents for help via peers
                if self.peers and "complex" in task.get("task", ""):
                    # Ask researcher for additional info
                    extra_info = await self.peers.as_tool().execute(
                        "ask_peer",
                        {"role": "content_researcher", "question": "Give me more context"}
                    )
                    # Use extra_info in writing...

                return {"status": "success", "output": "..."}

    This gives you the best of both worlds:
    - Workflow orchestration for structured pipelines
    - Peer communication for dynamic collaboration
    """
    # Documentation-only stub: the docstring above is the example.
359
+
360
+
361
+ if __name__ == "__main__":
362
+ asyncio.run(main())