jarviscore-framework 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +347 -0
  6. jarviscore/__init__.py +49 -36
  7. jarviscore/adapter/__init__.py +15 -9
  8. jarviscore/adapter/decorator.py +23 -19
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/scaffold.py +1 -1
  11. jarviscore/cli/smoketest.py +3 -2
  12. jarviscore/core/agent.py +44 -1
  13. jarviscore/core/mesh.py +196 -35
  14. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  15. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  16. jarviscore/data/examples/customagent_p2p_example.py +347 -0
  17. jarviscore/docs/API_REFERENCE.md +264 -51
  18. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  19. jarviscore/docs/CONFIGURATION.md +35 -21
  20. jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
  21. jarviscore/docs/GETTING_STARTED.md +106 -13
  22. jarviscore/docs/TROUBLESHOOTING.md +144 -6
  23. jarviscore/docs/USER_GUIDE.md +138 -361
  24. jarviscore/orchestration/engine.py +20 -8
  25. jarviscore/p2p/__init__.py +10 -0
  26. jarviscore/p2p/coordinator.py +129 -0
  27. jarviscore/p2p/messages.py +87 -0
  28. jarviscore/p2p/peer_client.py +576 -0
  29. jarviscore/p2p/peer_tool.py +268 -0
  30. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/METADATA +60 -54
  31. jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
  32. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
  33. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
  34. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  35. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  36. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  37. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  38. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  39. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  40. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  41. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  42. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  43. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  44. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  45. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  46. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  47. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  48. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  49. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  50. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  51. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  52. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  53. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  54. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  55. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  56. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  57. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  60. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  61. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  62. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  63. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  64. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  65. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  66. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  67. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  68. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  69. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  70. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  71. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  72. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  73. tests/test_01_analyst_standalone.py +124 -0
  74. tests/test_02_assistant_standalone.py +164 -0
  75. tests/test_03_analyst_with_framework.py +945 -0
  76. tests/test_04_assistant_with_framework.py +1002 -0
  77. tests/test_05_integration.py +1301 -0
  78. tests/test_06_real_llm_integration.py +760 -0
  79. tests/test_07_distributed_single_node.py +578 -0
  80. tests/test_08_distributed_multi_node.py +454 -0
  81. tests/test_09_distributed_autoagent.py +509 -0
  82. tests/test_10_distributed_customagent.py +787 -0
  83. tests/test_mesh.py +35 -4
  84. jarviscore_framework-0.1.1.dist-info/RECORD +0 -69
  85. {jarviscore_framework-0.1.1.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,347 @@
1
+ """
2
+ CustomAgent P2P Mode Example
3
+
4
+ Demonstrates CustomAgent in pure P2P mode where:
5
+ - Agents run continuously in their own run() loops
6
+ - Agents communicate directly via peer tools (ask_peer, broadcast_update)
7
+ - No centralized workflow orchestration
8
+ - Agents self-coordinate and make their own decisions
9
+
10
+ This is ideal for:
11
+ - Autonomous agent swarms
12
+ - Real-time collaborative systems
13
+ - Event-driven architectures
14
+ - Agents that need to run indefinitely
15
+
16
+ Usage:
17
+ python examples/customagent_p2p_example.py
18
+
19
+ Prerequisites:
20
+ - .env file with LLM API key (CLAUDE_API_KEY, etc.)
21
+ """
22
+ import asyncio
23
+ import sys
24
+ from pathlib import Path
25
+
26
+ sys.path.insert(0, str(Path(__file__).parent.parent))
27
+
28
+ from jarviscore import Mesh
29
+ from jarviscore.profiles import CustomAgent
30
+
31
+
32
+ # ═══════════════════════════════════════════════════════════════════════════════
33
+ # LLM CLIENT (for real LLM integration)
34
+ # ═══════════════════════════════════════════════════════════════════════════════
35
+
36
class SimpleLLMClient:
    """Thin wrapper around the Anthropic chat API with a mock fallback.

    If the ``anthropic`` package, configuration, or API key is unavailable,
    the client degrades gracefully: ``available`` is False and ``chat``
    returns a deterministic mock string instead of raising.
    """

    def __init__(self):
        # BUG FIX: previously `client`/`model` were only assigned on the
        # success path, so touching them on an unavailable client raised
        # AttributeError. Initialize all attributes unconditionally.
        self.client = None
        self.model = None
        self.available = False
        try:
            from anthropic import Anthropic
            from jarviscore.config import settings

            api_key = settings.claude_api_key
            if not api_key:
                raise RuntimeError("No API key")

            endpoint = settings.claude_endpoint
            if endpoint:
                # Custom endpoint (e.g. proxy/gateway) if configured.
                self.client = Anthropic(api_key=api_key, base_url=endpoint)
            else:
                self.client = Anthropic(api_key=api_key)

            self.model = settings.claude_model or "claude-sonnet-4-20250514"
            self.available = True
        except Exception as e:
            # Deliberate broad catch: any failure (missing package, bad
            # config, no key) downgrades to mock mode rather than crashing
            # the example.
            print(f"[LLM] Not available: {e}")
            self.available = False

    def chat(self, message: str, system: str = None) -> str:
        """Send a single user message and return the model's text reply.

        Args:
            message: The user prompt.
            system: Optional system prompt steering the model's behavior.

        Returns:
            The model's reply text, or a mock string when the API is
            unavailable.
        """
        if not self.available:
            return f"[Mock response to: {message[:50]}...]"

        kwargs = {
            "model": self.model,
            "max_tokens": 512,
            "messages": [{"role": "user", "content": message}]
        }
        if system:
            kwargs["system"] = system

        response = self.client.messages.create(**kwargs)
        return response.content[0].text
75
+
76
+
77
+ # ═══════════════════════════════════════════════════════════════════════════════
78
+ # CUSTOMAGENT DEFINITIONS FOR P2P MODE
79
+ # ═══════════════════════════════════════════════════════════════════════════════
80
+
81
class ResearcherAgent(CustomAgent):
    """P2P researcher agent: answers research queries sent by peers.

    Lifecycle in P2P mode:
      * ``setup()`` builds the LLM client.
      * ``run()`` loops until shutdown, answering incoming peer requests.
      * ``execute_task()`` is a stub required by the base class; it is
        never driven by a workflow engine in this mode.
    """
    role = "researcher"
    capabilities = ["research", "analysis", "fact_checking"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        self.llm = None
        # Running count of peer queries answered; reported by main().
        self.queries_handled = 0

    async def setup(self):
        """Create the LLM client once base-class setup completes."""
        await super().setup()
        self.llm = SimpleLLMClient()
        self._logger.info(f"[{self.role}] Ready to receive research queries")

    async def run(self):
        """Continuous P2P loop: receive peer requests and answer them.

        This loop is what distinguishes P2P mode from workflow-driven
        modes — the agent runs indefinitely until shutdown is requested.
        """
        self._logger.info(f"[{self.role}] Starting P2P run loop...")

        while not self.shutdown_requested:
            if not self.peers:
                # Peer client not wired up yet — back off instead of spinning.
                await asyncio.sleep(0.1)
                continue

            msg = await self.peers.receive(timeout=0.5)
            if not (msg and msg.is_request):
                continue

            # Accept either "question" or "query" as the payload key.
            query = msg.data.get("question", msg.data.get("query", ""))
            self._logger.info(f"[{self.role}] Received query: {query[:50]}...")

            answer = self.llm.chat(
                query,
                system="You are a research expert. Provide concise, factual answers."
            )

            await self.peers.respond(msg, {"response": answer})
            self.queries_handled += 1
            self._logger.info(f"[{self.role}] Responded (total: {self.queries_handled})")

    async def execute_task(self, task):
        """Base-class requirement only; P2P mode drives run() instead."""
        return {"status": "success", "note": "P2P mode uses run() instead"}
140
+
141
+
142
class AssistantAgent(CustomAgent):
    """P2P assistant agent: chats with users, delegating research to peers.

    Demonstrates direct agent-to-agent communication: when a message looks
    research-like, it is forwarded to the researcher peer via the
    ``ask_peer`` tool and the result is summarized with the LLM; otherwise
    the assistant answers directly.
    """
    role = "assistant"
    capabilities = ["coordination", "chat", "delegation"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        self.llm = None
        self.conversations = []

    async def setup(self):
        """Create the LLM client once base-class setup completes."""
        await super().setup()
        self.llm = SimpleLLMClient()
        self._logger.info(f"[{self.role}] Ready to assist and coordinate")

    async def ask_researcher(self, question: str) -> str:
        """Forward a question to the researcher peer via the peer tool."""
        if not self.peers:
            return "No peers available"

        return await self.peers.as_tool().execute(
            "ask_peer",
            {"role": "researcher", "question": question}
        )

    async def process_user_input(self, user_input: str) -> str:
        """Answer a user message, delegating to the researcher when needed.

        This is the core P2P communication pattern of the example.
        """
        self._logger.info(f"[{self.role}] Processing: {user_input[:50]}...")

        # Crude keyword heuristic deciding whether to delegate.
        lowered = user_input.lower()
        needs_research = any(
            keyword in lowered
            for keyword in ("research", "analyze", "fact", "data", "statistics", "study")
        )

        if not needs_research:
            # Plain chat — answer directly without involving peers.
            return self.llm.chat(
                user_input,
                system="You are a helpful assistant. Be concise and friendly."
            )

        self._logger.info(f"[{self.role}] Delegating to researcher...")
        research_result = await self.ask_researcher(user_input)

        # Synthesize a user-facing answer from the peer's research.
        return self.llm.chat(
            f"Based on this research: {research_result}\n\nProvide a helpful summary.",
            system="You are a helpful assistant. Summarize research findings clearly."
        )

    async def run(self):
        """Continuous P2P loop: respond to peer requests carrying a 'query'.

        In a real deployment this loop might also service WebSocket
        connections, HTTP requests, or message-queue events.
        """
        self._logger.info(f"[{self.role}] Starting P2P run loop...")

        while not self.shutdown_requested:
            if not self.peers:
                await asyncio.sleep(0.1)
                continue

            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                reply = await self.process_user_input(msg.data.get("query", ""))
                await self.peers.respond(msg, {"response": reply})

    async def execute_task(self, task):
        """Base-class requirement only; P2P mode drives run() instead."""
        return {"status": "success", "note": "P2P mode uses run() instead"}
235
+
236
+
237
+ # ═══════════════════════════════════════════════════════════════════════════════
238
+ # MAIN EXAMPLE
239
+ # ═══════════════════════════════════════════════════════════════════════════════
240
+
241
async def main():
    """Run the CustomAgent P2P mode example end to end.

    Starts a P2P-only mesh with a researcher and an assistant agent,
    demonstrates direct peer-to-peer delegation, and guarantees cleanup
    of the background researcher task and the mesh even on failure.
    """
    print("\n" + "="*70)
    print("JarvisCore: CustomAgent in P2P Mode")
    print("="*70)

    # ─────────────────────────────────────────────────────────────────────────
    # KEY DIFFERENCE: mode="p2p" - No workflow engine, agents run continuously
    # ─────────────────────────────────────────────────────────────────────────
    mesh = Mesh(
        mode="p2p",  # P2P only - no workflow orchestration
        config={
            'bind_host': '127.0.0.1',
            'bind_port': 7960,
            'node_name': 'p2p-demo-node',
        }
    )

    researcher = mesh.add(ResearcherAgent)
    assistant = mesh.add(AssistantAgent)

    researcher_task = None
    try:
        await mesh.start()

        print("\n[INFO] Mesh started in P2P mode")
        print(f" - P2P Coordinator: Active")
        print(f" - Workflow Engine: NOT available (use run_forever instead)")
        print(f" - Agents: {len(mesh.agents)}")

        # In P2P mode, agents communicate directly — demonstrate by having
        # the assistant delegate to the researcher.
        print("\n" + "-"*70)
        print("Demonstrating P2P Agent Communication")
        print("-"*70)

        # Give agents time to initialize their peer connections
        await asyncio.sleep(0.5)

        # Start researcher's run loop in background
        researcher_task = asyncio.create_task(researcher.run())

        # Give researcher time to start listening
        await asyncio.sleep(0.3)

        # Simulate user queries that the assistant processes
        test_queries = [
            "Research the benefits of renewable energy",
            "Hello, how are you?",  # This won't be delegated
            "Analyze the latest trends in AI development",
        ]

        for query in test_queries:
            print(f"\n[User] {query}")
            response = await assistant.process_user_input(query)
            print(f"[Assistant] {response[:200]}...")

        # Show statistics
        print("\n" + "="*70)
        print("P2P Session Statistics")
        print("="*70)
        print(f" Researcher queries handled: {researcher.queries_handled}")

    except Exception as e:
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # BUG FIX: cleanup previously ran only on the success path, so any
        # exception leaked the background researcher task and left the mesh
        # (and its sockets) running. `finally` guarantees teardown.
        if researcher_task is not None:
            researcher.request_shutdown()
            researcher_task.cancel()
            try:
                await researcher_task
            except asyncio.CancelledError:
                pass
        await mesh.stop()
        print("\n[INFO] P2P mesh stopped")
319
+
320
+
321
+ # ═══════════════════════════════════════════════════════════════════════════════
322
+ # LONG-RUNNING P2P EXAMPLE
323
+ # ═══════════════════════════════════════════════════════════════════════════════
324
+
325
async def run_forever_example():
    """Sketch of keeping P2P agents alive indefinitely (documentation only).

    Pattern::

        mesh = Mesh(mode="p2p", config={...})
        mesh.add(ResearcherAgent)
        mesh.add(AssistantAgent)

        await mesh.start()
        await mesh.run_forever()  # Blocks until shutdown signal

    ``run_forever()`` keeps every agent's ``run()`` loop alive until one of:
      - SIGINT (Ctrl+C)
      - SIGTERM
      - a programmatic shutdown
    """
344
+
345
+
346
# Script entry point: run the P2P demo on a fresh asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())