jarviscore-framework 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. examples/autoagent_distributed_example.py +211 -0
  2. examples/custom_profile_decorator.py +134 -0
  3. examples/custom_profile_wrap.py +168 -0
  4. examples/customagent_distributed_example.py +362 -0
  5. examples/customagent_p2p_example.py +347 -0
  6. jarviscore/__init__.py +60 -15
  7. jarviscore/adapter/__init__.py +40 -0
  8. jarviscore/adapter/decorator.py +336 -0
  9. jarviscore/adapter/wrapper.py +303 -0
  10. jarviscore/cli/check.py +18 -13
  11. jarviscore/cli/scaffold.py +178 -0
  12. jarviscore/cli/smoketest.py +3 -2
  13. jarviscore/context/__init__.py +40 -0
  14. jarviscore/context/dependency.py +160 -0
  15. jarviscore/context/jarvis_context.py +207 -0
  16. jarviscore/context/memory.py +155 -0
  17. jarviscore/core/agent.py +44 -1
  18. jarviscore/core/mesh.py +196 -35
  19. jarviscore/data/.env.example +146 -0
  20. jarviscore/data/__init__.py +7 -0
  21. jarviscore/data/examples/autoagent_distributed_example.py +211 -0
  22. jarviscore/data/examples/calculator_agent_example.py +77 -0
  23. jarviscore/data/examples/customagent_distributed_example.py +362 -0
  24. jarviscore/data/examples/customagent_p2p_example.py +347 -0
  25. jarviscore/data/examples/multi_agent_workflow.py +132 -0
  26. jarviscore/data/examples/research_agent_example.py +76 -0
  27. jarviscore/docs/API_REFERENCE.md +264 -51
  28. jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
  29. jarviscore/docs/CONFIGURATION.md +41 -23
  30. jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
  31. jarviscore/docs/GETTING_STARTED.md +113 -17
  32. jarviscore/docs/TROUBLESHOOTING.md +155 -13
  33. jarviscore/docs/USER_GUIDE.md +144 -363
  34. jarviscore/execution/llm.py +23 -16
  35. jarviscore/orchestration/engine.py +20 -8
  36. jarviscore/p2p/__init__.py +10 -0
  37. jarviscore/p2p/coordinator.py +129 -0
  38. jarviscore/p2p/messages.py +87 -0
  39. jarviscore/p2p/peer_client.py +576 -0
  40. jarviscore/p2p/peer_tool.py +268 -0
  41. jarviscore_framework-0.2.0.dist-info/METADATA +143 -0
  42. jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
  43. {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
  44. {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
  45. test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
  46. test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
  47. test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
  48. test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
  49. test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
  50. test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
  51. test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
  52. test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
  53. test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
  54. test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
  55. test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
  56. test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
  57. test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
  58. test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
  59. test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
  60. test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
  61. test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
  62. test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
  63. test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
  64. test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
  65. test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
  66. test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
  67. test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
  68. test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
  69. test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
  70. test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
  71. test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
  72. test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
  73. test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
  74. test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
  75. test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
  76. test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
  77. test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
  78. test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
  79. test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
  80. test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
  81. test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
  82. test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
  83. test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
  84. tests/test_01_analyst_standalone.py +124 -0
  85. tests/test_02_assistant_standalone.py +164 -0
  86. tests/test_03_analyst_with_framework.py +945 -0
  87. tests/test_04_assistant_with_framework.py +1002 -0
  88. tests/test_05_integration.py +1301 -0
  89. tests/test_06_real_llm_integration.py +760 -0
  90. tests/test_07_distributed_single_node.py +578 -0
  91. tests/test_08_distributed_multi_node.py +454 -0
  92. tests/test_09_distributed_autoagent.py +509 -0
  93. tests/test_10_distributed_customagent.py +787 -0
  94. tests/test_context.py +467 -0
  95. tests/test_decorator.py +622 -0
  96. tests/test_mesh.py +35 -4
  97. jarviscore_framework-0.1.0.dist-info/METADATA +0 -136
  98. jarviscore_framework-0.1.0.dist-info/RECORD +0 -55
  99. {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,347 @@
1
+ """
2
+ CustomAgent P2P Mode Example
3
+
4
+ Demonstrates CustomAgent in pure P2P mode where:
5
+ - Agents run continuously in their own run() loops
6
+ - Agents communicate directly via peer tools (ask_peer, broadcast_update)
7
+ - No centralized workflow orchestration
8
+ - Agents self-coordinate and make their own decisions
9
+
10
+ This is ideal for:
11
+ - Autonomous agent swarms
12
+ - Real-time collaborative systems
13
+ - Event-driven architectures
14
+ - Agents that need to run indefinitely
15
+
16
+ Usage:
17
+ python examples/customagent_p2p_example.py
18
+
19
+ Prerequisites:
20
+ - .env file with LLM API key (CLAUDE_API_KEY, etc.)
21
+ """
22
+ import asyncio
23
+ import sys
24
+ from pathlib import Path
25
+
26
+ sys.path.insert(0, str(Path(__file__).parent.parent))
27
+
28
+ from jarviscore import Mesh
29
+ from jarviscore.profiles import CustomAgent
30
+
31
+
32
+ # ═══════════════════════════════════════════════════════════════════════════════
33
+ # LLM CLIENT (for real LLM integration)
34
+ # ═══════════════════════════════════════════════════════════════════════════════
35
+
36
class SimpleLLMClient:
    """Minimal LLM chat wrapper with graceful degradation.

    Attempts to construct a real Anthropic client from jarviscore
    settings. If anything is missing (package, config, API key), the
    wrapper degrades to a mock mode: ``available`` is False and
    ``chat`` returns a canned placeholder instead of raising.
    """

    def __init__(self):
        try:
            from anthropic import Anthropic
            from jarviscore.config import settings

            api_key = settings.claude_api_key
            if not api_key:
                raise RuntimeError("No API key")

            # Build constructor arguments once; only add the endpoint
            # override when one is actually configured.
            client_kwargs = {"api_key": api_key}
            endpoint = settings.claude_endpoint
            if endpoint:
                client_kwargs["base_url"] = endpoint
            self.client = Anthropic(**client_kwargs)

            self.model = settings.claude_model or "claude-sonnet-4-20250514"
            self.available = True
        except Exception as e:
            # Deliberate broad catch: any import/config failure simply
            # means "no real LLM"; the example still runs in mock mode.
            print(f"[LLM] Not available: {e}")
            self.available = False

    def chat(self, message: str, system: str = None) -> str:
        """Send one user message and return the reply text.

        Args:
            message: The user prompt to send.
            system: Optional system prompt for the request.

        Returns:
            The model's reply, or a mock string when no LLM is configured.
        """
        if not self.available:
            return f"[Mock response to: {message[:50]}...]"

        request = {
            "model": self.model,
            "max_tokens": 512,
            "messages": [{"role": "user", "content": message}],
        }
        if system:
            request["system"] = system

        reply = self.client.messages.create(**request)
        return reply.content[0].text
75
+
76
+
77
+ # ═══════════════════════════════════════════════════════════════════════════════
78
+ # CUSTOMAGENT DEFINITIONS FOR P2P MODE
79
+ # ═══════════════════════════════════════════════════════════════════════════════
80
+
81
class ResearcherAgent(CustomAgent):
    """Peer-serving research agent for P2P mode.

    Runs a continuous receive loop in ``run()``, answers incoming peer
    requests with the LLM, and counts how many queries it has handled.
    There is no centralized orchestration: peers call it directly.
    """
    role = "researcher"
    capabilities = ["research", "analysis", "fact_checking"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        self.llm = None              # created in setup()
        self.queries_handled = 0     # running count of answered requests

    async def setup(self):
        """Create the LLM client after base-agent initialization."""
        await super().setup()
        self.llm = SimpleLLMClient()
        self._logger.info(f"[{self.role}] Ready to receive research queries")

    async def _answer(self, msg):
        """Answer a single peer request and bump the handled counter."""
        # Accept either "question" or "query" as the payload key.
        question = msg.data.get("question", msg.data.get("query", ""))
        self._logger.info(f"[{self.role}] Received query: {question[:50]}...")

        reply = self.llm.chat(
            question,
            system="You are a research expert. Provide concise, factual answers."
        )

        await self.peers.respond(msg, {"response": reply})
        self.queries_handled += 1
        self._logger.info(f"[{self.role}] Responded (total: {self.queries_handled})")

    async def run(self):
        """P2P main loop: poll for peer requests until shutdown.

        Required for P2P mode — the agent runs indefinitely, processing
        incoming messages, instead of executing orchestrated tasks.
        """
        self._logger.info(f"[{self.role}] Starting P2P run loop...")

        while not self.shutdown_requested:
            if not self.peers:
                # No peer layer yet; back off briefly before re-checking.
                await asyncio.sleep(0.1)
                continue

            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                await self._answer(msg)

    async def execute_task(self, task):
        """Unused in P2P mode; present to satisfy the base-class interface."""
        return {"status": "success", "note": "P2P mode uses run() instead"}
140
+
141
+
142
class AssistantAgent(CustomAgent):
    """Coordinating assistant agent for P2P mode.

    Answers user input directly with the LLM, or delegates to the
    researcher peer (via ``ask_peer``) when the request looks
    research-oriented. Runs its own loop; no central orchestration.
    """
    role = "assistant"
    capabilities = ["coordination", "chat", "delegation"]

    # Trigger words that route a request to the researcher peer.
    _RESEARCH_KEYWORDS = ("research", "analyze", "fact", "data", "statistics", "study")

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        self.llm = None           # created in setup()
        self.conversations = []   # reserved for conversation history

    async def setup(self):
        """Create the LLM client after base-agent initialization."""
        await super().setup()
        self.llm = SimpleLLMClient()
        self._logger.info(f"[{self.role}] Ready to assist and coordinate")

    async def ask_researcher(self, question: str) -> str:
        """Forward a question to the researcher peer via the peer tool."""
        if not self.peers:
            return "No peers available"

        return await self.peers.as_tool().execute(
            "ask_peer",
            {"role": "researcher", "question": question}
        )

    async def process_user_input(self, user_input: str) -> str:
        """Answer user input, delegating research-flavored requests to a peer.

        Demonstrates the P2P communication pattern: keyword heuristic
        decides between answering directly and asking the researcher.
        """
        self._logger.info(f"[{self.role}] Processing: {user_input[:50]}...")

        lowered = user_input.lower()
        if not any(keyword in lowered for keyword in self._RESEARCH_KEYWORDS):
            # Plain chat: answer directly without involving peers.
            return self.llm.chat(
                user_input,
                system="You are a helpful assistant. Be concise and friendly."
            )

        self._logger.info(f"[{self.role}] Delegating to researcher...")
        findings = await self.ask_researcher(user_input)

        # Synthesize a final answer from the researcher's findings.
        return self.llm.chat(
            f"Based on this research: {findings}\n\nProvide a helpful summary.",
            system="You are a helpful assistant. Summarize research findings clearly."
        )

    async def run(self):
        """P2P main loop: answer peer requests until shutdown.

        In a real application this loop might also listen for WebSocket
        connections, HTTP requests, or message-queue events.
        """
        self._logger.info(f"[{self.role}] Starting P2P run loop...")

        while not self.shutdown_requested:
            if not self.peers:
                # Peer layer not ready; back off briefly before re-checking.
                await asyncio.sleep(0.1)
                continue

            msg = await self.peers.receive(timeout=0.5)
            if msg and msg.is_request:
                answer = await self.process_user_input(msg.data.get("query", ""))
                await self.peers.respond(msg, {"response": answer})

    async def execute_task(self, task):
        """Unused in P2P mode; present to satisfy the base-class interface."""
        return {"status": "success", "note": "P2P mode uses run() instead"}
235
+
236
+
237
+ # ═══════════════════════════════════════════════════════════════════════════════
238
+ # MAIN EXAMPLE
239
+ # ═══════════════════════════════════════════════════════════════════════════════
240
+
241
async def main():
    """Run the CustomAgent P2P mode example.

    Starts a P2P-mode mesh, runs the researcher's receive loop as a
    background task, feeds a few user queries through the assistant,
    then shuts everything down.

    Fix: cleanup (cancelling the researcher task and stopping the mesh)
    now runs in ``finally`` — previously an exception mid-demo left the
    background task running and the mesh unstopped.
    """
    print("\n" + "=" * 70)
    print("JarvisCore: CustomAgent in P2P Mode")
    print("=" * 70)

    # KEY DIFFERENCE: mode="p2p" - no workflow engine, agents run continuously.
    mesh = Mesh(
        mode="p2p",  # P2P only - no workflow orchestration
        config={
            'bind_host': '127.0.0.1',
            'bind_port': 7960,
            'node_name': 'p2p-demo-node',
        }
    )

    researcher = mesh.add(ResearcherAgent)
    assistant = mesh.add(AssistantAgent)

    mesh_started = False
    researcher_task = None
    try:
        await mesh.start()
        mesh_started = True

        print("\n[INFO] Mesh started in P2P mode")
        print("  - P2P Coordinator: Active")
        print("  - Workflow Engine: NOT available (use run_forever instead)")
        print(f"  - Agents: {len(mesh.agents)}")

        # In P2P mode, agents communicate directly; demonstrate by having
        # the assistant delegate to the researcher.
        print("\n" + "-" * 70)
        print("Demonstrating P2P Agent Communication")
        print("-" * 70)

        # Give agents time to initialize their peer connections.
        await asyncio.sleep(0.5)

        # Start researcher's run loop in the background.
        researcher_task = asyncio.create_task(researcher.run())

        # Give the researcher time to start listening.
        await asyncio.sleep(0.3)

        # Simulate user queries that the assistant processes.
        test_queries = [
            "Research the benefits of renewable energy",
            "Hello, how are you?",  # This won't be delegated
            "Analyze the latest trends in AI development",
        ]

        for query in test_queries:
            print(f"\n[User] {query}")
            response = await assistant.process_user_input(query)
            print(f"[Assistant] {response[:200]}...")

        # Show statistics.
        print("\n" + "=" * 70)
        print("P2P Session Statistics")
        print("=" * 70)
        print(f"  Researcher queries handled: {researcher.queries_handled}")

    except Exception as e:
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Always release the background task and the mesh, even on failure.
        if researcher_task is not None:
            researcher.request_shutdown()
            researcher_task.cancel()
            try:
                await researcher_task
            except asyncio.CancelledError:
                pass
        if mesh_started:
            await mesh.stop()
            print("\n[INFO] P2P mesh stopped")
319
+
320
+
321
+ # ═══════════════════════════════════════════════════════════════════════════════
322
+ # LONG-RUNNING P2P EXAMPLE
323
+ # ═══════════════════════════════════════════════════════════════════════════════
324
+
325
async def run_forever_example():
    """Sketch of running P2P agents indefinitely (documentation only).

    To keep every agent's ``run()`` loop going until a shutdown signal,
    use ``mesh.run_forever()``::

        mesh = Mesh(mode="p2p", config={...})
        mesh.add(ResearcherAgent)
        mesh.add(AssistantAgent)

        await mesh.start()
        await mesh.run_forever()  # Blocks until shutdown signal

    Agents keep running until one of:

    - SIGINT (Ctrl+C)
    - SIGTERM
    - Programmatic shutdown
    """
344
+
345
+
346
# Script entry point: start the asyncio event loop and run the demo.
if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,132 @@
1
+ """
2
+ Multi-Agent Workflow Example
3
+
4
+ Demonstrates multiple AutoAgents collaborating on a workflow with dependencies.
5
+ Shows how agents can pass data between steps automatically.
6
+
7
+ Usage:
8
+ python examples/multi_agent_workflow.py
9
+ """
10
+ import asyncio
11
+ import sys
12
+ from pathlib import Path
13
+
14
+ # Add parent directory to path
15
+ sys.path.insert(0, str(Path(__file__).parent.parent))
16
+
17
+ from jarviscore import Mesh
18
+ from jarviscore.profiles import AutoAgent
19
+
20
+
21
class DataGeneratorAgent(AutoAgent):
    """Generates sample data (first step of the workflow)."""
    # Role name referenced by workflow steps ("agent": "generator").
    role = "generator"
    # Capabilities advertised to the mesh for agent matching.
    capabilities = ["data_generation", "random_data"]
    # System prompt for the LLM. The 'result' variable convention is
    # presumably how the framework captures step output — TODO confirm
    # against the AutoAgent documentation.
    system_prompt = """
    You are a data generator. Create sample datasets based on specifications.
    Use Python's random module or create structured data.
    Store the generated data in a variable named 'result'.
    """
30
+
31
+
32
class DataAnalyzerAgent(AutoAgent):
    """Analyzes data and computes statistics (second step of the workflow)."""
    # Role name referenced by workflow steps ("agent": "analyzer").
    role = "analyzer"
    # Capabilities advertised to the mesh for agent matching.
    capabilities = ["data_analysis", "statistics"]
    # System prompt for the LLM; 'result' presumably captured by the
    # framework as the step output — TODO confirm.
    system_prompt = """
    You are a data analyst. Analyze datasets and compute statistics.
    Calculate mean, median, standard deviation, and find patterns.
    Store your analysis results in a variable named 'result'.
    """
41
+
42
+
43
class ReportGeneratorAgent(AutoAgent):
    """Creates formatted reports (final step of the workflow)."""
    # Role name referenced by workflow steps ("agent": "reporter").
    role = "reporter"
    # Capabilities advertised to the mesh for agent matching.
    capabilities = ["report_generation", "formatting"]
    # System prompt for the LLM; 'result' presumably captured by the
    # framework as the step output — TODO confirm.
    system_prompt = """
    You are a report generator. Create well-formatted reports from data.
    Generate markdown or plain text reports with clear sections.
    Store the formatted report in a variable named 'result'.
    """
52
+
53
+
54
async def main():
    """Run the multi-agent workflow example.

    Builds a three-agent mesh (generator -> analyzer -> reporter),
    executes a dependent three-step workflow, and prints each step's
    output plus a summary.

    Fix: ``mesh.stop()`` now runs in ``finally`` — previously a failure
    mid-workflow left the mesh running. Also dropped pointless ``f``
    prefixes on strings with no placeholders.
    """
    print("\n" + "=" * 60)
    print("JarvisCore: Multi-Agent Workflow Example")
    print("=" * 60)

    # Zero-config: reads from .env automatically.
    # Framework tries: Claude → Azure → Gemini → vLLM

    # Create mesh with all agents.
    mesh = Mesh(mode="autonomous")
    mesh.add(DataGeneratorAgent)
    mesh.add(DataAnalyzerAgent)
    mesh.add(ReportGeneratorAgent)

    mesh_started = False
    try:
        await mesh.start()
        mesh_started = True
        print(f"✓ Mesh started with {len(mesh.agents)} agents\n")

        print("Workflow: Generate → Analyze → Report")
        print("-" * 60)

        # Execute 3-step workflow with dependencies.
        results = await mesh.workflow("data-pipeline", [
            {
                "id": "generate",
                "agent": "generator",
                "task": "Generate a list of 20 random numbers between 1 and 100"
            },
            {
                "id": "analyze",
                "agent": "analyzer",
                "task": "Calculate mean, median, min, max, and standard deviation of the data",
                "depends_on": ["generate"]  # Waits for generator to complete
            },
            {
                "id": "report",
                "agent": "reporter",
                "task": "Create a formatted report with the statistics",
                "depends_on": ["analyze"]  # Waits for analyzer to complete
            }
        ])

        # Display per-step results (assumes results follow step order).
        print("\n" + "=" * 60)
        print("RESULTS")
        print("=" * 60)

        print("\nStep 1 - Data Generation:")
        print(f"  Status: {results[0]['status']}")
        print(f"  Output: {results[0].get('output')}")

        print("\nStep 2 - Data Analysis:")
        print(f"  Status: {results[1]['status']}")
        print(f"  Output: {results[1].get('output')}")

        print("\nStep 3 - Report Generation:")
        print(f"  Status: {results[2]['status']}")
        print(f"  Report:\n{results[2].get('output')}")

        print("\n" + "=" * 60)
        print("WORKFLOW SUMMARY")
        print("=" * 60)
        total_repairs = sum(r.get('repairs', 0) for r in results)
        print(f"Total steps: {len(results)}")
        print(f"Successful: {sum(1 for r in results if r['status'] == 'success')}")
        print(f"Total repairs: {total_repairs}")

        print("\n✓ Workflow completed\n")

    except Exception as e:
        print(f"\n✗ Error: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Release network/agent resources even if the workflow failed.
        if mesh_started:
            await mesh.stop()
129
+
130
+
131
# Script entry point: start the asyncio event loop and run the workflow.
if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,76 @@
1
+ """
2
+ Research Agent Example - Internet Search & Data Extraction
3
+
4
+ Demonstrates AutoAgent with internet search capabilities.
5
+ Agent automatically gets access to web search tools (DuckDuckGo).
6
+
7
+ Usage:
8
+ python examples/research_agent_example.py
9
+ """
10
+ import asyncio
11
+ import sys
12
+ from pathlib import Path
13
+
14
+ # Add parent directory to path
15
+ sys.path.insert(0, str(Path(__file__).parent.parent))
16
+
17
+ from jarviscore import Mesh
18
+ from jarviscore.profiles import AutoAgent
19
+
20
+
21
+ class ResearchAgent(AutoAgent):
22
+ """Research assistant with internet access."""
23
+ role = "researcher"
24
+ capabilities = ["research", "web_search", "information_gathering"]
25
+ system_prompt = """
26
+ You are a research assistant with internet access.
27
+ Search the web for information and provide concise summaries.
28
+ Use the 'search' object available in your code:
29
+ - await search.search(query, max_results=5)
30
+ - await search.extract_content(url)
31
+ - await search.search_and_extract(query, num_results=3)
32
+ Store your findings in a variable named 'result'.
33
+ """
34
+
35
+
36
async def main():
    """Run the research agent example.

    Starts a single-researcher mesh, executes one web-search workflow
    step, and prints the summary.

    Fix: ``mesh.stop()`` now runs in ``finally`` — previously a failure
    during the workflow left the mesh running.
    """
    print("\n" + "=" * 60)
    print("JarvisCore: Research Agent Example")
    print("=" * 60)

    # Zero-config: reads from .env automatically.
    # Framework auto-detects: Claude → Azure → Gemini → vLLM
    mesh = Mesh(mode="autonomous")
    mesh.add(ResearchAgent)

    mesh_started = False
    try:
        await mesh.start()
        mesh_started = True
        print("✓ Mesh started with internet search enabled\n")

        print("Example: Research Python asyncio")
        print("-" * 60)

        results = await mesh.workflow("research-asyncio", [
            {
                "agent": "researcher",
                "task": "Search for 'Python asyncio tutorial' and summarize the top 2 results"
            }
        ])

        result = results[0]
        print(f"Status: {result['status']}")
        print(f"Summary:\n{result.get('output')}")
        print(f"\nRepairs needed: {result.get('repairs', 0)}")

        print("\n✓ Research completed\n")

    except Exception as e:
        print(f"\n✗ Error: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Release network/agent resources even if the search failed.
        if mesh_started:
            await mesh.stop()
73
+
74
+
75
# Script entry point: start the asyncio event loop and run the example.
if __name__ == "__main__":
    asyncio.run(main())