jarviscore-framework 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/autoagent_distributed_example.py +211 -0
- examples/custom_profile_decorator.py +134 -0
- examples/custom_profile_wrap.py +168 -0
- examples/customagent_distributed_example.py +362 -0
- examples/customagent_p2p_example.py +347 -0
- jarviscore/__init__.py +60 -15
- jarviscore/adapter/__init__.py +40 -0
- jarviscore/adapter/decorator.py +336 -0
- jarviscore/adapter/wrapper.py +303 -0
- jarviscore/cli/check.py +18 -13
- jarviscore/cli/scaffold.py +178 -0
- jarviscore/cli/smoketest.py +3 -2
- jarviscore/context/__init__.py +40 -0
- jarviscore/context/dependency.py +160 -0
- jarviscore/context/jarvis_context.py +207 -0
- jarviscore/context/memory.py +155 -0
- jarviscore/core/agent.py +44 -1
- jarviscore/core/mesh.py +196 -35
- jarviscore/data/.env.example +146 -0
- jarviscore/data/__init__.py +7 -0
- jarviscore/data/examples/autoagent_distributed_example.py +211 -0
- jarviscore/data/examples/calculator_agent_example.py +77 -0
- jarviscore/data/examples/customagent_distributed_example.py +362 -0
- jarviscore/data/examples/customagent_p2p_example.py +347 -0
- jarviscore/data/examples/multi_agent_workflow.py +132 -0
- jarviscore/data/examples/research_agent_example.py +76 -0
- jarviscore/docs/API_REFERENCE.md +264 -51
- jarviscore/docs/AUTOAGENT_GUIDE.md +198 -0
- jarviscore/docs/CONFIGURATION.md +41 -23
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +415 -0
- jarviscore/docs/GETTING_STARTED.md +113 -17
- jarviscore/docs/TROUBLESHOOTING.md +155 -13
- jarviscore/docs/USER_GUIDE.md +144 -363
- jarviscore/execution/llm.py +23 -16
- jarviscore/orchestration/engine.py +20 -8
- jarviscore/p2p/__init__.py +10 -0
- jarviscore/p2p/coordinator.py +129 -0
- jarviscore/p2p/messages.py +87 -0
- jarviscore/p2p/peer_client.py +576 -0
- jarviscore/p2p/peer_tool.py +268 -0
- jarviscore_framework-0.2.0.dist-info/METADATA +143 -0
- jarviscore_framework-0.2.0.dist-info/RECORD +132 -0
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/WHEEL +1 -1
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/top_level.txt +1 -0
- test_logs/code_registry/functions/data_generator-558779ed_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-5ed3609e_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-66da0356_43970bb9.py +25 -0
- test_logs/code_registry/functions/data_generator-7a2fac83_583709d9.py +36 -0
- test_logs/code_registry/functions/data_generator-888b670f_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-9ca5f642_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-bfd90775_560ebc37.py +7 -0
- test_logs/code_registry/functions/data_generator-e95d2f7d_aa235863.py +9 -0
- test_logs/code_registry/functions/data_generator-f60ca8a2_327eb8c2.py +29 -0
- test_logs/code_registry/functions/mathematician-02adf9ee_958658d9.py +19 -0
- test_logs/code_registry/functions/mathematician-0706fb57_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-153c9c4a_ba59c918.py +83 -0
- test_logs/code_registry/functions/mathematician-287e61c0_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-2967af5a_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-303ca6d6_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-308a4afd_cbf5064d.py +73 -0
- test_logs/code_registry/functions/mathematician-353f16e2_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-3c22475a_41daa793.py +17 -0
- test_logs/code_registry/functions/mathematician-5bac1029_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-640f76b2_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-752fa7ea_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-baf9ef39_0968bcf5.py +18 -0
- test_logs/code_registry/functions/mathematician-bc8b2a2f_5df13441.py +23 -0
- test_logs/code_registry/functions/mathematician-c31e4686_41daa793.py +18 -0
- test_logs/code_registry/functions/mathematician-cc84c84c_863c2cc6.py +17 -0
- test_logs/code_registry/functions/mathematician-dd7c7144_9198780b.py +19 -0
- test_logs/code_registry/functions/mathematician-e671c256_41ea4487.py +74 -0
- test_logs/code_registry/functions/report_generator-1a878fcc_18d44bdc.py +47 -0
- test_logs/code_registry/functions/report_generator-25c1c331_cea57d0d.py +35 -0
- test_logs/code_registry/functions/report_generator-37552117_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-bc662768_e711c2b9.py +35 -0
- test_logs/code_registry/functions/report_generator-d6c0e76b_5e7722ec.py +44 -0
- test_logs/code_registry/functions/report_generator-f270fb02_680529c3.py +44 -0
- test_logs/code_registry/functions/text_processor-11393b14_4370d3ed.py +40 -0
- test_logs/code_registry/functions/text_processor-7d02dfc3_d3b569be.py +37 -0
- test_logs/code_registry/functions/text_processor-8adb5e32_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-c58ffc19_78b4ceac.py +42 -0
- test_logs/code_registry/functions/text_processor-cd5977b1_9168c5fe.py +13 -0
- test_logs/code_registry/functions/text_processor-ec1c8773_9168c5fe.py +13 -0
- tests/test_01_analyst_standalone.py +124 -0
- tests/test_02_assistant_standalone.py +164 -0
- tests/test_03_analyst_with_framework.py +945 -0
- tests/test_04_assistant_with_framework.py +1002 -0
- tests/test_05_integration.py +1301 -0
- tests/test_06_real_llm_integration.py +760 -0
- tests/test_07_distributed_single_node.py +578 -0
- tests/test_08_distributed_multi_node.py +454 -0
- tests/test_09_distributed_autoagent.py +509 -0
- tests/test_10_distributed_customagent.py +787 -0
- tests/test_context.py +467 -0
- tests/test_decorator.py +622 -0
- tests/test_mesh.py +35 -4
- jarviscore_framework-0.1.0.dist-info/METADATA +0 -136
- jarviscore_framework-0.1.0.dist-info/RECORD +0 -55
- {jarviscore_framework-0.1.0.dist-info → jarviscore_framework-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CustomAgent P2P Mode Example
|
|
3
|
+
|
|
4
|
+
Demonstrates CustomAgent in pure P2P mode where:
|
|
5
|
+
- Agents run continuously in their own run() loops
|
|
6
|
+
- Agents communicate directly via peer tools (ask_peer, broadcast_update)
|
|
7
|
+
- No centralized workflow orchestration
|
|
8
|
+
- Agents self-coordinate and make their own decisions
|
|
9
|
+
|
|
10
|
+
This is ideal for:
|
|
11
|
+
- Autonomous agent swarms
|
|
12
|
+
- Real-time collaborative systems
|
|
13
|
+
- Event-driven architectures
|
|
14
|
+
- Agents that need to run indefinitely
|
|
15
|
+
|
|
16
|
+
Usage:
|
|
17
|
+
python examples/customagent_p2p_example.py
|
|
18
|
+
|
|
19
|
+
Prerequisites:
|
|
20
|
+
- .env file with LLM API key (CLAUDE_API_KEY, etc.)
|
|
21
|
+
"""
|
|
22
|
+
import asyncio
|
|
23
|
+
import sys
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
|
|
26
|
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
27
|
+
|
|
28
|
+
from jarviscore import Mesh
|
|
29
|
+
from jarviscore.profiles import CustomAgent
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
33
|
+
# LLM CLIENT (for real LLM integration)
|
|
34
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
35
|
+
|
|
36
|
+
class SimpleLLMClient:
    """Minimal Anthropic chat wrapper with a graceful mock fallback.

    ``available`` is True only when the anthropic SDK imports and an API
    key is configured; otherwise chat() returns a canned mock reply so
    the example still runs end-to-end without credentials.
    """

    def __init__(self):
        try:
            from anthropic import Anthropic
            from jarviscore.config import settings

            key = settings.claude_api_key
            if not key:
                raise RuntimeError("No API key")

            base_url = settings.claude_endpoint
            # Pass base_url only when a custom endpoint is configured.
            if base_url:
                self.client = Anthropic(api_key=key, base_url=base_url)
            else:
                self.client = Anthropic(api_key=key)

            self.model = settings.claude_model or "claude-sonnet-4-20250514"
            self.available = True
        except Exception as err:
            # Missing SDK, missing config, or missing key: fall back to mock mode.
            print(f"[LLM] Not available: {err}")
            self.available = False

    def chat(self, message: str, system: str = None) -> str:
        """One-shot chat completion (no tool use); returns the reply text."""
        if not self.available:
            return f"[Mock response to: {message[:50]}...]"

        request = {
            "model": self.model,
            "max_tokens": 512,
            "messages": [{"role": "user", "content": message}],
        }
        if system:
            request["system"] = system

        return self.client.messages.create(**request).content[0].text
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
78
|
+
# CUSTOMAGENT DEFINITIONS FOR P2P MODE
|
|
79
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
80
|
+
|
|
81
|
+
class ResearcherAgent(CustomAgent):
    """
    Researcher agent that responds to queries from peers.

    In P2P mode, this agent:
    1. Runs continuously in its run() loop
    2. Listens for incoming peer requests
    3. Processes requests using LLM
    4. Sends responses back to requesters
    """
    role = "researcher"
    capabilities = ["research", "analysis", "fact_checking"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        self.llm = None               # created in setup()
        self.queries_handled = 0      # number of peer requests answered

    async def setup(self):
        """Initialize LLM client."""
        await super().setup()
        self.llm = SimpleLLMClient()
        self._logger.info(f"[{self.role}] Ready to receive research queries")

    async def run(self):
        """
        REQUIRED FOR P2P MODE: Continuous run loop.

        Polls for incoming peer requests and answers each one with the
        LLM until shutdown is requested.
        """
        self._logger.info(f"[{self.role}] Starting P2P run loop...")

        while not self.shutdown_requested:
            if not self.peers:
                # Peer client not wired up yet; back off briefly.
                await asyncio.sleep(0.1)
                continue

            msg = await self.peers.receive(timeout=0.5)
            if not (msg and msg.is_request):
                continue  # receive timeout or non-request traffic

            # Accept either "question" or "query" as the payload key.
            query = msg.data.get("question", msg.data.get("query", ""))
            self._logger.info(f"[{self.role}] Received query: {query[:50]}...")

            # FIX: SimpleLLMClient.chat is a synchronous network call; the
            # original invoked it directly from this coroutine, blocking the
            # event loop (and every other agent) for the whole request.
            # Run it in a worker thread instead.
            response = await asyncio.to_thread(
                self.llm.chat,
                query,
                system="You are a research expert. Provide concise, factual answers."
            )

            # Send response back to requester
            await self.peers.respond(msg, {"response": response})
            self.queries_handled += 1
            self._logger.info(f"[{self.role}] Responded (total: {self.queries_handled})")

    async def execute_task(self, task):
        """Not used in P2P mode, but required by base class."""
        return {"status": "success", "note": "P2P mode uses run() instead"}
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
class AssistantAgent(CustomAgent):
    """
    Assistant agent that coordinates with other agents.

    In P2P mode it runs its own loop, answers incoming peer requests,
    and decides on its own when to delegate work to the researcher peer.
    """
    role = "assistant"
    capabilities = ["coordination", "chat", "delegation"]

    def __init__(self, agent_id=None):
        super().__init__(agent_id)
        self.llm = None            # created in setup()
        self.conversations = []    # reserved for conversation history

    async def setup(self):
        """Initialize LLM client."""
        await super().setup()
        self.llm = SimpleLLMClient()
        self._logger.info(f"[{self.role}] Ready to assist and coordinate")

    async def ask_researcher(self, question: str) -> str:
        """Forward a question to the researcher peer via the peer tool."""
        if not self.peers:
            return "No peers available"

        return await self.peers.as_tool().execute(
            "ask_peer",
            {"role": "researcher", "question": question}
        )

    async def process_user_input(self, user_input: str) -> str:
        """
        Answer user input, delegating to the researcher when the text
        looks research-related (demonstrates the P2P request pattern).
        """
        self._logger.info(f"[{self.role}] Processing: {user_input[:50]}...")

        # Keyword heuristic: does this question need the researcher?
        lowered = user_input.lower()
        triggers = ("research", "analyze", "fact", "data", "statistics", "study")

        if any(keyword in lowered for keyword in triggers):
            self._logger.info(f"[{self.role}] Delegating to researcher...")
            findings = await self.ask_researcher(user_input)

            # Synthesize a final answer from the researcher's findings.
            return self.llm.chat(
                f"Based on this research: {findings}\n\nProvide a helpful summary.",
                system="You are a helpful assistant. Summarize research findings clearly."
            )

        # No research needed — answer directly.
        return self.llm.chat(
            user_input,
            system="You are a helpful assistant. Be concise and friendly."
        )

    async def run(self):
        """
        REQUIRED FOR P2P MODE: Continuous run loop.

        Polls for peer requests; a real application might also listen on
        websockets, HTTP endpoints, or message queues here.
        """
        self._logger.info(f"[{self.role}] Starting P2P run loop...")

        while not self.shutdown_requested:
            if not self.peers:
                await asyncio.sleep(0.1)
                continue

            incoming = await self.peers.receive(timeout=0.5)
            if incoming and incoming.is_request:
                answer = await self.process_user_input(incoming.data.get("query", ""))
                await self.peers.respond(incoming, {"response": answer})

    async def execute_task(self, task):
        """Not used in P2P mode, but required by base class."""
        return {"status": "success", "note": "P2P mode uses run() instead"}
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
238
|
+
# MAIN EXAMPLE
|
|
239
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
240
|
+
|
|
241
|
+
async def main():
    """Run CustomAgent P2P mode example.

    Starts a p2p-mode mesh with a researcher and an assistant, runs the
    researcher's loop in the background, pushes a few demo queries through
    the assistant, then shuts everything down.
    """
    print("\n" + "="*70)
    print("JarvisCore: CustomAgent in P2P Mode")
    print("="*70)

    # KEY DIFFERENCE: mode="p2p" - No workflow engine, agents run continuously
    mesh = Mesh(
        mode="p2p",  # P2P only - no workflow orchestration
        config={
            'bind_host': '127.0.0.1',
            'bind_port': 7960,
            'node_name': 'p2p-demo-node',
        }
    )

    researcher = mesh.add(ResearcherAgent)
    assistant = mesh.add(AssistantAgent)

    researcher_task = None
    started = False
    try:
        await mesh.start()
        started = True

        print("\n[INFO] Mesh started in P2P mode")
        print(f" - P2P Coordinator: Active")
        print(f" - Workflow Engine: NOT available (use run_forever instead)")
        print(f" - Agents: {len(mesh.agents)}")

        print("\n" + "-"*70)
        print("Demonstrating P2P Agent Communication")
        print("-"*70)

        # Give agents time to initialize their peer connections
        await asyncio.sleep(0.5)

        # Start researcher's run loop in background
        researcher_task = asyncio.create_task(researcher.run())

        # Give researcher time to start listening
        await asyncio.sleep(0.3)

        # Simulate user queries that the assistant processes
        test_queries = [
            "Research the benefits of renewable energy",
            "Hello, how are you?",  # This won't be delegated
            "Analyze the latest trends in AI development",
        ]

        for query in test_queries:
            print(f"\n[User] {query}")
            response = await assistant.process_user_input(query)
            print(f"[Assistant] {response[:200]}...")

        # Show statistics
        print("\n" + "="*70)
        print("P2P Session Statistics")
        print("="*70)
        print(f" Researcher queries handled: {researcher.queries_handled}")

    except Exception as e:
        print(f"\nError: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # FIX: cleanup originally lived inside the try body, so any error
        # during the demo leaked the background researcher task and left
        # the mesh running. Always tear both down here instead.
        if researcher_task is not None:
            researcher.request_shutdown()
            researcher_task.cancel()
            try:
                await researcher_task
            except asyncio.CancelledError:
                pass
        if started:
            await mesh.stop()
            print("\n[INFO] P2P mesh stopped")
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
322
|
+
# LONG-RUNNING P2P EXAMPLE
|
|
323
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
324
|
+
|
|
325
|
+
async def run_forever_example():
    """
    Example: Running P2P agents indefinitely.

    Use mesh.run_forever() to keep all agents running:

        mesh = Mesh(mode="p2p", config={...})
        mesh.add(ResearcherAgent)
        mesh.add(AssistantAgent)

        await mesh.start()
        await mesh.run_forever()  # Blocks until shutdown signal

    The agents keep executing their run() loops until SIGINT (Ctrl+C),
    SIGTERM, or a programmatic shutdown is requested.
    """
    # Documentation-only placeholder: the pattern lives in the docstring.
    pass
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
# Script entry point: run the P2P demo under a fresh asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|
jarviscore/__init__.py
CHANGED
|
@@ -2,30 +2,51 @@
|
|
|
2
2
|
JarvisCore - P2P Distributed Agent Framework
|
|
3
3
|
|
|
4
4
|
A production-grade framework for building autonomous agent systems with:
|
|
5
|
-
- Event-sourced state management (crash recovery, HITL support)
|
|
6
5
|
- P2P coordination via SWIM protocol
|
|
7
|
-
-
|
|
8
|
-
|
|
9
|
-
* CustomAgent: Framework-agnostic (LangChain, MCP, raw Python)
|
|
6
|
+
- Workflow orchestration with dependencies
|
|
7
|
+
- Two agent profiles: AutoAgent (LLM-powered) and CustomAgent (your code)
|
|
10
8
|
|
|
11
|
-
|
|
12
|
-
from
|
|
9
|
+
Profiles:
|
|
10
|
+
AutoAgent - LLM generates and executes code from prompts (autonomous mode)
|
|
11
|
+
CustomAgent - You provide execute_task() or run() (p2p/distributed modes)
|
|
13
12
|
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
13
|
+
Modes:
|
|
14
|
+
autonomous - Workflow engine only (AutoAgent)
|
|
15
|
+
p2p - P2P coordinator only (CustomAgent with run() loops)
|
|
16
|
+
distributed - Both workflow + P2P (CustomAgent with execute_task())
|
|
17
|
+
|
|
18
|
+
Quick Start (AutoAgent - autonomous mode):
|
|
19
|
+
from jarviscore import Mesh
|
|
20
|
+
from jarviscore.profiles import AutoAgent
|
|
21
|
+
|
|
22
|
+
class CalcAgent(AutoAgent):
|
|
23
|
+
role = "calculator"
|
|
24
|
+
capabilities = ["math"]
|
|
25
|
+
system_prompt = "You are a math expert. Store result in 'result'."
|
|
18
26
|
|
|
19
27
|
mesh = Mesh(mode="autonomous")
|
|
20
|
-
mesh.add(
|
|
28
|
+
mesh.add(CalcAgent)
|
|
21
29
|
await mesh.start()
|
|
30
|
+
results = await mesh.workflow("calc", [{"agent": "calculator", "task": "Calculate 10!"}])
|
|
31
|
+
|
|
32
|
+
Quick Start (CustomAgent - distributed mode):
|
|
33
|
+
from jarviscore import Mesh
|
|
34
|
+
from jarviscore.profiles import CustomAgent
|
|
22
35
|
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
36
|
+
class MyAgent(CustomAgent):
|
|
37
|
+
role = "processor"
|
|
38
|
+
capabilities = ["processing"]
|
|
39
|
+
|
|
40
|
+
async def execute_task(self, task):
|
|
41
|
+
return {"status": "success", "output": task.get("task").upper()}
|
|
42
|
+
|
|
43
|
+
mesh = Mesh(mode="distributed", config={'bind_port': 7950})
|
|
44
|
+
mesh.add(MyAgent)
|
|
45
|
+
await mesh.start()
|
|
46
|
+
results = await mesh.workflow("demo", [{"agent": "processor", "task": "hello"}])
|
|
26
47
|
"""
|
|
27
48
|
|
|
28
|
-
__version__ = "0.
|
|
49
|
+
__version__ = "0.2.0"
|
|
29
50
|
__author__ = "JarvisCore Contributors"
|
|
30
51
|
__license__ = "MIT"
|
|
31
52
|
|
|
@@ -38,12 +59,23 @@ from jarviscore.core.mesh import Mesh, MeshMode
|
|
|
38
59
|
from jarviscore.profiles.autoagent import AutoAgent
|
|
39
60
|
from jarviscore.profiles.customagent import CustomAgent
|
|
40
61
|
|
|
62
|
+
# Custom Profile: Decorator, Wrapper, and Context
|
|
63
|
+
from jarviscore.adapter import jarvis_agent, wrap
|
|
64
|
+
from jarviscore.context import JarvisContext, MemoryAccessor, DependencyAccessor
|
|
65
|
+
|
|
66
|
+
# P2P Direct Communication
|
|
67
|
+
from jarviscore.p2p import PeerClient, PeerTool, PeerInfo, IncomingMessage
|
|
68
|
+
|
|
69
|
+
# Alias for p2p mode agents
|
|
70
|
+
JarvisAgent = Agent # Use this for agents with run() loops
|
|
71
|
+
|
|
41
72
|
__all__ = [
|
|
42
73
|
# Version
|
|
43
74
|
"__version__",
|
|
44
75
|
|
|
45
76
|
# Core
|
|
46
77
|
"Agent",
|
|
78
|
+
"JarvisAgent", # Alias for p2p mode
|
|
47
79
|
"Profile",
|
|
48
80
|
"Mesh",
|
|
49
81
|
"MeshMode",
|
|
@@ -51,4 +83,17 @@ __all__ = [
|
|
|
51
83
|
# Profiles
|
|
52
84
|
"AutoAgent",
|
|
53
85
|
"CustomAgent",
|
|
86
|
+
|
|
87
|
+
# Custom Profile (decorator and wrapper)
|
|
88
|
+
"jarvis_agent",
|
|
89
|
+
"wrap",
|
|
90
|
+
"JarvisContext",
|
|
91
|
+
"MemoryAccessor",
|
|
92
|
+
"DependencyAccessor",
|
|
93
|
+
|
|
94
|
+
# P2P Direct Communication
|
|
95
|
+
"PeerClient",
|
|
96
|
+
"PeerTool",
|
|
97
|
+
"PeerInfo",
|
|
98
|
+
"IncomingMessage",
|
|
54
99
|
]
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Adapter module for JarvisCore Custom Profile.
|
|
3
|
+
|
|
4
|
+
Provides utilities to wrap existing agents for use with JarvisCore:
|
|
5
|
+
- @jarvis_agent: Decorator to convert any class into a JarvisCore agent
|
|
6
|
+
- wrap(): Function to wrap an existing instance as a JarvisCore agent
|
|
7
|
+
|
|
8
|
+
Example (decorator):
|
|
9
|
+
from jarviscore import jarvis_agent, Mesh, JarvisContext
|
|
10
|
+
|
|
11
|
+
@jarvis_agent(role="processor", capabilities=["processing"])
|
|
12
|
+
class DataProcessor:
|
|
13
|
+
def run(self, data):
|
|
14
|
+
return {"processed": data * 2}
|
|
15
|
+
|
|
16
|
+
mesh = Mesh(mode="autonomous")
|
|
17
|
+
mesh.add(DataProcessor)
|
|
18
|
+
await mesh.start()
|
|
19
|
+
|
|
20
|
+
Example (wrap function):
|
|
21
|
+
from jarviscore import wrap, Mesh
|
|
22
|
+
|
|
23
|
+
# Wrap an existing instance
|
|
24
|
+
my_agent = MyLangChainAgent(llm=my_llm)
|
|
25
|
+
wrapped = wrap(my_agent, role="assistant", capabilities=["chat"])
|
|
26
|
+
|
|
27
|
+
mesh = Mesh(mode="autonomous")
|
|
28
|
+
mesh.add(wrapped)
|
|
29
|
+
await mesh.start()
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
from .decorator import jarvis_agent, detect_execute_method, EXECUTE_METHODS
|
|
33
|
+
from .wrapper import wrap
|
|
34
|
+
|
|
35
|
+
__all__ = [
|
|
36
|
+
'jarvis_agent',
|
|
37
|
+
'wrap',
|
|
38
|
+
'detect_execute_method',
|
|
39
|
+
'EXECUTE_METHODS',
|
|
40
|
+
]
|