jarviscore-framework 0.2.1__py3-none-any.whl → 0.3.0__py3-none-any.whl
- examples/cloud_deployment_example.py +162 -0
- examples/fastapi_integration_example.py +570 -0
- examples/listeneragent_cognitive_discovery_example.py +343 -0
- jarviscore/__init__.py +22 -5
- jarviscore/cli/smoketest.py +8 -4
- jarviscore/core/agent.py +227 -0
- jarviscore/data/examples/cloud_deployment_example.py +162 -0
- jarviscore/data/examples/fastapi_integration_example.py +570 -0
- jarviscore/data/examples/listeneragent_cognitive_discovery_example.py +343 -0
- jarviscore/docs/API_REFERENCE.md +296 -3
- jarviscore/docs/CHANGELOG.md +97 -0
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +832 -13
- jarviscore/docs/GETTING_STARTED.md +111 -7
- jarviscore/docs/USER_GUIDE.md +152 -6
- jarviscore/integrations/__init__.py +16 -0
- jarviscore/integrations/fastapi.py +247 -0
- jarviscore/p2p/broadcaster.py +10 -3
- jarviscore/p2p/coordinator.py +310 -14
- jarviscore/p2p/keepalive.py +45 -23
- jarviscore/p2p/peer_client.py +282 -10
- jarviscore/p2p/swim_manager.py +9 -4
- jarviscore/profiles/__init__.py +10 -2
- jarviscore/profiles/listeneragent.py +292 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/METADATA +37 -4
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/RECORD +32 -18
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/WHEEL +1 -1
- tests/test_13_dx_improvements.py +554 -0
- tests/test_14_cloud_deployment.py +403 -0
- tests/test_15_llm_cognitive_discovery.py +684 -0
- tests/test_16_unified_dx_flow.py +947 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.0.dist-info}/top_level.txt +0 -0
examples/listeneragent_cognitive_discovery_example.py
ADDED

@@ -0,0 +1,343 @@
+"""
+ListenerAgent + Cognitive Discovery Example
+
+Demonstrates two v0.3.0 features:
+
+1. ListenerAgent - Handler-based P2P agents (no run() loop needed)
+   - on_peer_request() handles incoming requests
+   - on_peer_notify() handles broadcast notifications
+
+2. Cognitive Discovery - Dynamic peer awareness for LLMs
+   - get_cognitive_context() generates LLM-ready peer descriptions
+   - No hardcoded agent names in prompts
+   - LLM autonomously decides when to delegate
+
+Usage:
+    python examples/listeneragent_cognitive_discovery_example.py
+
+Prerequisites:
+    - .env file with CLAUDE_API_KEY (or other LLM provider)
+"""
+import asyncio
+import sys
+from pathlib import Path
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from jarviscore import Mesh
+from jarviscore.profiles import ListenerAgent
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# SPECIALIST AGENT - Responds to requests from other agents
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class AnalystAgent(ListenerAgent):
+    """
+    Specialist agent that handles analysis requests.
+
+    Uses ListenerAgent profile - just implement handlers, no run() loop needed.
+    """
+    role = "analyst"
+    capabilities = ["data_analysis", "statistics", "insights"]
+    description = "Analyzes data and provides statistical insights"
+
+    async def on_peer_request(self, msg):
+        """Handle incoming analysis requests."""
+        query = msg.data.get("question", msg.data.get("query", ""))
+        print(f"\n[Analyst] Received request: {query[:50]}...")
+
+        # Simulate analysis (in real usage, this would use an LLM)
+        result = {
+            "analysis": f"Analysis of '{query}': The data shows positive trends.",
+            "confidence": 0.85,
+            "insights": ["Trend is upward", "Growth rate: 15%", "Recommendation: Continue"]
+        }
+
+        print(f"[Analyst] Sending response with {len(result['insights'])} insights")
+        return result
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# COORDINATOR AGENT - Uses LLM with cognitive discovery
+# ═══════════════════════════════════════════════════════════════════════════════
+
+class CoordinatorAgent(ListenerAgent):
+    """
+    Coordinator agent that uses LLM with dynamic peer discovery.
+
+    Key pattern:
+    1. Uses get_cognitive_context() to learn about available peers
+    2. Injects peer context into LLM system prompt
+    3. LLM decides when to delegate to specialists
+    """
+    role = "coordinator"
+    capabilities = ["coordination", "delegation", "chat"]
+    description = "Coordinates tasks and delegates to specialists"
+
+    async def setup(self):
+        await super().setup()
+        self.llm = self._create_llm_client()
+
+    def _create_llm_client(self):
+        """Create LLM client with fallback to mock."""
+        try:
+            from anthropic import Anthropic
+            from jarviscore.config import settings
+            import os
+
+            api_key = settings.claude_api_key or os.environ.get("CLAUDE_API_KEY")
+            if not api_key:
+                raise RuntimeError("No API key")
+
+            # Check for custom endpoint (e.g., Azure-hosted Claude)
+            endpoint = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
+            model = settings.claude_model or os.environ.get("CLAUDE_MODEL") or "claude-sonnet-4-20250514"
+
+            if endpoint:
+                client = Anthropic(api_key=api_key, base_url=endpoint)
+            else:
+                client = Anthropic(api_key=api_key)
+
+            # Test the API key with a minimal request
+            try:
+                client.messages.create(
+                    model=model,
+                    max_tokens=10,
+                    messages=[{"role": "user", "content": "Hi"}]
+                )
+            except Exception as e:
+                raise RuntimeError(f"API key validation failed: {e}")
+
+            print(f"[Coordinator] LLM initialized: {model}")
+            return {"client": client, "model": model, "available": True}
+        except Exception as e:
+            print(f"[Coordinator] LLM not available ({e}), using mock responses")
+            return {"available": False}
+
+    def _build_dynamic_prompt(self, base_prompt: str) -> str:
+        """
+        Build system prompt with dynamic peer awareness.
+
+        THIS IS THE KEY PATTERN - the LLM learns about peers dynamically!
+        """
+        if not self.peers:
+            return base_prompt
+
+        # Use get_cognitive_context() for dynamic peer discovery
+        peer_context = self.peers.get_cognitive_context(
+            format="markdown",
+            include_capabilities=True,
+            include_description=True,
+            tool_name="ask_peer"
+        )
+
+        return f"{base_prompt}\n\n{peer_context}"
+
+    async def process_query(self, user_query: str) -> str:
+        """
+        Process a user query using LLM with peer awareness.
+
+        The LLM sees available peers and can decide to delegate.
+        """
+        base_prompt = """You are a coordinator assistant that delegates tasks to specialists.
+
+IMPORTANT: You MUST use the ask_peer tool to delegate to specialists. You cannot perform analysis yourself.
+
+When a user asks for data analysis, statistics, or insights:
+1. Use the ask_peer tool with role="analyst"
+2. Pass their question to the analyst
+3. Report the analyst's findings
+
+Never try to do analysis yourself - always delegate to the analyst."""
+
+        # Build prompt with dynamic peer discovery
+        system_prompt = self._build_dynamic_prompt(base_prompt)
+
+        print(f"\n[Coordinator] System prompt includes peer context:")
+        print("-" * 40)
+        # Show just the peer context part
+        if "AVAILABLE MESH PEERS" in system_prompt:
+            peer_section = system_prompt.split("AVAILABLE MESH PEERS")[1][:200]
+            print(f"...AVAILABLE MESH PEERS{peer_section}...")
+        print("-" * 40)
+
+        # Check if LLM is available
+        if not self.llm.get("available"):
+            # Mock: simulate LLM deciding to delegate
+            if any(word in user_query.lower() for word in ["analyze", "analysis", "statistics", "data"]):
+                print("[Coordinator] Mock LLM decides to delegate to analyst")
+                response = await self.peers.request(
+                    "analyst",
+                    {"question": user_query},
+                    timeout=30
+                )
+                return f"Based on the analyst's findings: {response.get('analysis', 'No response')}"
+            return f"I can help with: {user_query}"
+
+        # Real LLM call with tools
+        tools = self._get_tools()
+        messages = [{"role": "user", "content": user_query}]
+
+        print(f"[Coordinator] Calling LLM with {len(tools)} tools: {[t['name'] for t in tools]}")
+
+        response = self.llm["client"].messages.create(
+            model=self.llm["model"],
+            max_tokens=1024,
+            system=system_prompt,
+            messages=messages,
+            tools=tools
+        )
+
+        print(f"[Coordinator] LLM stop_reason: {response.stop_reason}")
+        print(f"[Coordinator] Response blocks: {[b.type for b in response.content]}")
+
+        # Handle tool use - check for tool_use FIRST (prioritize over text)
+        tool_use_block = None
+        text_content = None
+
+        for block in response.content:
+            if block.type == "tool_use" and block.name == "ask_peer":
+                tool_use_block = block
+            elif hasattr(block, 'text'):
+                text_content = block.text
+
+        # If there's a tool use, execute it
+        if tool_use_block:
+            print(f"[Coordinator] LLM decided to use ask_peer tool")
+            peer_response = await self._execute_peer_tool(tool_use_block.input)
+
+            # Continue conversation with tool result
+            messages.append({"role": "assistant", "content": response.content})
+            messages.append({
+                "role": "user",
+                "content": [{
+                    "type": "tool_result",
+                    "tool_use_id": tool_use_block.id,
+                    "content": str(peer_response)
+                }]
+            })
+
+            final_response = self.llm["client"].messages.create(
+                model=self.llm["model"],
+                max_tokens=1024,
+                system=system_prompt,
+                messages=messages
+            )
+
+            for final_block in final_response.content:
+                if hasattr(final_block, 'text'):
+                    return final_block.text
+
+        # No tool use, return text content
+        if text_content:
+            return text_content
+
+        return "I processed your request."
+
+    def _get_tools(self) -> list:
+        """Get tools for LLM, including peer tools."""
+        return [{
+            "name": "ask_peer",
+            "description": "Ask a specialist agent for help. Use this to delegate tasks to experts.",
+            "input_schema": {
+                "type": "object",
+                "properties": {
+                    "role": {
+                        "type": "string",
+                        "description": "Role of the agent to ask (e.g., 'analyst')"
+                    },
+                    "question": {
+                        "type": "string",
+                        "description": "The question or task for the specialist"
+                    }
+                },
+                "required": ["role", "question"]
+            }
+        }]
+
+    async def _execute_peer_tool(self, args: dict) -> dict:
+        """Execute ask_peer tool."""
+        role = args.get("role", "")
+        question = args.get("question", "")
+
+        print(f"[Coordinator] Asking {role}: {question[:50]}...")
+
+        response = await self.peers.request(
+            role,
+            {"question": question},
+            timeout=30
+        )
+
+        return response
+
+    async def on_peer_request(self, msg):
+        """Handle incoming peer requests (for workflow compatibility)."""
+        query = msg.data.get("query", msg.data.get("question", ""))
+        result = await self.process_query(query)
+        return {"response": result}
+
+
+# ═══════════════════════════════════════════════════════════════════════════════
+# MAIN - Demonstrate cognitive discovery
+# ═══════════════════════════════════════════════════════════════════════════════
+
+async def main():
+    print("=" * 60)
+    print("LLM Cognitive Discovery Example")
+    print("=" * 60)
+
+    # Create mesh with both agents
+    mesh = Mesh(mode="p2p", config={"bind_port": 7960})
+
+    analyst = mesh.add(AnalystAgent())
+    coordinator = mesh.add(CoordinatorAgent())
+
+    await mesh.start()
+
+    print(f"\n[Setup] Mesh started with agents:")
+    print(f"  - {analyst.role}: {analyst.capabilities}")
+    print(f"  - {coordinator.role}: {coordinator.capabilities}")
+
+    # Start analyst listener in background
+    analyst_task = asyncio.create_task(analyst.run())
+
+    # Give time for setup
+    await asyncio.sleep(0.5)
+
+    # Show cognitive context that LLM will see
+    print("\n" + "=" * 60)
+    print("COGNITIVE CONTEXT (what LLM sees about peers)")
+    print("=" * 60)
+    context = coordinator.peers.get_cognitive_context()
+    print(context)
+
+    # Test queries - one that should trigger delegation, one that shouldn't
+    test_queries = [
+        "Please analyze the Q4 sales data and give me insights",
+        "What time is it?",
+    ]
+
+    print("\n" + "=" * 60)
+    print("PROCESSING QUERIES")
+    print("=" * 60)
+
+    for query in test_queries:
+        print(f"\n>>> User: {query}")
+        response = await coordinator.process_query(query)
+        print(f"<<< Coordinator: {response}")
+
+    # Cleanup
+    analyst.request_shutdown()
+    analyst_task.cancel()
+    try:
+        await analyst_task
+    except asyncio.CancelledError:
+        pass

+    await mesh.stop()
+    print("\n[Done] Example completed!")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
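The example above exercises only on_peer_request(); its module docstring also names on_peer_notify() for broadcast notifications. A minimal sketch of that handler, assuming it receives the same message object as on_peer_request() (the exact signature is not shown in this diff), with a hypothetical AuditorAgent for illustration:

    from jarviscore.profiles import ListenerAgent

    class AuditorAgent(ListenerAgent):
        # Hypothetical agent - not part of this release.
        role = "auditor"
        capabilities = ["audit"]
        description = "Records broadcast notifications"

        async def on_peer_notify(self, msg):
            # Notifications are fire-and-forget: no reply is sent back.
            print(f"[Auditor] Notification received: {msg.data}")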
jarviscore/__init__.py
CHANGED

@@ -4,15 +4,16 @@ JarvisCore - P2P Distributed Agent Framework
 A production-grade framework for building autonomous agent systems with:
 - P2P coordination via SWIM protocol
 - Workflow orchestration with dependencies
--
+- Three agent profiles: AutoAgent, CustomAgent, and ListenerAgent
 
 Profiles:
-    AutoAgent
-    CustomAgent
+    AutoAgent - LLM generates and executes code from prompts (autonomous mode)
+    CustomAgent - You provide execute_task() or run() (p2p/distributed modes)
+    ListenerAgent - API-first agents with background P2P (just implement handlers)
 
 Modes:
     autonomous - Workflow engine only (AutoAgent)
-    p2p - P2P coordinator only (CustomAgent with run() loops)
+    p2p - P2P coordinator only (CustomAgent/ListenerAgent with run() loops)
     distributed - Both workflow + P2P (CustomAgent with execute_task())
 
 Quick Start (AutoAgent - autonomous mode):

@@ -29,6 +30,20 @@ Quick Start (AutoAgent - autonomous mode):
     await mesh.start()
     results = await mesh.workflow("calc", [{"agent": "calculator", "task": "Calculate 10!"}])
 
+Quick Start (ListenerAgent + FastAPI):
+    from fastapi import FastAPI
+    from jarviscore.profiles import ListenerAgent
+    from jarviscore.integrations.fastapi import JarvisLifespan
+
+    class MyAgent(ListenerAgent):
+        role = "processor"
+        capabilities = ["processing"]
+
+        async def on_peer_request(self, msg):
+            return {"result": msg.data.get("task", "").upper()}
+
+    app = FastAPI(lifespan=JarvisLifespan(MyAgent(), mode="p2p"))
+
 Quick Start (CustomAgent - distributed mode):
     from jarviscore import Mesh
     from jarviscore.profiles import CustomAgent

@@ -46,7 +61,7 @@ Quick Start (CustomAgent - distributed mode):
     results = await mesh.workflow("demo", [{"agent": "processor", "task": "hello"}])
 """
 
-__version__ = "0.2.1"
+__version__ = "0.3.0"
 __author__ = "JarvisCore Contributors"
 __license__ = "MIT"
 

@@ -58,6 +73,7 @@ from jarviscore.core.mesh import Mesh, MeshMode
 # Execution profiles
 from jarviscore.profiles.autoagent import AutoAgent
 from jarviscore.profiles.customagent import CustomAgent
+from jarviscore.profiles.listeneragent import ListenerAgent
 
 # Custom Profile: Decorator, Wrapper, and Context
 from jarviscore.adapter import jarvis_agent, wrap

@@ -83,6 +99,7 @@ __all__ = [
     # Profiles
     "AutoAgent",
     "CustomAgent",
+    "ListenerAgent",
 
     # Custom Profile (decorator and wrapper)
     "jarvis_agent",
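The new ListenerAgent + FastAPI quick start above defines an ASGI app; one way to serve it, as a minimal sketch assuming the snippet is saved as main.py (the module name is illustrative):

    # Serve the quick-start app with uvicorn (assumes `app` is defined in main.py).
    import uvicorn
    from main import app

    if __name__ == "__main__":
        uvicorn.run(app, host="0.0.0.0", port=8000)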
jarviscore/cli/smoketest.py
CHANGED

@@ -311,10 +311,14 @@ class SmokeTest:
 
         print("\n✓ All smoke tests passed!")
         print("\nJarvisCore is working correctly. Next steps:")
-        print(" 1.
-        print(" 2.
-        print(" 3.
-        print("
+        print(" 1. AutoAgent example: python examples/calculator_agent_example.py")
+        print(" 2. CustomAgent P2P: python examples/customagent_p2p_example.py")
+        print(" 3. ListenerAgent (v0.3): python examples/listeneragent_cognitive_discovery_example.py")
+        print(" 4. FastAPI (v0.3): python examples/fastapi_integration_example.py")
+        print(" 5. Cloud deploy (v0.3): python examples/cloud_deployment_example.py")
+        print("\nDocumentation:")
+        print(" - Getting Started: docs/GETTING_STARTED.md")
+        print(" - User Guide: docs/USER_GUIDE.md")
         print()
         return True
 
jarviscore/core/agent.py
CHANGED

@@ -5,14 +5,19 @@ This is the foundation of the JarvisCore framework. All agents inherit from this
 
 For p2p mode, agents can implement a run() method for their own execution loop
 and use self.peers for direct peer-to-peer communication.
+
+For cloud deployment, agents can self-register with a mesh using join_mesh().
 """
 from abc import ABC, abstractmethod
 from typing import List, Dict, Any, Optional, TYPE_CHECKING
 from uuid import uuid4
+import asyncio
 import logging
+import os
 
 if TYPE_CHECKING:
     from jarviscore.p2p import PeerClient
+    from jarviscore.p2p.coordinator import P2PCoordinator
 
 logger = logging.getLogger(__name__)
 

@@ -70,6 +75,10 @@ class Agent(ABC):
         self.peers: Optional['PeerClient'] = None  # Injected by Mesh in p2p mode
         self.shutdown_requested: bool = False  # Set True to stop run() loop
 
+        # Cloud deployment support (standalone mode)
+        self._standalone_p2p: Optional['P2PCoordinator'] = None
+        self._mesh_connected: bool = False
+
         self._logger.debug(f"Agent initialized: {self.agent_id}")
 
     @abstractmethod

@@ -204,3 +213,221 @@ class Agent(ABC):
     def __str__(self) -> str:
         """Human-readable string representation."""
         return f"{self.role} ({self.agent_id})"
+
+    # ─────────────────────────────────────────────────────────────────
+    # CLOUD DEPLOYMENT (Standalone Mode)
+    # ─────────────────────────────────────────────────────────────────
+
+    async def join_mesh(
+        self,
+        endpoint: str = None,
+        seed_nodes: str = None,
+        config: dict = None
+    ) -> bool:
+        """
+        Self-register with a running mesh (for cloud/container deployment).
+
+        Instead of using mesh.add(), agents can join an existing mesh
+        independently. This is the pattern for containerized deployments
+        where each container runs a single agent.
+
+        Args:
+            endpoint: Mesh endpoint (host:port) - uses JARVISCORE_MESH_ENDPOINT env if not provided
+            seed_nodes: Comma-separated seed nodes - uses JARVISCORE_SEED_NODES env if not provided
+            config: Additional P2P configuration options
+
+        Returns:
+            True if successfully joined the mesh
+
+        Raises:
+            ValueError: If no endpoint or seed_nodes provided and not in environment
+
+        Example - Direct:
+            agent = MyAgent()
+            await agent.join_mesh(seed_nodes="192.168.1.10:7946")
+            await agent.run()
+            await agent.leave_mesh()
+
+        Example - Environment Variable:
+            # Set JARVISCORE_SEED_NODES=192.168.1.10:7946
+            agent = MyAgent()
+            await agent.join_mesh()  # Auto-discovers from env
+            await agent.run()
+            await agent.leave_mesh()
+
+        Example - Docker/K8s:
+            # In container entrypoint
+            async def main():
+                agent = ProcessorAgent()
+                await agent.join_mesh()  # Uses env vars
+                await agent.run_standalone()  # Handles graceful shutdown
+        """
+        from jarviscore.p2p.coordinator import P2PCoordinator
+        from jarviscore.p2p.peer_client import PeerClient
+
+        # 1. Resolve connection info from args or environment
+        endpoint = endpoint or os.environ.get("JARVISCORE_MESH_ENDPOINT")
+        seed_nodes = seed_nodes or os.environ.get("JARVISCORE_SEED_NODES", "")
+
+        if not endpoint and not seed_nodes:
+            raise ValueError(
+                "Must provide endpoint, seed_nodes, or set "
+                "JARVISCORE_MESH_ENDPOINT / JARVISCORE_SEED_NODES environment variable"
+            )
+
+        # 2. Build P2P configuration - use same config loading as Mesh
+        from jarviscore.config import get_config_from_dict
+        mesh_config = get_config_from_dict(config)
+
+        # Set seed nodes for joining the cluster
+        if endpoint:
+            mesh_config["seed_nodes"] = endpoint
+        if seed_nodes:
+            mesh_config["seed_nodes"] = seed_nodes
+
+        # Find an available port for this agent's P2P listener
+        # SWIM doesn't support bind_port=0, so we find a free port
+        if "bind_port" not in mesh_config or mesh_config.get("bind_port") == 0:
+            import socket
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.bind(('', 0))
+                mesh_config["bind_port"] = s.getsockname()[1]
+
+        mesh_config["node_name"] = f"agent-{self.agent_id}"
+
+        self._logger.info(f"Joining mesh via {endpoint or seed_nodes}...")
+
+        # 3. Setup agent (call setup hook)
+        await self.setup()
+
+        # 4. Start standalone P2P coordinator
+        self._standalone_p2p = P2PCoordinator([self], mesh_config)
+        await self._standalone_p2p.start()
+
+        # 5. Wait for SWIM cluster to converge
+        # This allows SWIM gossip to sync membership
+        import asyncio
+        self._logger.info("Waiting for SWIM cluster convergence...")
+        await asyncio.sleep(1.0)  # Brief wait for SWIM gossip
+
+        # 6. Request existing capabilities from peers (we're a late joiner)
+        # Note: request_peer_capabilities will wait for ZMQ connections internally
+        self._logger.info("Requesting capabilities from existing peers...")
+        await self._standalone_p2p.request_peer_capabilities()
+
+        # 7. Announce our own capabilities to mesh
+        # Note: announce_capabilities will wait for ZMQ connections internally
+        await self._standalone_p2p.announce_capabilities()
+
+        # 8. Setup PeerClient for this agent
+        node_id = ""
+        if self._standalone_p2p.swim_manager:
+            addr = self._standalone_p2p.swim_manager.bind_addr
+            if addr:
+                node_id = f"{addr[0]}:{addr[1]}"
+
+        self.peers = PeerClient(
+            coordinator=self._standalone_p2p,
+            agent_id=self.agent_id,
+            agent_role=self.role,
+            agent_registry={self.role: [self]},
+            node_id=node_id
+        )
+
+        # Register PeerClient with coordinator for message routing
+        self._standalone_p2p.register_peer_client(self.agent_id, self.peers)
+
+        self._mesh_connected = True
+        self._logger.info(f"Successfully joined mesh as {self.role} ({self.agent_id})")
+
+        return True
+
+    async def leave_mesh(self) -> bool:
+        """
+        Gracefully deregister from the mesh.
+
+        Called when the agent is shutting down to notify other nodes
+        that this agent is no longer available.
+
+        Returns:
+            True if successfully left the mesh
+
+        Example:
+            try:
+                await agent.run()
+            finally:
+                await agent.leave_mesh()
+        """
+        if not self._mesh_connected:
+            return True
+
+        self._logger.info("Leaving mesh...")
+
+        # 1. Deannounce capabilities (notify mesh we're leaving)
+        if self._standalone_p2p:
+            try:
+                await self._standalone_p2p.deannounce_capabilities()
+            except Exception as e:
+                self._logger.warning(f"Error deannouncing capabilities: {e}")
+
+        # 2. Unregister peer client
+        if self._standalone_p2p:
+            self._standalone_p2p.unregister_peer_client(self.agent_id)
+
+        # 3. Stop P2P coordinator
+        if self._standalone_p2p:
+            await self._standalone_p2p.stop()
+            self._standalone_p2p = None
+
+        # 4. Teardown agent (call teardown hook)
+        await self.teardown()
+
+        self._mesh_connected = False
+        self.peers = None
+        self._logger.info("Successfully left mesh")
+
+        return True
+
+    @property
+    def is_mesh_connected(self) -> bool:
+        """Check if agent is currently connected to a mesh."""
+        return self._mesh_connected
+
+    async def run_standalone(self):
+        """
+        Run agent in standalone mode with automatic mesh cleanup.
+
+        Combines the run() loop with a graceful leave_mesh() on exit.
+        Use this as the main entrypoint for containerized agents.
+
+        Example - Container Entrypoint:
+            async def main():
+                agent = ProcessorAgent()
+                await agent.join_mesh()
+                await agent.run_standalone()  # Blocks until shutdown
+
+            if __name__ == "__main__":
+                asyncio.run(main())
+        """
+        if not self._mesh_connected:
+            raise RuntimeError(
+                "Not connected to mesh. Call join_mesh() first."
+            )
+
+        try:
+            # Run the agent's main loop
+            if hasattr(self, 'run') and asyncio.iscoroutinefunction(self.run):
+                await self.run()
+            else:
+                # No run() method - just wait for shutdown signal
+                while not self.shutdown_requested:
+                    await asyncio.sleep(0.1)
+
+        except asyncio.CancelledError:
+            self._logger.info("Agent cancelled, cleaning up...")
+        except Exception as e:
+            self._logger.error(f"Agent error: {e}")
+            raise
+        finally:
+            # Always leave mesh gracefully
+            await self.leave_mesh()
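Putting join_mesh() and run_standalone() together, a container entrypoint could look like the sketch below. ProcessorAgent is hypothetical, JARVISCORE_SEED_NODES is assumed to be set in the container environment (e.g. 10.0.0.5:7946), and the SIGTERM wiring uses request_shutdown() as seen in the example file above; this is an illustration of the documented pattern, not code from the release:

    import asyncio
    import signal

    from jarviscore.profiles import ListenerAgent

    class ProcessorAgent(ListenerAgent):
        # Hypothetical agent for illustration.
        role = "processor"
        capabilities = ["processing"]

        async def on_peer_request(self, msg):
            return {"result": msg.data.get("task", "").upper()}

    async def main():
        agent = ProcessorAgent()
        await agent.join_mesh()  # Resolves JARVISCORE_MESH_ENDPOINT / JARVISCORE_SEED_NODES

        # Translate Docker/K8s SIGTERM into the framework's shutdown flag (Unix only).
        loop = asyncio.get_running_loop()
        loop.add_signal_handler(signal.SIGTERM, agent.request_shutdown)

        await agent.run_standalone()  # Blocks until shutdown, then leaves the mesh

    if __name__ == "__main__":
        asyncio.run(main())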