jarviscore-framework 0.2.1-py3-none-any.whl → 0.3.1-py3-none-any.whl
This diff shows the contents of two publicly released versions of the package as published to a supported registry; it is provided for informational purposes only and reflects the packages as they appear in that registry.
- examples/cloud_deployment_example.py +162 -0
- examples/customagent_cognitive_discovery_example.py +343 -0
- examples/fastapi_integration_example.py +570 -0
- jarviscore/__init__.py +19 -5
- jarviscore/cli/smoketest.py +8 -4
- jarviscore/core/agent.py +227 -0
- jarviscore/core/mesh.py +9 -0
- jarviscore/data/examples/cloud_deployment_example.py +162 -0
- jarviscore/data/examples/custom_profile_decorator.py +134 -0
- jarviscore/data/examples/custom_profile_wrap.py +168 -0
- jarviscore/data/examples/customagent_cognitive_discovery_example.py +343 -0
- jarviscore/data/examples/fastapi_integration_example.py +570 -0
- jarviscore/docs/API_REFERENCE.md +283 -3
- jarviscore/docs/CHANGELOG.md +139 -0
- jarviscore/docs/CONFIGURATION.md +1 -1
- jarviscore/docs/CUSTOMAGENT_GUIDE.md +997 -85
- jarviscore/docs/GETTING_STARTED.md +228 -267
- jarviscore/docs/TROUBLESHOOTING.md +1 -1
- jarviscore/docs/USER_GUIDE.md +153 -8
- jarviscore/integrations/__init__.py +16 -0
- jarviscore/integrations/fastapi.py +247 -0
- jarviscore/p2p/broadcaster.py +10 -3
- jarviscore/p2p/coordinator.py +310 -14
- jarviscore/p2p/keepalive.py +45 -23
- jarviscore/p2p/peer_client.py +311 -12
- jarviscore/p2p/swim_manager.py +9 -4
- jarviscore/profiles/__init__.py +7 -1
- jarviscore/profiles/customagent.py +295 -74
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/METADATA +66 -18
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/RECORD +37 -22
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/WHEEL +1 -1
- tests/test_13_dx_improvements.py +554 -0
- tests/test_14_cloud_deployment.py +403 -0
- tests/test_15_llm_cognitive_discovery.py +684 -0
- tests/test_16_unified_dx_flow.py +947 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {jarviscore_framework-0.2.1.dist-info → jarviscore_framework-0.3.1.dist-info}/top_level.txt +0 -0
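
For orientation before the full listings below, here is a condensed sketch of the two entry points these new examples exercise: the handler-based CustomAgent profile and the wrap() helper. It reuses only calls that appear in the added files (Mesh, wrap, CustomAgent, on_peer_request, mesh.workflow); the EchoAgent and Doubler classes are hypothetical stand-ins, so treat this as an illustration of the API shape, not a verified snippet from the package.

import asyncio
from jarviscore import Mesh, wrap
from jarviscore.profiles import CustomAgent


class EchoAgent(CustomAgent):
    """Hypothetical handler-based agent: no run() loop, just respond to peer requests."""
    role = "echo"
    capabilities = ["echo"]
    description = "Returns whatever it receives"

    async def on_peer_request(self, msg):
        return {"echo": msg.data}


class Doubler:
    """Hypothetical pre-existing object; wrap() adapts it into a mesh agent."""
    def run(self, data):
        return data * 2


async def demo():
    mesh = Mesh(mode="autonomous")
    mesh.add(EchoAgent())
    mesh.add(wrap(Doubler(), role="doubler", capabilities=["math"]))  # execute_method auto-detected as "run"
    await mesh.start()
    try:
        results = await mesh.workflow("demo", [
            {"id": "double", "agent": "doubler", "task": "Double a number", "params": {"data": 21}},
        ])
        print(results)
    finally:
        await mesh.stop()


if __name__ == "__main__":
    asyncio.run(demo())

The full examples added in this release follow.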

jarviscore/data/examples/custom_profile_wrap.py
@@ -0,0 +1,168 @@
"""
Custom Profile Example: Using wrap() Function

This example shows how to use the wrap() function to convert
an existing instance into a JarvisCore agent.

Use Case: You have an already-instantiated object (like a LangChain
agent, CrewAI agent, or any configured instance) and want to use it
with JarvisCore orchestration.
"""
import asyncio
from jarviscore import Mesh, wrap, JarvisContext


# Simulate an existing "LangChain-like" agent
class ExternalLLMAgent:
    """
    Simulates an external LLM agent (like LangChain).
    In real usage, this would be your actual LangChain/CrewAI agent.
    """

    def __init__(self, model_name: str, temperature: float = 0.7):
        self.model_name = model_name
        self.temperature = temperature
        print(f" Initialized ExternalLLMAgent with {model_name}")

    def invoke(self, query: str) -> dict:
        """LangChain-style invoke method."""
        # Simulate LLM response
        return {
            "answer": f"Response to '{query}' from {self.model_name}",
            "model": self.model_name,
            "tokens_used": len(query.split()) * 10
        }


# Simulate a data processing service
class DataService:
    """Simulates an external data processing service."""

    def __init__(self, api_url: str):
        self.api_url = api_url
        print(f" Initialized DataService with {api_url}")

    def run(self, data):
        """Process data through the service."""
        if isinstance(data, list):
            return {
                "transformed": [x ** 2 for x in data],
                "source": self.api_url
            }
        return {"transformed": data ** 2, "source": self.api_url}


# Simulate an agent that needs context
class ContextAwareProcessor:
    """Agent that uses JarvisContext to access previous results."""

    def run(self, task, ctx: JarvisContext):
        """Process with context access."""
        # Get all previous results
        all_previous = ctx.all_previous()

        summary = {
            "task": task,
            "previous_steps": list(all_previous.keys()),
            "combined_data": {}
        }

        for step_id, output in all_previous.items():
            if isinstance(output, dict):
                summary["combined_data"][step_id] = output

        return summary


async def main():
    """Demonstrate wrapping existing instances."""
    print("=" * 60)
    print(" Custom Profile Example: wrap() Function")
    print("=" * 60)

    # Create instances of "external" agents
    print("\nCreating external agent instances...")
    llm_agent = ExternalLLMAgent(model_name="gpt-4-turbo", temperature=0.3)
    data_service = DataService(api_url="https://api.example.com/process")
    context_processor = ContextAwareProcessor()

    # Wrap them for JarvisCore
    print("\nWrapping instances for JarvisCore...")

    wrapped_llm = wrap(
        llm_agent,
        role="llm_assistant",
        capabilities=["chat", "qa"],
        execute_method="invoke"  # LangChain uses "invoke"
    )

    wrapped_data = wrap(
        data_service,
        role="data_processor",
        capabilities=["data_processing", "transformation"]
        # execute_method auto-detected as "run"
    )

    wrapped_context = wrap(
        context_processor,
        role="context_aggregator",
        capabilities=["aggregation", "summary"]
    )

    # Create mesh and add wrapped agents
    mesh = Mesh(mode="autonomous")
    mesh.add(wrapped_llm)
    mesh.add(wrapped_data)
    mesh.add(wrapped_context)

    await mesh.start()

    try:
        print("\nExecuting workflow with wrapped agents...\n")

        results = await mesh.workflow("wrap-demo", [
            {
                "id": "llm_step",
                "agent": "llm_assistant",
                "task": "What is the capital of France?",
                "params": {"query": "What is the capital of France?"}
            },
            {
                "id": "data_step",
                "agent": "data_processor",
                "task": "Transform numbers",
                "params": {"data": [1, 2, 3, 4, 5]}
            },
            {
                "id": "summary_step",
                "agent": "context_aggregator",
                "task": "Summarize all results",
                "depends_on": ["llm_step", "data_step"]
            }
        ])

        # Print results
        print("Results:")
        print("-" * 40)

        step_names = ["LLM Assistant", "Data Processor", "Context Aggregator"]
        for i, result in enumerate(results):
            print(f"\n{step_names[i]}:")
            print(f" Status: {result.get('status')}")
            output = result.get('output', {})
            if isinstance(output, dict):
                for key, value in output.items():
                    print(f" {key}: {value}")
            else:
                print(f" Output: {output}")

        print("\n" + "=" * 60)
        print(" Workflow with wrapped instances completed!")
        print("=" * 60)

    finally:
        await mesh.stop()


if __name__ == "__main__":
    asyncio.run(main())

customagent_cognitive_discovery_example.py
@@ -0,0 +1,343 @@
"""
CustomAgent + Cognitive Discovery Example

Demonstrates two v0.3.0 features:

1. CustomAgent - Handler-based P2P agents (no run() loop needed)
   - on_peer_request() handles incoming requests
   - on_peer_notify() handles broadcast notifications

2. Cognitive Discovery - Dynamic peer awareness for LLMs
   - get_cognitive_context() generates LLM-ready peer descriptions
   - No hardcoded agent names in prompts
   - LLM autonomously decides when to delegate

Usage:
    python examples/customagent_cognitive_discovery_example.py

Prerequisites:
    - .env file with CLAUDE_API_KEY (or other LLM provider)
"""
import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent))

from jarviscore import Mesh
from jarviscore.profiles import CustomAgent


# ═══════════════════════════════════════════════════════════════════════════════
# SPECIALIST AGENT - Responds to requests from other agents
# ═══════════════════════════════════════════════════════════════════════════════

class AnalystAgent(CustomAgent):
    """
    Specialist agent that handles analysis requests.

    Uses CustomAgent profile - just implement handlers, no run() loop needed.
    """
    role = "analyst"
    capabilities = ["data_analysis", "statistics", "insights"]
    description = "Analyzes data and provides statistical insights"

    async def on_peer_request(self, msg):
        """Handle incoming analysis requests."""
        query = msg.data.get("question", msg.data.get("query", ""))
        print(f"\n[Analyst] Received request: {query[:50]}...")

        # Simulate analysis (in real usage, this would use an LLM)
        result = {
            "analysis": f"Analysis of '{query}': The data shows positive trends.",
            "confidence": 0.85,
            "insights": ["Trend is upward", "Growth rate: 15%", "Recommendation: Continue"]
        }

        print(f"[Analyst] Sending response with {len(result['insights'])} insights")
        return result


# ═══════════════════════════════════════════════════════════════════════════════
# COORDINATOR AGENT - Uses LLM with cognitive discovery
# ═══════════════════════════════════════════════════════════════════════════════

class CoordinatorAgent(CustomAgent):
    """
    Coordinator agent that uses LLM with dynamic peer discovery.

    Key pattern:
    1. Uses get_cognitive_context() to learn about available peers
    2. Injects peer context into LLM system prompt
    3. LLM decides when to delegate to specialists
    """
    role = "coordinator"
    capabilities = ["coordination", "delegation", "chat"]
    description = "Coordinates tasks and delegates to specialists"

    async def setup(self):
        await super().setup()
        self.llm = self._create_llm_client()

    def _create_llm_client(self):
        """Create LLM client with fallback to mock."""
        try:
            from anthropic import Anthropic
            from jarviscore.config import settings
            import os

            api_key = settings.claude_api_key or os.environ.get("CLAUDE_API_KEY")
            if not api_key:
                raise RuntimeError("No API key")

            # Check for custom endpoint (e.g., Azure-hosted Claude)
            endpoint = settings.claude_endpoint or os.environ.get("CLAUDE_ENDPOINT")
            model = settings.claude_model or os.environ.get("CLAUDE_MODEL") or "claude-sonnet-4-20250514"

            if endpoint:
                client = Anthropic(api_key=api_key, base_url=endpoint)
            else:
                client = Anthropic(api_key=api_key)

            # Test the API key with a minimal request
            try:
                client.messages.create(
                    model=model,
                    max_tokens=10,
                    messages=[{"role": "user", "content": "Hi"}]
                )
            except Exception as e:
                raise RuntimeError(f"API key validation failed: {e}")

            print(f"[Coordinator] LLM initialized: {model}")
            return {"client": client, "model": model, "available": True}
        except Exception as e:
            print(f"[Coordinator] LLM not available ({e}), using mock responses")
            return {"available": False}

    def _build_dynamic_prompt(self, base_prompt: str) -> str:
        """
        Build system prompt with dynamic peer awareness.

        THIS IS THE KEY PATTERN - the LLM learns about peers dynamically!
        """
        if not self.peers:
            return base_prompt

        # Use get_cognitive_context() for dynamic peer discovery
        peer_context = self.peers.get_cognitive_context(
            format="markdown",
            include_capabilities=True,
            include_description=True,
            tool_name="ask_peer"
        )

        return f"{base_prompt}\n\n{peer_context}"

    async def process_query(self, user_query: str) -> str:
        """
        Process a user query using LLM with peer awareness.

        The LLM sees available peers and can decide to delegate.
        """
        base_prompt = """You are a coordinator assistant that delegates tasks to specialists.

IMPORTANT: You MUST use the ask_peer tool to delegate to specialists. You cannot perform analysis yourself.

When a user asks for data analysis, statistics, or insights:
1. Use the ask_peer tool with role="analyst"
2. Pass their question to the analyst
3. Report the analyst's findings

Never try to do analysis yourself - always delegate to the analyst."""

        # Build prompt with dynamic peer discovery
        system_prompt = self._build_dynamic_prompt(base_prompt)

        print(f"\n[Coordinator] System prompt includes peer context:")
        print("-" * 40)
        # Show just the peer context part
        if "AVAILABLE MESH PEERS" in system_prompt:
            peer_section = system_prompt.split("AVAILABLE MESH PEERS")[1][:200]
            print(f"...AVAILABLE MESH PEERS{peer_section}...")
        print("-" * 40)

        # Check if LLM is available
        if not self.llm.get("available"):
            # Mock: simulate LLM deciding to delegate
            if any(word in user_query.lower() for word in ["analyze", "analysis", "statistics", "data"]):
                print("[Coordinator] Mock LLM decides to delegate to analyst")
                response = await self.peers.request(
                    "analyst",
                    {"question": user_query},
                    timeout=30
                )
                return f"Based on the analyst's findings: {response.get('analysis', 'No response')}"
            return f"I can help with: {user_query}"

        # Real LLM call with tools
        tools = self._get_tools()
        messages = [{"role": "user", "content": user_query}]

        print(f"[Coordinator] Calling LLM with {len(tools)} tools: {[t['name'] for t in tools]}")

        response = self.llm["client"].messages.create(
            model=self.llm["model"],
            max_tokens=1024,
            system=system_prompt,
            messages=messages,
            tools=tools
        )

        print(f"[Coordinator] LLM stop_reason: {response.stop_reason}")
        print(f"[Coordinator] Response blocks: {[b.type for b in response.content]}")

        # Handle tool use - check for tool_use FIRST (prioritize over text)
        tool_use_block = None
        text_content = None

        for block in response.content:
            if block.type == "tool_use" and block.name == "ask_peer":
                tool_use_block = block
            elif hasattr(block, 'text'):
                text_content = block.text

        # If there's a tool use, execute it
        if tool_use_block:
            print(f"[Coordinator] LLM decided to use ask_peer tool")
            peer_response = await self._execute_peer_tool(tool_use_block.input)

            # Continue conversation with tool result
            messages.append({"role": "assistant", "content": response.content})
            messages.append({
                "role": "user",
                "content": [{
                    "type": "tool_result",
                    "tool_use_id": tool_use_block.id,
                    "content": str(peer_response)
                }]
            })

            final_response = self.llm["client"].messages.create(
                model=self.llm["model"],
                max_tokens=1024,
                system=system_prompt,
                messages=messages
            )

            for final_block in final_response.content:
                if hasattr(final_block, 'text'):
                    return final_block.text

        # No tool use, return text content
        if text_content:
            return text_content

        return "I processed your request."

    def _get_tools(self) -> list:
        """Get tools for LLM, including peer tools."""
        return [{
            "name": "ask_peer",
            "description": "Ask a specialist agent for help. Use this to delegate tasks to experts.",
            "input_schema": {
                "type": "object",
                "properties": {
                    "role": {
                        "type": "string",
                        "description": "Role of the agent to ask (e.g., 'analyst')"
                    },
                    "question": {
                        "type": "string",
                        "description": "The question or task for the specialist"
                    }
                },
                "required": ["role", "question"]
            }
        }]

    async def _execute_peer_tool(self, args: dict) -> dict:
        """Execute ask_peer tool."""
        role = args.get("role", "")
        question = args.get("question", "")

        print(f"[Coordinator] Asking {role}: {question[:50]}...")

        response = await self.peers.request(
            role,
            {"question": question},
            timeout=30
        )

        return response

    async def on_peer_request(self, msg):
        """Handle incoming peer requests (for workflow compatibility)."""
        query = msg.data.get("query", msg.data.get("question", ""))
        result = await self.process_query(query)
        return {"response": result}

# ═══════════════════════════════════════════════════════════════════════════════
# MAIN - Demonstrate cognitive discovery
# ═══════════════════════════════════════════════════════════════════════════════

async def main():
    print("=" * 60)
    print("LLM Cognitive Discovery Example")
    print("=" * 60)

    # Create mesh with both agents
    mesh = Mesh(mode="p2p", config={"bind_port": 7960})

    analyst = mesh.add(AnalystAgent())
    coordinator = mesh.add(CoordinatorAgent())

    await mesh.start()

    print(f"\n[Setup] Mesh started with agents:")
    print(f" - {analyst.role}: {analyst.capabilities}")
    print(f" - {coordinator.role}: {coordinator.capabilities}")

    # Start analyst listener in background
    analyst_task = asyncio.create_task(analyst.run())

    # Give time for setup
    await asyncio.sleep(0.5)

    # Show cognitive context that LLM will see
    print("\n" + "=" * 60)
    print("COGNITIVE CONTEXT (what LLM sees about peers)")
    print("=" * 60)
    context = coordinator.peers.get_cognitive_context()
    print(context)

    # Test queries - one that should trigger delegation, one that shouldn't
    test_queries = [
        "Please analyze the Q4 sales data and give me insights",
        "What time is it?",
    ]

    print("\n" + "=" * 60)
    print("PROCESSING QUERIES")
    print("=" * 60)

    for query in test_queries:
        print(f"\n>>> User: {query}")
        response = await coordinator.process_query(query)
        print(f"<<< Coordinator: {response}")

    # Cleanup
    analyst.request_shutdown()
    analyst_task.cancel()
    try:
        await analyst_task
    except asyncio.CancelledError:
        pass

    await mesh.stop()
    print("\n[Done] Example completed!")


if __name__ == "__main__":
    asyncio.run(main())