jarviscore-framework 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/calculator_agent_example.py +77 -0
- examples/multi_agent_workflow.py +132 -0
- examples/research_agent_example.py +76 -0
- jarviscore/__init__.py +54 -0
- jarviscore/cli/__init__.py +7 -0
- jarviscore/cli/__main__.py +33 -0
- jarviscore/cli/check.py +404 -0
- jarviscore/cli/smoketest.py +371 -0
- jarviscore/config/__init__.py +7 -0
- jarviscore/config/settings.py +128 -0
- jarviscore/core/__init__.py +7 -0
- jarviscore/core/agent.py +163 -0
- jarviscore/core/mesh.py +463 -0
- jarviscore/core/profile.py +64 -0
- jarviscore/docs/API_REFERENCE.md +932 -0
- jarviscore/docs/CONFIGURATION.md +753 -0
- jarviscore/docs/GETTING_STARTED.md +600 -0
- jarviscore/docs/TROUBLESHOOTING.md +424 -0
- jarviscore/docs/USER_GUIDE.md +983 -0
- jarviscore/execution/__init__.py +94 -0
- jarviscore/execution/code_registry.py +298 -0
- jarviscore/execution/generator.py +268 -0
- jarviscore/execution/llm.py +430 -0
- jarviscore/execution/repair.py +283 -0
- jarviscore/execution/result_handler.py +332 -0
- jarviscore/execution/sandbox.py +555 -0
- jarviscore/execution/search.py +281 -0
- jarviscore/orchestration/__init__.py +18 -0
- jarviscore/orchestration/claimer.py +101 -0
- jarviscore/orchestration/dependency.py +143 -0
- jarviscore/orchestration/engine.py +292 -0
- jarviscore/orchestration/status.py +96 -0
- jarviscore/p2p/__init__.py +23 -0
- jarviscore/p2p/broadcaster.py +353 -0
- jarviscore/p2p/coordinator.py +364 -0
- jarviscore/p2p/keepalive.py +361 -0
- jarviscore/p2p/swim_manager.py +290 -0
- jarviscore/profiles/__init__.py +6 -0
- jarviscore/profiles/autoagent.py +264 -0
- jarviscore/profiles/customagent.py +137 -0
- jarviscore_framework-0.1.0.dist-info/METADATA +136 -0
- jarviscore_framework-0.1.0.dist-info/RECORD +55 -0
- jarviscore_framework-0.1.0.dist-info/WHEEL +5 -0
- jarviscore_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
- jarviscore_framework-0.1.0.dist-info/top_level.txt +3 -0
- tests/conftest.py +44 -0
- tests/test_agent.py +165 -0
- tests/test_autoagent.py +140 -0
- tests/test_autoagent_day4.py +186 -0
- tests/test_customagent.py +248 -0
- tests/test_integration.py +293 -0
- tests/test_llm_fallback.py +185 -0
- tests/test_mesh.py +356 -0
- tests/test_p2p_integration.py +375 -0
- tests/test_remote_sandbox.py +116 -0
jarviscore/execution/search.py
@@ -0,0 +1,281 @@

"""
Internet Search - Zero-config web search and content extraction
Uses DuckDuckGo (no API key required)
"""
import logging
import aiohttp
import asyncio
import re
from typing import Dict, Any, List, Optional
from bs4 import BeautifulSoup
from urllib.parse import quote_plus, urlparse

logger = logging.getLogger(__name__)


class InternetSearch:
    """
    Zero-config internet search with content extraction.

    Features:
    - Search the web using DuckDuckGo (no API key needed)
    - Extract clean text from web pages
    - Automatic error handling (failed requests return empty or error results)
    - Combined search + extract in one call

    Example:
        search = InternetSearch()
        results = await search.search("Python async programming")
        content = await search.extract_content(results[0]['url'])
    """

    def __init__(self, user_agent: Optional[str] = None):
        """
        Initialize search module.

        Args:
            user_agent: Optional custom user agent (auto-set if None)
        """
        self.session = None
        self.user_agent = user_agent or (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/120.0.0.0 Safari/537.36"
        )

    async def initialize(self):
        """Initialize HTTP session (auto-called on first use)."""
        if self.session is None or self.session.closed:
            self.session = aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=30),
                headers={
                    "User-Agent": self.user_agent,
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                    "Accept-Language": "en-US,en;q=0.9"
                }
            )
            logger.debug("HTTP session initialized")

    async def close(self):
        """Close HTTP session."""
        if self.session and not self.session.closed:
            await self.session.close()
            self.session = None
            logger.debug("HTTP session closed")

    async def __aenter__(self):
        """Async context manager entry."""
        await self.initialize()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit."""
        await self.close()
        return False

    async def search(self, query: str, max_results: int = 5) -> List[Dict[str, Any]]:
        """
        Search the web using DuckDuckGo.

        Args:
            query: Search query
            max_results: Maximum number of results (default 5)

        Returns:
            List of results: [{"title": "...", "snippet": "...", "url": "..."}]

        Example:
            results = await search.search("Python async tutorial")
            print(results[0]['title'])
        """
        await self.initialize()

        encoded_query = quote_plus(query)
        logger.info(f"Searching: {query}")

        try:
            results = await self._search_duckduckgo(encoded_query)

            # Normalize URLs
            for result in results:
                url = result.get('url', '')
                if url and not url.startswith(('http://', 'https://')):
                    result['url'] = 'https://' + url

            results = results[:max_results]
            logger.info(f"Found {len(results)} results")
            return results

        except Exception as e:
            logger.error(f"Search failed: {e}")
            return []

    async def _search_duckduckgo(self, encoded_query: str) -> List[Dict[str, Any]]:
        """Perform DuckDuckGo search."""
        url = f"https://html.duckduckgo.com/html/?q={encoded_query}"

        try:
            async with self.session.get(url) as response:
                if response.status != 200:
                    raise RuntimeError(f"DuckDuckGo returned {response.status}")

                html = await response.text()
                soup = BeautifulSoup(html, 'html.parser')

                results = []
                for result_elem in soup.select('.result'):
                    title_elem = result_elem.select_one('.result__title')
                    snippet_elem = result_elem.select_one('.result__snippet')
                    url_elem = result_elem.select_one('.result__url')

                    if title_elem and url_elem:
                        # Clean up DuckDuckGo's tracking URLs
                        url_text = url_elem.get_text(strip=True)
                        # Strip any scheme prefix (e.g. "https://") if present
                        url_text = re.sub(r'^.*?://', '', url_text)
                        url_text = url_text.split('?')[0]  # Remove query params

                        results.append({
                            "title": title_elem.get_text(strip=True),
                            "snippet": snippet_elem.get_text(strip=True) if snippet_elem else "",
                            "url": url_text
                        })

                return results

        except Exception as e:
            logger.error(f"DuckDuckGo search error: {e}")
            return []

    async def extract_content(self, url: str, max_length: int = 10000) -> Dict[str, Any]:
        """
        Extract clean text content from a webpage.

        Args:
            url: URL to extract from
            max_length: Maximum content length (default 10k chars)

        Returns:
            {
                "url": "https://...",
                "title": "Page Title",
                "content": "Clean extracted text...",
                "success": True,
                "word_count": 1234
            }

        Example:
            content = await search.extract_content("https://example.com")
            print(content['content'])
        """
        await self.initialize()

        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url

        logger.info(f"Extracting content from: {url}")

        try:
            async with self.session.get(url) as response:
                if response.status != 200:
                    return {
                        "url": url,
                        "success": False,
                        "error": f"HTTP {response.status}"
                    }

                html = await response.text()
                soup = BeautifulSoup(html, 'html.parser')

                # Extract title
                title = soup.title.string if soup.title else urlparse(url).netloc

                # Remove script, style, and other non-content elements
                for element in soup(['script', 'style', 'nav', 'footer', 'header', 'aside']):
                    element.decompose()

                # Try to find the main content area
                main_content = (
                    soup.find('main') or
                    soup.find('article') or
                    soup.find('div', class_=re.compile(r'content|main|article', re.I)) or
                    soup.body
                )

                if main_content:
                    text = main_content.get_text(separator='\n', strip=True)
                else:
                    text = soup.get_text(separator='\n', strip=True)

                # Clean up text
                lines = [line.strip() for line in text.split('\n') if line.strip()]
                clean_text = '\n'.join(lines)

                # Truncate if too long
                if len(clean_text) > max_length:
                    clean_text = clean_text[:max_length] + "... [truncated]"

                word_count = len(clean_text.split())

                logger.info(f"Extracted {word_count} words from {url}")

                return {
                    "url": url,
                    "title": title.strip() if title else "",
                    "content": clean_text,
                    "success": True,
                    "word_count": word_count
                }

        except Exception as e:
            logger.error(f"Content extraction failed for {url}: {e}")
            return {
                "url": url,
                "success": False,
                "error": str(e)
            }

    async def search_and_extract(
        self,
        query: str,
        num_results: int = 3,
        max_content_length: int = 5000
    ) -> List[Dict[str, Any]]:
        """
        Search and automatically extract content from top results.

        Args:
            query: Search query
            num_results: Number of results to extract (default 3)
            max_content_length: Max content per page (default 5k chars)

        Returns:
            List of extracted content from top search results

        Example:
            results = await search.search_and_extract("Python asyncio tutorial")
            for result in results:
                print(f"{result['title']}: {result['content'][:100]}...")
        """
        # Search first
        search_results = await self.search(query, max_results=num_results)

        # Extract content from each result
        extracted = []
        for result in search_results:
            content = await self.extract_content(result['url'], max_content_length)
            if content.get('success'):
                # Merge search metadata with extracted content
                extracted.append({
                    **result,
                    **content
                })

        logger.info(f"Extracted content from {len(extracted)}/{len(search_results)} results")
        return extracted


def create_search_client() -> InternetSearch:
    """Factory function to create search client."""
    return InternetSearch()
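A minimal usage sketch for the module above. The import path follows the package layout in the file list (jarviscore/execution/search.py); the query string is illustrative. The async context manager opens and closes the aiohttp session automatically.

import asyncio
from jarviscore.execution.search import InternetSearch

async def main():
    async with InternetSearch() as search:
        # Plain search: returns [{"title", "snippet", "url"}, ...]
        results = await search.search("Python asyncio tutorial", max_results=3)
        for r in results:
            print(r["title"], "->", r["url"])

        # One-call variant: search, then pull readable text from each hit
        pages = await search.search_and_extract("Python asyncio tutorial")
        for page in pages:
            print(page["title"], page["word_count"], "words")

asyncio.run(main())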
jarviscore/orchestration/__init__.py
@@ -0,0 +1,18 @@

"""
Orchestration module for JarvisCore Framework

Workflow execution engine with dependency management.
"""

from .engine import WorkflowEngine
from .claimer import StepClaimer
from .dependency import DependencyManager
from .status import StatusManager, StepStatus

__all__ = [
    'WorkflowEngine',
    'StepClaimer',
    'DependencyManager',
    'StatusManager',
    'StepStatus',
]
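These re-exports define the package's public orchestration surface, so callers can import from the package root rather than the submodules:

from jarviscore.orchestration import WorkflowEngine, StepClaimer, DependencyManager, StepStatus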
jarviscore/orchestration/claimer.py
@@ -0,0 +1,101 @@

"""
Step Claimer - Matches workflow steps to capable agents

Simplified from integration-agent (260 lines → 80 lines)
Focused on capability matching without Kafka integration.
"""
import logging
from typing import List, Dict, Any, Optional

logger = logging.getLogger(__name__)


class StepClaimer:
    """
    Matches workflow steps to agents based on capabilities.

    Simplified from integration-agent's version.
    Removes: lazy evaluation, external dependency tracking, Kafka claims
    Keeps: core capability matching logic
    """

    def __init__(self, agents: List):
        """
        Initialize step claimer with available agents.

        Args:
            agents: List of Agent instances available for claiming
        """
        self.agents = agents
        self._capability_index = self._build_capability_index()
        logger.info(f"Step claimer initialized with {len(agents)} agents")

    def _build_capability_index(self) -> Dict[str, List]:
        """Build an index mapping each role/capability to its agents."""
        index = {}
        for agent in self.agents:
            # Index by role
            if agent.role not in index:
                index[agent.role] = []
            index[agent.role].append(agent)

            # Index by each capability
            for cap in agent.capabilities:
                if cap not in index:
                    index[cap] = []
                index[cap].append(agent)

        logger.debug(f"Built capability index with {len(index)} entries")
        return index

    def find_agent(self, step: Dict[str, Any]) -> Optional[Any]:
        """
        Find an agent capable of executing this step.

        Args:
            step: Step specification containing one of:
                - agent: Role or capability required
                - role: Alternative key for agent role
                - capability: Alternative key for capability

        Returns:
            Agent instance that can handle the step, or None

        Example:
            step = {"agent": "scraper", "task": "Scrape website"}
            agent = claimer.find_agent(step)
        """
        # Try the different keys for the agent requirement
        required = (
            step.get("agent") or
            step.get("role") or
            step.get("capability")
        )

        if not required:
            logger.warning(f"Step has no agent/role/capability specified: {step}")
            return None

        # Look up in capability index
        agents = self._capability_index.get(required, [])

        if not agents:
            logger.warning(f"No agent found for requirement: {required}")
            return None

        # Return first matching agent
        agent = agents[0]
        logger.debug(f"Matched step to agent: {agent.agent_id}")
        return agent

    def find_all_agents(self, capability: str) -> List[Any]:
        """
        Find all agents with a specific capability.

        Args:
            capability: Required capability

        Returns:
            List of agents with the capability
        """
        return self._capability_index.get(capability, [])
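A sketch of capability matching using stand-in agents. StubAgent is hypothetical: it models only the three attributes StepClaimer actually reads (agent_id, role, capabilities); a real deployment would pass jarviscore Agent instances.

from dataclasses import dataclass, field
from typing import List

from jarviscore.orchestration import StepClaimer

@dataclass
class StubAgent:
    # Stand-in with just the attributes the claimer reads
    agent_id: str
    role: str
    capabilities: List[str] = field(default_factory=list)

claimer = StepClaimer([
    StubAgent("a1", role="scraper", capabilities=["web_scraping"]),
    StubAgent("a2", role="analyst", capabilities=["summarize", "web_scraping"]),
])

# Lookup works by role or by any declared capability
print(claimer.find_agent({"agent": "scraper"}).agent_id)   # a1
print(len(claimer.find_all_agents("web_scraping")))        # 2

Note that find_agent() always returns the first indexed match, so with multiple capable agents there is no load balancing at this layer.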
jarviscore/orchestration/dependency.py
@@ -0,0 +1,143 @@

"""
Dependency Manager - Resolves step dependencies

Simplified from integration-agent
Removes: Kafka integration, complex P2P queries
Keeps: Memory cache, basic waiting logic
"""
import logging
import asyncio
from typing import Dict, List, Any, Optional

logger = logging.getLogger(__name__)


class DependencyManager:
    """
    Manages step dependencies and resolution.

    Simplified from integration-agent's 3-tier system:
    - Tier 1: Memory cache (kept)
    - Tier 2: P2P queries (simplified - future)
    - Tier 3: Kafka (removed for MVP)
    """

    def __init__(self, memory_cache: Optional[Dict] = None):
        """
        Initialize dependency manager.

        Args:
            memory_cache: Optional shared memory cache for step outputs
        """
        self.memory = memory_cache or {}
        self.waiting_steps: Dict[str, List[str]] = {}  # step_id -> [dep_ids]
        logger.info("Dependency manager initialized")

    async def wait_for(
        self,
        dependencies: List[str],
        memory: Dict[str, Any],
        timeout: float = 300.0
    ) -> Dict[str, Any]:
        """
        Wait for dependencies to be satisfied.

        Args:
            dependencies: List of step IDs this step depends on
            memory: Workflow memory containing step outputs
            timeout: Maximum time to wait in seconds

        Returns:
            Dictionary of dependency_id -> output

        Raises:
            TimeoutError: If dependencies are not satisfied within timeout

        Example:
            # Step 2 depends on step 1
            deps = await manager.wait_for(['step1'], memory)
            input_data = deps['step1']['output']
        """
        if not dependencies:
            return {}

        logger.info(f"Waiting for {len(dependencies)} dependencies: {dependencies}")

        start_time = asyncio.get_event_loop().time()
        resolved = {}

        for dep_id in dependencies:
            # Check if already in memory
            if dep_id in memory:
                resolved[dep_id] = memory[dep_id]
                logger.debug(f"Dependency {dep_id} found in memory")
                continue

            # Wait for the dependency to appear in memory
            logger.info(f"Waiting for dependency: {dep_id}")
            while dep_id not in memory:
                if asyncio.get_event_loop().time() - start_time > timeout:
                    raise TimeoutError(
                        f"Dependency {dep_id} not satisfied within {timeout}s"
                    )

                await asyncio.sleep(0.5)  # Poll every 500ms

            resolved[dep_id] = memory[dep_id]
            logger.debug(f"Dependency {dep_id} satisfied")

        logger.info(f"All dependencies satisfied: {list(resolved.keys())}")
        return resolved

    def check_dependencies(
        self,
        dependencies: List[str],
        memory: Dict[str, Any]
    ) -> tuple[bool, List[str]]:
        """
        Check whether dependencies are satisfied (non-blocking).

        Args:
            dependencies: List of step IDs to check
            memory: Workflow memory

        Returns:
            Tuple of (all_satisfied, missing_deps)
        """
        if not dependencies:
            return True, []

        missing = [dep for dep in dependencies if dep not in memory]

        if missing:
            logger.debug(f"Missing dependencies: {missing}")
            return False, missing

        return True, []

    def register_waiting(self, step_id: str, dependencies: List[str]):
        """
        Register a step as waiting for dependencies.

        Args:
            step_id: Step that is waiting
            dependencies: List of dependency step IDs
        """
        self.waiting_steps[step_id] = dependencies
        logger.debug(f"Step {step_id} waiting for: {dependencies}")

    def resolve_step(self, step_id: str):
        """
        Mark a step as resolved (completed).

        Args:
            step_id: Step that has been completed
        """
        if step_id in self.waiting_steps:
            del self.waiting_steps[step_id]
            logger.debug(f"Step {step_id} resolved")

    def get_waiting_steps(self) -> Dict[str, List[str]]:
        """Get all steps currently waiting for dependencies."""
        return self.waiting_steps.copy()
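A sketch of the polling behavior, assuming two tasks share one workflow-memory dict. One task publishes a step result while another blocks in wait_for() until it appears; the step IDs and payload are illustrative.

import asyncio
from jarviscore.orchestration import DependencyManager

async def demo():
    manager = DependencyManager()
    memory = {}  # shared workflow memory: step_id -> output

    async def producer():
        await asyncio.sleep(1.0)  # simulate step1 doing work
        memory["step1"] = {"output": "scraped data"}

    asyncio.create_task(producer())

    # Non-blocking check: step1 has not finished yet
    ok, missing = manager.check_dependencies(["step1"], memory)
    print(ok, missing)  # False ['step1']

    # Blocking wait: polls every 500ms until step1 lands in memory
    deps = await manager.wait_for(["step1"], memory, timeout=10.0)
    print(deps["step1"]["output"])  # scraped data

asyncio.run(demo())

Because resolution is a poll over a plain dict rather than an event subscription, worst-case latency per dependency is one poll interval (0.5s), which is the trade-off this MVP makes for dropping the Kafka tier.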