sunholo 0.144.11__py3-none-any.whl → 0.145.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sunholo/agents/fastapi/vac_routes.py +14 -5
- sunholo/invoke/async_task_runner.py +619 -0
- sunholo/mcp/vac_mcp_server_fastmcp.py +14 -3
- {sunholo-0.144.11.dist-info → sunholo-0.145.0.dist-info}/METADATA +1 -1
- {sunholo-0.144.11.dist-info → sunholo-0.145.0.dist-info}/RECORD +9 -8
- {sunholo-0.144.11.dist-info → sunholo-0.145.0.dist-info}/WHEEL +0 -0
- {sunholo-0.144.11.dist-info → sunholo-0.145.0.dist-info}/entry_points.txt +0 -0
- {sunholo-0.144.11.dist-info → sunholo-0.145.0.dist-info}/licenses/LICENSE.txt +0 -0
- {sunholo-0.144.11.dist-info → sunholo-0.145.0.dist-info}/top_level.txt +0 -0
@@ -395,11 +395,14 @@ class VACRoutesFastAPI:
|
|
395
395
|
## MCP Server Integration
|
396
396
|
|
397
397
|
When VACMCPServer is available, the following happens automatically:
|
398
|
-
1. MCP server is mounted at /mcp endpoint
|
398
|
+
1. MCP server is mounted at /mcp/mcp endpoint (NOTE: /mcp/mcp not /mcp!)
|
399
399
|
2. Built-in VAC tools are automatically registered:
|
400
400
|
- vac_stream, vac_query, list_available_vacs, get_vac_info
|
401
401
|
3. You can add custom MCP tools using add_mcp_tool()
|
402
|
-
4. Claude Desktop/Code can connect to http://your-server/mcp
|
402
|
+
4. Claude Desktop/Code can connect to http://your-server/mcp/mcp
|
403
|
+
|
404
|
+
IMPORTANT: The endpoint is /mcp/mcp to avoid the MCP app intercepting other routes.
|
405
|
+
DO NOT change the mounting point to "" (root) as it will break other FastAPI routes!
|
403
406
|
|
404
407
|
## Complete Example
|
405
408
|
|
@@ -696,9 +699,15 @@ class VACRoutesFastAPI:
|
|
696
699
|
# so we can't easily check if it's configured. The error will be
|
697
700
|
# caught below if lifespan is missing.
|
698
701
|
|
699
|
-
# Mount at
|
700
|
-
|
701
|
-
|
702
|
+
# CRITICAL: Mount at /mcp/mcp (NOT at root "")
|
703
|
+
# - The MCP app from get_http_app() has path="" configured
|
704
|
+
# - Mounting at "/mcp/mcp" creates the /mcp/mcp endpoint
|
705
|
+
# - This prevents the MCP app from intercepting other routes like /info, /test
|
706
|
+
#
|
707
|
+
# DO NOT mount at "" (root) - it will break all other FastAPI routes!
|
708
|
+
# The endpoint will be available at /mcp/mcp as expected by Claude Code
|
709
|
+
self.app.mount("/mcp/mcp", mcp_app)
|
710
|
+
log.info("✅ MCP server mounted at /mcp/mcp endpoint")
|
702
711
|
|
703
712
|
except RuntimeError as e:
|
704
713
|
if "Task group is not initialized" in str(e):
|
@@ -0,0 +1,619 @@
|
|
1
|
+
import asyncio
|
2
|
+
from typing import Callable, Any, AsyncGenerator, Dict, Optional, Union
|
3
|
+
from dataclasses import dataclass, field
|
4
|
+
import time
|
5
|
+
import traceback
|
6
|
+
import logging
|
7
|
+
from ..custom_logging import setup_logging
|
8
|
+
from tenacity import AsyncRetrying, retry_if_exception_type, wait_random_exponential, stop_after_attempt
|
9
|
+
|
10
|
+
log = setup_logging("sunholo_AsyncTaskRunner")
|
11
|
+
|
12
|
+
@dataclass
class CallbackContext:
    """Context passed to callbacks with task information and shared state.

    One instance is built per queue message in
    AsyncTaskRunner._process_message_with_callbacks(); the optional fields
    (result, error, retry_attempt, elapsed_time) are filled in depending on
    the message type before the callback is invoked.
    """
    task_name: str  # name of the task the message refers to (the scheduled func's __name__)
    elapsed_time: float = 0  # seconds the task has been running (heartbeat/timeout messages)
    task_metadata: Dict[str, Any] = field(default_factory=dict)  # args/kwargs plus TaskConfig.metadata
    shared_state: Dict[str, Any] = field(default_factory=dict)  # runner-wide mutable state shared by all callbacks
    result: Any = None  # task return value (task_complete messages only)
    # The queue messages carry str(exception) rather than the exception object,
    # so this may hold either; default callbacks str() it before use.
    error: Optional[Union[Exception, str]] = None
    retry_attempt: int = 0  # tenacity attempt number (retry messages only)
    message_type: str = ""  # raw message type, e.g. 'heartbeat', 'task_complete', 'timeout'
|
+
@dataclass
class TaskConfig:
    """Per-task configuration for timeout, retry, and callbacks.

    Any field left as None falls back to the corresponding
    AsyncTaskRunner-wide setting; see _run_with_retries_and_timeout for the
    resolution order.
    """
    timeout: Optional[int] = None  # soft timeout in seconds (overrides the runner default)
    retry_enabled: Optional[bool] = None  # enable/disable tenacity retries for this task
    retry_kwargs: Optional[dict] = None  # kwargs merged over the default AsyncRetrying config
    heartbeat_extends_timeout: Optional[bool] = None  # if True, heartbeats reset the soft timeout
    hard_timeout: Optional[int] = None  # absolute ceiling in seconds, never extended by heartbeats
    callbacks: Optional[Dict[str, Callable]] = None  # per-task callbacks; take priority over runner-level ones
    metadata: Dict[str, Any] = field(default_factory=dict)  # arbitrary data merged into CallbackContext.task_metadata
35
|
+
class AsyncTaskRunner:
|
36
|
+
def __init__(self,
|
37
|
+
retry_enabled: bool = False,
|
38
|
+
retry_kwargs: dict = None,
|
39
|
+
timeout: int = 120,
|
40
|
+
max_concurrency: int = 20,
|
41
|
+
heartbeat_extends_timeout: bool = False,
|
42
|
+
hard_timeout: int = None,
|
43
|
+
callbacks: Optional[Dict[str, Callable]] = None,
|
44
|
+
shared_state: Optional[Dict[str, Any]] = None,
|
45
|
+
use_default_callbacks: bool = True,
|
46
|
+
verbose: bool = True):
|
47
|
+
"""
|
48
|
+
Initialize AsyncTaskRunner with configurable timeout behavior and callbacks.
|
49
|
+
|
50
|
+
By default, AsyncTaskRunner uses built-in callbacks that automatically manage task state,
|
51
|
+
making it easy to use without any configuration. Just create, add tasks, and get results!
|
52
|
+
|
53
|
+
Args:
|
54
|
+
retry_enabled: Whether to enable retries globally
|
55
|
+
retry_kwargs: Global retry configuration for tenacity
|
56
|
+
timeout: Base timeout for tasks in seconds (default: 120)
|
57
|
+
max_concurrency: Maximum concurrent tasks (default: 20)
|
58
|
+
heartbeat_extends_timeout: If True, heartbeats reset the timeout timer
|
59
|
+
hard_timeout: Maximum absolute timeout regardless of heartbeats (seconds).
|
60
|
+
If None, defaults to timeout * 5 when heartbeat_extends_timeout=True
|
61
|
+
callbacks: Dict of custom callbacks to override defaults:
|
62
|
+
- on_heartbeat: async (context: CallbackContext) -> None
|
63
|
+
- on_task_start: async (context: CallbackContext) -> None
|
64
|
+
- on_task_complete: async (context: CallbackContext) -> None
|
65
|
+
- on_task_error: async (context: CallbackContext) -> None
|
66
|
+
- on_retry: async (context: CallbackContext) -> None
|
67
|
+
- on_timeout: async (context: CallbackContext) -> None
|
68
|
+
shared_state: Custom shared state dict. If None, creates default structure with:
|
69
|
+
- results: Dict[str, Any] - Task results by task name
|
70
|
+
- errors: Dict[str, str] - Error messages by task name
|
71
|
+
- completed: List[str] - Completed task names
|
72
|
+
- started: List[str] - Started task names
|
73
|
+
- retries: List[str] - Retry attempt records
|
74
|
+
- timed_out: List[str] - Timed out task names
|
75
|
+
use_default_callbacks: If True (default), use built-in callbacks that:
|
76
|
+
- Automatically populate shared_state with results and errors
|
77
|
+
- Log task progress with emojis (🚀 start, ✅ complete, ❌ error, etc.)
|
78
|
+
- Track task lifecycle (started, completed, retried, timed out)
|
79
|
+
Set to False for full manual control
|
80
|
+
verbose: If True (default), default callbacks print status messages.
|
81
|
+
If False, default callbacks work silently (still populate state)
|
82
|
+
|
83
|
+
Default Callbacks Behavior:
|
84
|
+
When use_default_callbacks=True (default), the following happens automatically:
|
85
|
+
- on_task_start: Adds task to 'started' list, logs "🚀 Starting task: {name}"
|
86
|
+
- on_task_complete: Stores result in 'results', adds to 'completed', logs "✅ {name} completed: {result}"
|
87
|
+
- on_task_error: Stores error in 'errors' (truncated to 500 chars), logs "❌ {name} failed: {error}"
|
88
|
+
- on_retry: Tracks retry attempts in 'retries', logs "🔄 Retry #{n} for {name}"
|
89
|
+
- on_timeout: Adds to 'timed_out', stores timeout error, logs "⏱️ {name} timed out"
|
90
|
+
- on_heartbeat: Silent by default (only logs in DEBUG mode)
|
91
|
+
|
92
|
+
Examples:
|
93
|
+
# Simplest usage - everything automatic
|
94
|
+
>>> runner = AsyncTaskRunner()
|
95
|
+
>>> runner.add_task(fetch_data, "api_endpoint")
|
96
|
+
>>> results = await runner.get_aggregated_results()
|
97
|
+
>>> print(results['results']) # {'fetch_data': 'data from api'}
|
98
|
+
|
99
|
+
# Silent mode - no console output but still collects results
|
100
|
+
>>> runner = AsyncTaskRunner(verbose=False)
|
101
|
+
|
102
|
+
# Override just one callback, keep rest as defaults
|
103
|
+
>>> runner = AsyncTaskRunner(
|
104
|
+
... callbacks={'on_task_complete': my_custom_complete_handler}
|
105
|
+
... )
|
106
|
+
|
107
|
+
# Full manual control - no default callbacks
|
108
|
+
>>> runner = AsyncTaskRunner(use_default_callbacks=False)
|
109
|
+
"""
|
110
|
+
self.tasks = []
|
111
|
+
self.retry_enabled = retry_enabled
|
112
|
+
self.retry_kwargs = retry_kwargs or {}
|
113
|
+
self.timeout = timeout
|
114
|
+
self.semaphore = asyncio.Semaphore(max_concurrency)
|
115
|
+
self.heartbeat_extends_timeout = heartbeat_extends_timeout
|
116
|
+
self.verbose = verbose
|
117
|
+
|
118
|
+
# Initialize default shared_state structure if not provided
|
119
|
+
if shared_state is None:
|
120
|
+
self.shared_state = {
|
121
|
+
'results': {},
|
122
|
+
'errors': {},
|
123
|
+
'completed': [],
|
124
|
+
'started': [],
|
125
|
+
'retries': [],
|
126
|
+
'timed_out': []
|
127
|
+
}
|
128
|
+
else:
|
129
|
+
self.shared_state = shared_state
|
130
|
+
# Ensure basic keys exist even in custom shared_state
|
131
|
+
self.shared_state.setdefault('results', {})
|
132
|
+
self.shared_state.setdefault('errors', {})
|
133
|
+
self.shared_state.setdefault('completed', [])
|
134
|
+
|
135
|
+
# Set up callbacks
|
136
|
+
self.global_callbacks = self._setup_callbacks(callbacks, use_default_callbacks)
|
137
|
+
|
138
|
+
# Set hard timeout
|
139
|
+
if hard_timeout is not None:
|
140
|
+
self.hard_timeout = hard_timeout
|
141
|
+
elif heartbeat_extends_timeout:
|
142
|
+
self.hard_timeout = timeout * 5 # Default to 5x base timeout
|
143
|
+
else:
|
144
|
+
self.hard_timeout = timeout # Same as regular timeout
|
145
|
+
|
146
|
+
def _setup_callbacks(self, user_callbacks: Optional[Dict[str, Callable]], use_defaults: bool) -> Dict[str, Callable]:
    """Setup callbacks, using defaults if requested and filling in any missing callbacks.

    Args:
        user_callbacks: Optional user-supplied callbacks keyed by event name
            ('on_task_start', 'on_task_complete', 'on_task_error', 'on_retry',
            'on_timeout', 'on_heartbeat'). Entries override the defaults.
        use_defaults: When True, start from the built-in default callbacks;
            when False, start from an empty dict (full manual control).

    Returns:
        Dict of event name -> async callback. May be empty when
        use_defaults=False and no user callbacks are given.
    """
    callbacks = {}

    if use_defaults:
        # Define default callbacks. These are closures over `self` so they
        # can honour self.verbose at call time.
        async def default_on_task_start(ctx: CallbackContext):
            """Default callback for task start."""
            ctx.shared_state.setdefault('started', []).append(ctx.task_name)
            if self.verbose:
                log.info(f"🚀 Starting task: {ctx.task_name}")

        async def default_on_task_complete(ctx: CallbackContext):
            """Default callback for task completion."""
            ctx.shared_state.setdefault('results', {})[ctx.task_name] = ctx.result
            ctx.shared_state.setdefault('completed', []).append(ctx.task_name)
            if self.verbose:
                log.info(f"✅ {ctx.task_name} completed: {ctx.result}")

        async def default_on_task_error(ctx: CallbackContext):
            """Default callback for task errors."""
            # Store truncated error to avoid huge state
            error_str = str(ctx.error)[:500] if ctx.error else "Unknown error"
            ctx.shared_state.setdefault('errors', {})[ctx.task_name] = error_str
            if self.verbose:
                log.warning(f"❌ {ctx.task_name} failed: {error_str[:100]}")

        async def default_on_retry(ctx: CallbackContext):
            """Default callback for retry attempts."""
            retry_info = f"{ctx.task_name}_attempt_{ctx.retry_attempt}"
            ctx.shared_state.setdefault('retries', []).append(retry_info)
            if self.verbose:
                log.info(f"🔄 Retry #{ctx.retry_attempt} for {ctx.task_name}")

        async def default_on_timeout(ctx: CallbackContext):
            """Default callback for timeouts."""
            ctx.shared_state.setdefault('timed_out', []).append(ctx.task_name)
            ctx.shared_state.setdefault('errors', {})[ctx.task_name] = f"Timeout after {ctx.elapsed_time}s"
            if self.verbose:
                log.warning(f"⏱️ {ctx.task_name} timed out after {ctx.elapsed_time}s")

        async def default_on_heartbeat(ctx: CallbackContext):
            """Default callback for heartbeats - only log in debug mode."""
            if log.isEnabledFor(logging.DEBUG):
                log.debug(f"💓 Heartbeat for {ctx.task_name}: {ctx.elapsed_time}s")

        # Set default callbacks
        callbacks = {
            'on_task_start': default_on_task_start,
            'on_task_complete': default_on_task_complete,
            'on_task_error': default_on_task_error,
            'on_retry': default_on_retry,
            'on_timeout': default_on_timeout,
            'on_heartbeat': default_on_heartbeat
        }

    # Override with user callbacks if provided
    if user_callbacks:
        callbacks.update(user_callbacks)

    return callbacks
|
+
def add_task(self,
             func: Callable[..., Any],
             *args: Any,
             task_config: Optional[TaskConfig] = None,
             **kwargs: Any):
    """
    Adds a task to the list of tasks to be executed, with optional per-task configuration.

    Args:
        func: The function to be executed. May be any callable; if it has no
            __name__ attribute (e.g. a functools.partial), repr(func) is used
            as the task name instead.
        *args: Positional arguments for the function.
        task_config: Optional per-task configuration for timeout, retry, and callbacks.
        **kwargs: Keyword arguments for the function.
    """
    # Not every callable defines __name__ (functools.partial objects, for
    # example, do not); fall back to repr() so such callables can still be
    # scheduled instead of raising AttributeError here.
    func_name = getattr(func, '__name__', repr(func))
    log.info(f"Adding task: {func_name} with args: {args}, kwargs: {kwargs}, config: {task_config}")
    self.tasks.append((func_name, func, args, kwargs, task_config))
|
+
async def run_async_with_callbacks(self) -> AsyncGenerator[CallbackContext, None]:
    """
    Runs all tasks and automatically processes messages through callbacks.

    Every raw message produced by run_async_as_completed() is routed through
    _process_message_with_callbacks(); whenever a callback was registered for
    the message, the resulting CallbackContext is yielded so callers can
    monitor progress.
    """
    async for raw_message in self.run_async_as_completed():
        ctx = await self._process_message_with_callbacks(raw_message)
        if ctx is not None:
            yield ctx
|
+
async def _process_message_with_callbacks(self, message: Dict[str, Any]) -> Optional[CallbackContext]:
    """Process a message and invoke appropriate callbacks.

    Args:
        message: Raw message dict from run_async_as_completed(). Carries a
            'type' key ('heartbeat', 'task_complete', 'task_error',
            'task_start', 'retry' or 'timeout') and a 'func_name' (or 'name')
            key identifying the task.

    Returns:
        The CallbackContext handed to the callback, or None when no callback
        is registered for this message type. Note: when a registered callback
        raises, the error is logged and the context is still returned.
    """
    message_type = message.get('type')
    # Heartbeat messages use 'name' while the others use 'func_name'.
    func_name = message.get('func_name') or message.get('name', 'unknown')

    # Find task config for this function.
    # NOTE: tasks are matched by function name, so two tasks added with the
    # same function name share the first entry's config/metadata here.
    task_config = None
    task_metadata = {}
    for name, _, args, kwargs, config in self.tasks:
        if name == func_name:
            task_config = config
            task_metadata = {'args': args, 'kwargs': kwargs}
            if config and config.metadata:
                task_metadata.update(config.metadata)
            break

    # Create callback context
    context = CallbackContext(
        task_name=func_name,
        elapsed_time=message.get('elapsed_time', 0),
        task_metadata=task_metadata,
        shared_state=self.shared_state,
        message_type=message_type
    )

    # Determine which callback to use (task-specific overrides global)
    callback = None
    task_callbacks = task_config.callbacks if task_config and task_config.callbacks else {}

    if message_type == 'heartbeat':
        callback = task_callbacks.get('on_heartbeat') or self.global_callbacks.get('on_heartbeat')
        context.elapsed_time = message.get('elapsed_time', 0)

    elif message_type == 'task_complete':
        callback = task_callbacks.get('on_task_complete') or self.global_callbacks.get('on_task_complete')
        context.result = message.get('result')

    elif message_type == 'task_error':
        callback = task_callbacks.get('on_task_error') or self.global_callbacks.get('on_task_error')
        context.error = message.get('error')

    elif message_type == 'task_start':
        callback = task_callbacks.get('on_task_start') or self.global_callbacks.get('on_task_start')

    elif message_type == 'retry':
        callback = task_callbacks.get('on_retry') or self.global_callbacks.get('on_retry')
        context.retry_attempt = message.get('retry_attempt', 0)
        context.error = message.get('error')

    elif message_type == 'timeout':
        callback = task_callbacks.get('on_timeout') or self.global_callbacks.get('on_timeout')
        context.elapsed_time = message.get('elapsed_time', self.timeout)

    # Invoke callback if found. Only coroutine functions are supported; a
    # registered non-async callable is silently skipped (context still returned).
    if callback and asyncio.iscoroutinefunction(callback):
        try:
            await callback(context)
            return context
        except Exception as e:
            log.error(f"Error in callback for {message_type}: {e}\n{traceback.format_exc()}")

    return context if callback else None
|
+
async def get_aggregated_results(self) -> Dict[str, Any]:
    """
    Run all tasks with callbacks and return the shared_state with aggregated results.

    This is a convenience method that runs all tasks and returns the populated shared_state.
    When using default callbacks, the returned dict will contain:
    - results: Dict[str, Any] with task results keyed by task name
    - errors: Dict[str, str] with error messages for failed tasks
    - completed: List[str] of completed task names
    - started: List[str] of started task names
    - retries: List[str] of retry attempt records
    - timed_out: List[str] of timed out task names

    Returns:
        Dict containing the shared_state with all task results and metadata.
        This is the same object as self.shared_state, not a copy, so later
        mutations are visible to the caller.

    Example:
        >>> runner = AsyncTaskRunner()
        >>> runner.add_task(fetch_data, "api")
        >>> runner.add_task(process_data, "raw_data")
        >>> results = await runner.get_aggregated_results()
        >>> print(results['results']['fetch_data'])  # Access specific result
        >>> if results['errors']:  # Check for any errors
        ...     print(f"Errors occurred: {results['errors']}")
    """
    # Drain the generator; the callbacks themselves populate shared_state.
    async for _ in self.run_async_with_callbacks():
        pass  # Callbacks handle state updates

    return self.shared_state
|
+
async def run_async_as_completed(self) -> AsyncGenerator[Dict[str, Any], None]:
    """
    Runs all tasks concurrently and yields results as they complete,
    while periodically sending heartbeat messages.

    This is the low-level API that yields raw messages.
    For a higher-level API with automatic callback processing, use run_async_with_callbacks().

    Yields:
        Dicts with a 'type' key: 'task_start', 'heartbeat', 'retry',
        'task_complete', 'task_error' or 'timeout' (see
        _run_with_retries_and_timeout and _send_heartbeat for their shapes).

    NOTE(review): the generator must be consumed until the sentinel; if it is
    abandoned early the monitor task is never awaited — confirm callers
    always drain it.
    """
    log.info("Running tasks asynchronously and yielding results as they complete")
    queue = asyncio.Queue()
    task_infos = []

    for name, func, args, kwargs, config in self.tasks:
        log.info(f"Executing task: {name=}, {func=} with args: {args}, kwargs: {kwargs}, config: {config}")
        completion_event = asyncio.Event()
        last_heartbeat = {'time': time.time()}  # Shared mutable object for heartbeat tracking

        # Send task_start message
        await queue.put({'type': 'task_start', 'func_name': name})

        # One worker task plus one heartbeat task per scheduled function;
        # both communicate through the shared queue.
        task_coro = self._run_with_retries_and_timeout(name, func, args, kwargs, config, queue, completion_event, last_heartbeat)
        task = asyncio.create_task(task_coro)
        heartbeat_coro = self._send_heartbeat(name, config, completion_event, queue, last_heartbeat)
        heartbeat_task = asyncio.create_task(heartbeat_coro)
        task_infos.append({
            'name': name,
            'task': task,
            'heartbeat_task': heartbeat_task,
            'completion_event': completion_event
        })
        log.info(f"Started task '{name}' and its heartbeat")

    log.info(f"Started async run with {len(self.tasks)} tasks and heartbeats")
    # The monitor waits for all tasks, cancels heartbeats, then enqueues the
    # None sentinel that terminates the consumer loop below.
    monitor = asyncio.create_task(self._monitor_tasks(task_infos, queue))

    while True:
        message = await queue.get()
        if message is None:
            log.info("Received sentinel. Exiting message loop.")
            break
        log.info(f"Received message from queue: {message}")
        yield message

    await monitor
    log.info("All tasks and heartbeats have completed")
|
+
async def _monitor_tasks(self, task_infos, queue):
    """
    Waits for every main task to finish, tears down the per-task heartbeat
    tasks, then pushes a sentinel (None) onto the queue so the consumer loop
    in run_async_as_completed() knows to stop.
    """
    log.info("Monitor: Waiting for all main tasks to complete")
    # return_exceptions=True: a failing task must not abort the monitor.
    await asyncio.gather(*(info['task'] for info in task_infos), return_exceptions=True)
    log.info("Monitor: All main tasks have completed")

    # Stop each heartbeat task and wait for its cancellation to land.
    for info in task_infos:
        hb_task = info['heartbeat_task']
        hb_task.cancel()
        try:
            await hb_task
        except asyncio.CancelledError:
            pass
        log.info(f"Monitor: Heartbeat for task '{info['name']}' has been canceled")

    await queue.put(None)
    log.info("Monitor: Sent sentinel to queue")
|
+
async def _run_with_retries_and_timeout(self,
                                        name: str,
                                        func: Callable[..., Any],
                                        args: tuple,
                                        kwargs: dict,
                                        config: Optional[TaskConfig],
                                        queue: asyncio.Queue,
                                        completion_event: asyncio.Event,
                                        last_heartbeat: dict) -> None:
    """Execute one task with effective retry/timeout settings, reporting via the queue.

    Never raises to the caller: every outcome is converted into queue
    messages ('retry', 'task_complete', 'timeout', 'task_error') and
    completion_event is always set in the finally block so the heartbeat
    loop stops.
    """
    # Determine effective configuration (per-task overrides global)
    retry_enabled = config.retry_enabled if config and config.retry_enabled is not None else self.retry_enabled
    retry_kwargs = config.retry_kwargs if config and config.retry_kwargs else self.retry_kwargs
    timeout = config.timeout if config and config.timeout is not None else self.timeout
    heartbeat_extends = config.heartbeat_extends_timeout if config and config.heartbeat_extends_timeout is not None else self.heartbeat_extends_timeout

    # Calculate hard_timeout based on effective settings
    if config and config.hard_timeout is not None:
        hard_timeout = config.hard_timeout
    elif heartbeat_extends:
        hard_timeout = timeout * 5  # Default to 5x the effective timeout when heartbeat extends
    else:
        hard_timeout = timeout  # Same as effective timeout when no heartbeat extension

    try:
        log.info(f"run_with_retries_and_timeout: {name=}, {func=} with args: {args}, kwargs: {kwargs}")
        log.info(f"Effective config - timeout: {timeout}s, retry: {retry_enabled}, heartbeat_extends: {heartbeat_extends}, hard_timeout: {hard_timeout}s")

        if retry_enabled:
            # Baseline tenacity config: exponential backoff, 5 attempts,
            # retry on any Exception.
            retry_kwargs_final = {
                'wait': wait_random_exponential(multiplier=1, max=60),
                'stop': stop_after_attempt(5),
                'retry': retry_if_exception_type(Exception),
            }
            # Override with custom retry kwargs if provided
            if retry_kwargs:
                retry_kwargs_final.update(retry_kwargs)

            retry_attempt = 0
            last_exception = None

            try:
                async for attempt in AsyncRetrying(**retry_kwargs_final):
                    with attempt:
                        retry_attempt = attempt.retry_state.attempt_number

                        # Send retry message for attempts > 1
                        if retry_attempt > 1:
                            await queue.put({
                                'type': 'retry',
                                'func_name': name,
                                'retry_attempt': retry_attempt,
                                'error': str(last_exception) if last_exception else None
                            })

                        log.info(f"Starting task '{name}' with retry (attempt {retry_attempt})")

                        try:
                            result = await self._execute_task_with_timeout(
                                func, name, last_heartbeat, timeout, heartbeat_extends, hard_timeout, *args, **kwargs
                            )
                            await queue.put({'type': 'task_complete', 'func_name': name, 'result': result})
                            log.info(f"Sent 'task_complete' message for task '{name}'")
                            return
                        except Exception as e:
                            # Remember the error so the next attempt's 'retry'
                            # message can report what failed.
                            last_exception = e
                            raise  # Re-raise to trigger retry
            except Exception as final_error:
                # All retries exhausted
                log.error(f"All retry attempts failed for task '{name}': {final_error}")
                raise
        else:
            log.info(f"Starting task '{name}' with no retry")
            result = await self._execute_task_with_timeout(
                func, name, last_heartbeat, timeout, heartbeat_extends, hard_timeout, *args, **kwargs
            )
            await queue.put({'type': 'task_complete', 'func_name': name, 'result': result})
            log.info(f"Sent 'task_complete' message for task '{name}'")
    except asyncio.TimeoutError as e:
        # A timeout produces BOTH a 'timeout' and a 'task_error' message so
        # either callback can react.
        log.error(f"Task '{name}' timed out: {e}")
        await queue.put({
            'type': 'timeout',
            'func_name': name,
            'elapsed_time': timeout,
            'error': str(e)
        })
        await queue.put({'type': 'task_error', 'func_name': name, 'error': str(e)})
    except Exception as e:
        log.error(f"Error in task '{name}': {e}\n{traceback.format_exc()}")
        await queue.put({'type': 'task_error', 'func_name': name, 'error': f'{e}\n{traceback.format_exc()}'})
    finally:
        log.info(f"Task '{name}' completed.")
        completion_event.set()
|
+
async def _execute_task_with_timeout(self,
|
490
|
+
func: Callable[..., Any],
|
491
|
+
name: str,
|
492
|
+
last_heartbeat: dict,
|
493
|
+
timeout: int,
|
494
|
+
heartbeat_extends: bool,
|
495
|
+
hard_timeout: int,
|
496
|
+
*args: Any,
|
497
|
+
**kwargs: Any) -> Any:
|
498
|
+
"""
|
499
|
+
Execute task with either fixed timeout or heartbeat-extendable timeout.
|
500
|
+
"""
|
501
|
+
if not heartbeat_extends:
|
502
|
+
# Original behavior - fixed timeout
|
503
|
+
return await asyncio.wait_for(self._execute_task(func, *args, **kwargs), timeout=timeout)
|
504
|
+
else:
|
505
|
+
# New behavior - heartbeat extends timeout
|
506
|
+
return await self._execute_task_with_heartbeat_timeout(
|
507
|
+
func, name, last_heartbeat, timeout, hard_timeout, *args, **kwargs
|
508
|
+
)
|
509
|
+
|
510
|
+
async def _execute_task_with_heartbeat_timeout(self,
                                               func: Callable[..., Any],
                                               name: str,
                                               last_heartbeat: dict,
                                               timeout: int,
                                               hard_timeout: int,
                                               *args: Any,
                                               **kwargs: Any) -> Any:
    """
    Execute task with heartbeat-extendable timeout and hard timeout limit.

    The soft timeout is measured from last_heartbeat['time'] (updated by
    _send_heartbeat when heartbeat_extends_timeout is on), so an active
    heartbeat keeps the task alive. The hard timeout is measured from this
    call's start and can never be extended.

    Raises:
        asyncio.TimeoutError: when the hard timeout elapses, or when no
            heartbeat has been recorded within `timeout` seconds.
    """
    start_time = time.time()
    task = asyncio.create_task(self._execute_task(func, *args, **kwargs))

    # Poll roughly once per second, re-checking both limits each pass.
    while not task.done():
        current_time = time.time()

        # Check hard timeout first (absolute limit)
        if current_time - start_time > hard_timeout:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
            raise asyncio.TimeoutError(f"Hard timeout exceeded ({hard_timeout}s)")

        # Check soft timeout (extends with heartbeats)
        time_since_heartbeat = current_time - last_heartbeat['time']
        if time_since_heartbeat > timeout:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
            raise asyncio.TimeoutError(f"Timeout exceeded - no heartbeat for {timeout}s")

        # Wait a bit before checking again. shield() stops the 1s wait_for
        # from cancelling the real task when it merely times out; only the
        # explicit cancel() paths above actually stop the work.
        try:
            await asyncio.wait_for(asyncio.shield(task), timeout=1.0)
            break  # Task completed
        except asyncio.TimeoutError:
            continue  # Check timeouts again

    # Propagates the task's result or its exception.
    return await task
|
+
async def _execute_task(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
|
556
|
+
"""
|
557
|
+
Executes the given task function and returns its result.
|
558
|
+
|
559
|
+
Args:
|
560
|
+
func (Callable): The callable to execute.
|
561
|
+
*args: Positional arguments to pass to the callable.
|
562
|
+
**kwargs: Keyword arguments to pass to the callable.
|
563
|
+
|
564
|
+
Returns:
|
565
|
+
Any: The result of the task.
|
566
|
+
"""
|
567
|
+
async with self.semaphore: # Use semaphore to limit concurrent executions
|
568
|
+
if asyncio.iscoroutinefunction(func):
|
569
|
+
return await func(*args, **kwargs)
|
570
|
+
else:
|
571
|
+
return await asyncio.to_thread(func, *args, **kwargs)
|
572
|
+
|
573
|
+
async def _send_heartbeat(self,
                          func_name: str,
                          config: Optional[TaskConfig],
                          completion_event: asyncio.Event,
                          queue: asyncio.Queue,
                          last_heartbeat: dict,
                          interval: int = 2):
    """
    Sends periodic heartbeat updates to indicate the task is still in progress.
    Updates last_heartbeat time if heartbeat_extends_timeout is enabled.

    Runs until completion_event is set or the task is cancelled by
    _monitor_tasks. Note that heartbeat messages identify the task with a
    'name' key, unlike the other message types which use 'func_name'.

    Args:
        func_name (str): The name of the task function.
        config (Optional[TaskConfig]): Per-task configuration.
        completion_event (asyncio.Event): Event to signal when the task is completed.
        queue (asyncio.Queue): The queue to send heartbeat messages to.
        last_heartbeat (dict): Mutable dict containing the last heartbeat time.
        interval (int): How frequently to send heartbeat messages (in seconds).
    """
    # Determine if heartbeat extends timeout for this task
    heartbeat_extends = config.heartbeat_extends_timeout if config and config.heartbeat_extends_timeout is not None else self.heartbeat_extends_timeout

    start_time = time.time()
    log.info(f"Starting heartbeat for task '{func_name}' with interval {interval} seconds")
    try:
        while not completion_event.is_set():
            await asyncio.sleep(interval)
            current_time = time.time()
            elapsed_time = int(current_time - start_time)

            # Update last heartbeat time if heartbeat extends timeout;
            # _execute_task_with_heartbeat_timeout reads this to reset its
            # soft-timeout clock.
            if heartbeat_extends:
                last_heartbeat['time'] = current_time
                log.debug(f"Updated heartbeat time for task '{func_name}' at {current_time}")

            heartbeat_message = {
                'type': 'heartbeat',
                'name': func_name,
                'interval': interval,
                'elapsed_time': elapsed_time
            }
            log.info(f"Sending heartbeat for task '{func_name}', running for {elapsed_time} seconds")
            await queue.put(heartbeat_message)
    except asyncio.CancelledError:
        log.info(f"Heartbeat for task '{func_name}' has been canceled")
    finally:
        log.info(f"Heartbeat for task '{func_name}' stopped")
@@ -72,9 +72,20 @@ class VACMCPServer:
|
|
72
72
|
return self.server
|
73
73
|
|
74
74
|
def get_http_app(self):
    """Get the HTTP app for mounting in FastAPI.

    IMPORTANT: This returns an app with path="" configured.
    The VACRoutesFastAPI class mounts this at "/mcp/mcp" to create the /mcp/mcp endpoint.

    DO NOT change this to path="/mcp" as that would create double nesting when mounted.
    DO NOT mount at root "" as that would intercept all other FastAPI routes.

    The correct configuration is:
    - This method: path=""
    - VACRoutesFastAPI: mount at "/mcp/mcp"
    - Result: MCP endpoint at /mcp/mcp without breaking other routes

    Returns:
        The ASGI app produced by self.server.http_app(path="").
        NOTE(review): exact return type depends on the installed fastmcp
        version — confirm against its documentation.
    """
    return self.server.http_app(path="")
78
89
|
|
79
90
|
def add_tool(self, func: Callable, name: str = None, description: str = None):
|
80
91
|
"""
|
@@ -16,7 +16,7 @@ sunholo/agents/swagger.py,sha256=2tzGmpveUMmTREykZvVnDj3j295wyOMu7mUFDnXdY3c,106
|
|
16
16
|
sunholo/agents/fastapi/__init__.py,sha256=f7x7kiEjaNyBiOwJHLJ4vdOiePqkXdI52sIAAHtS-ms,141
|
17
17
|
sunholo/agents/fastapi/base.py,sha256=W-cyF8ZDUH40rc-c-Apw3-_8IIi2e4Y9qRtnoVnsc1Q,2521
|
18
18
|
sunholo/agents/fastapi/qna_routes.py,sha256=lKHkXPmwltu9EH3RMwmD153-J6pE7kWQ4BhBlV3to-s,3864
|
19
|
-
sunholo/agents/fastapi/vac_routes.py,sha256=
|
19
|
+
sunholo/agents/fastapi/vac_routes.py,sha256=5byAgoNXMUXt8tfTRZsJJZkZGuIvCx3wcQLtHH33vP0,61283
|
20
20
|
sunholo/agents/flask/__init__.py,sha256=dEoByI3gDNUOjpX1uVKP7uPjhfFHJubbiaAv3xLopnk,63
|
21
21
|
sunholo/agents/flask/base.py,sha256=vnpxFEOnCmt9humqj-jYPLfJcdwzsop9NorgkJ-tSaU,1756
|
22
22
|
sunholo/agents/flask/vac_routes.py,sha256=kaPUDyIH5KhCgeCEtag97qErGVZfqpY1ZEiX3y1_r-s,57505
|
@@ -102,6 +102,7 @@ sunholo/genai/process_funcs_cls.py,sha256=D6eNrc3vtTZzwdkacZNOSfit499N_o0C5AHspy
|
|
102
102
|
sunholo/genai/safety.py,sha256=mkFDO_BeEgiKjQd9o2I4UxB6XI7a9U-oOFjZ8LGRUC4,1238
|
103
103
|
sunholo/invoke/__init__.py,sha256=o1RhwBGOtVK0MIdD55fAIMCkJsxTksi8GD5uoqVKI-8,184
|
104
104
|
sunholo/invoke/async_class.py,sha256=ZMzxKQtelbYibu9Fac7P9OU3GorH8KxawZxSMv5EO9A,12514
|
105
|
+
sunholo/invoke/async_task_runner.py,sha256=8FjZMKAxFkb6qr0EZlSI2Likc2B7BRNUggf9jebfa9c,30407
|
105
106
|
sunholo/invoke/direct_vac_func.py,sha256=dACx3Zh7uZnuWLIFYiyLoyXUhh5-eUpd2RatDUd9ov8,9753
|
106
107
|
sunholo/invoke/invoke_vac_utils.py,sha256=sJc1edHTHMzMGXjji1N67c3iUaP7BmAL5nj82Qof63M,2053
|
107
108
|
sunholo/langfuse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -123,7 +124,7 @@ sunholo/mcp/mcp_manager.py,sha256=g75vv6XvM24U7uz366slE-p76Qs4AvVcsarHSF9qIvE,50
|
|
123
124
|
sunholo/mcp/sse_utils.py,sha256=LBugTxAIccQmcU2ueKIcvVlR2GjhVajwqHDnVn2s6e8,3173
|
124
125
|
sunholo/mcp/stdio_http_bridge.py,sha256=IunHOtnjKAkRWef3SJnqnAL2r2qBRpCH2k_Q_y0Tdf8,3237
|
125
126
|
sunholo/mcp/vac_mcp_server.py,sha256=MotoCw5lDsxCeVtwh1499yGFku9w-78xXhGkIHTUo3w,838
|
126
|
-
sunholo/mcp/vac_mcp_server_fastmcp.py,sha256=
|
127
|
+
sunholo/mcp/vac_mcp_server_fastmcp.py,sha256=3hOlrUtdw0L_x4pP9ViaQHff5Bw1dwE4YRfOsQBySTc,5016
|
127
128
|
sunholo/mcp/vac_tools.py,sha256=EznRzkWUYiby218kJlLPVq0fOI0aC4jj-oQLFDraWBM,6886
|
128
129
|
sunholo/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
129
130
|
sunholo/ollama/ollama_images.py,sha256=H2cpcNu88R4TwyfL_nnqkQhdvBQ2FPCAy4Ok__0yQmo,2351
|
@@ -182,9 +183,9 @@ sunholo/vertex/init.py,sha256=1OQwcPBKZYBTDPdyU7IM4X4OmiXLdsNV30C-fee2scQ,2875
|
|
182
183
|
sunholo/vertex/memory_tools.py,sha256=tBZxqVZ4InTmdBvLlOYwoSEWu4-kGquc-gxDwZCC4FA,7667
|
183
184
|
sunholo/vertex/safety.py,sha256=S9PgQT1O_BQAkcqauWncRJaydiP8Q_Jzmu9gxYfy1VA,2482
|
184
185
|
sunholo/vertex/type_dict_to_json.py,sha256=uTzL4o9tJRao4u-gJOFcACgWGkBOtqACmb6ihvCErL8,4694
|
185
|
-
sunholo-0.
|
186
|
-
sunholo-0.
|
187
|
-
sunholo-0.
|
188
|
-
sunholo-0.
|
189
|
-
sunholo-0.
|
190
|
-
sunholo-0.
|
186
|
+
sunholo-0.145.0.dist-info/licenses/LICENSE.txt,sha256=SdE3QjnD3GEmqqg9EX3TM9f7WmtOzqS1KJve8rhbYmU,11345
|
187
|
+
sunholo-0.145.0.dist-info/METADATA,sha256=2pEOf1GedxRXJQEgTMJGgwWQkhG3FclpsvyqgzWhHNg,18700
|
188
|
+
sunholo-0.145.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
189
|
+
sunholo-0.145.0.dist-info/entry_points.txt,sha256=bZuN5AIHingMPt4Ro1b_T-FnQvZ3teBes-3OyO0asl4,49
|
190
|
+
sunholo-0.145.0.dist-info/top_level.txt,sha256=wt5tadn5--5JrZsjJz2LceoUvcrIvxjHJe-RxuudxAk,8
|
191
|
+
sunholo-0.145.0.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|