foundry-mcp 0.8.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of foundry-mcp might be problematic. Click here for more details.

Files changed (153) hide show
  1. foundry_mcp/__init__.py +13 -0
  2. foundry_mcp/cli/__init__.py +67 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +640 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +667 -0
  15. foundry_mcp/cli/commands/session.py +472 -0
  16. foundry_mcp/cli/commands/specs.py +686 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +298 -0
  22. foundry_mcp/cli/logging.py +212 -0
  23. foundry_mcp/cli/main.py +44 -0
  24. foundry_mcp/cli/output.py +122 -0
  25. foundry_mcp/cli/registry.py +110 -0
  26. foundry_mcp/cli/resilience.py +178 -0
  27. foundry_mcp/cli/transcript.py +217 -0
  28. foundry_mcp/config.py +1454 -0
  29. foundry_mcp/core/__init__.py +144 -0
  30. foundry_mcp/core/ai_consultation.py +1773 -0
  31. foundry_mcp/core/batch_operations.py +1202 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/health.py +749 -0
  40. foundry_mcp/core/intake.py +933 -0
  41. foundry_mcp/core/journal.py +700 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1376 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +146 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +387 -0
  57. foundry_mcp/core/prometheus.py +564 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +691 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
  61. foundry_mcp/core/prompts/plan_review.py +627 -0
  62. foundry_mcp/core/providers/__init__.py +237 -0
  63. foundry_mcp/core/providers/base.py +515 -0
  64. foundry_mcp/core/providers/claude.py +472 -0
  65. foundry_mcp/core/providers/codex.py +637 -0
  66. foundry_mcp/core/providers/cursor_agent.py +630 -0
  67. foundry_mcp/core/providers/detectors.py +515 -0
  68. foundry_mcp/core/providers/gemini.py +426 -0
  69. foundry_mcp/core/providers/opencode.py +718 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +308 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +857 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/research/__init__.py +68 -0
  78. foundry_mcp/core/research/memory.py +528 -0
  79. foundry_mcp/core/research/models.py +1234 -0
  80. foundry_mcp/core/research/providers/__init__.py +40 -0
  81. foundry_mcp/core/research/providers/base.py +242 -0
  82. foundry_mcp/core/research/providers/google.py +507 -0
  83. foundry_mcp/core/research/providers/perplexity.py +442 -0
  84. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  85. foundry_mcp/core/research/providers/tavily.py +383 -0
  86. foundry_mcp/core/research/workflows/__init__.py +25 -0
  87. foundry_mcp/core/research/workflows/base.py +298 -0
  88. foundry_mcp/core/research/workflows/chat.py +271 -0
  89. foundry_mcp/core/research/workflows/consensus.py +539 -0
  90. foundry_mcp/core/research/workflows/deep_research.py +4142 -0
  91. foundry_mcp/core/research/workflows/ideate.py +682 -0
  92. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  93. foundry_mcp/core/resilience.py +600 -0
  94. foundry_mcp/core/responses.py +1624 -0
  95. foundry_mcp/core/review.py +366 -0
  96. foundry_mcp/core/security.py +438 -0
  97. foundry_mcp/core/spec.py +4119 -0
  98. foundry_mcp/core/task.py +2463 -0
  99. foundry_mcp/core/testing.py +839 -0
  100. foundry_mcp/core/validation.py +2357 -0
  101. foundry_mcp/dashboard/__init__.py +32 -0
  102. foundry_mcp/dashboard/app.py +119 -0
  103. foundry_mcp/dashboard/components/__init__.py +17 -0
  104. foundry_mcp/dashboard/components/cards.py +88 -0
  105. foundry_mcp/dashboard/components/charts.py +177 -0
  106. foundry_mcp/dashboard/components/filters.py +136 -0
  107. foundry_mcp/dashboard/components/tables.py +195 -0
  108. foundry_mcp/dashboard/data/__init__.py +11 -0
  109. foundry_mcp/dashboard/data/stores.py +433 -0
  110. foundry_mcp/dashboard/launcher.py +300 -0
  111. foundry_mcp/dashboard/views/__init__.py +12 -0
  112. foundry_mcp/dashboard/views/errors.py +217 -0
  113. foundry_mcp/dashboard/views/metrics.py +164 -0
  114. foundry_mcp/dashboard/views/overview.py +96 -0
  115. foundry_mcp/dashboard/views/providers.py +83 -0
  116. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  117. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  118. foundry_mcp/prompts/__init__.py +9 -0
  119. foundry_mcp/prompts/workflows.py +525 -0
  120. foundry_mcp/resources/__init__.py +9 -0
  121. foundry_mcp/resources/specs.py +591 -0
  122. foundry_mcp/schemas/__init__.py +38 -0
  123. foundry_mcp/schemas/intake-schema.json +89 -0
  124. foundry_mcp/schemas/sdd-spec-schema.json +414 -0
  125. foundry_mcp/server.py +150 -0
  126. foundry_mcp/tools/__init__.py +10 -0
  127. foundry_mcp/tools/unified/__init__.py +92 -0
  128. foundry_mcp/tools/unified/authoring.py +3620 -0
  129. foundry_mcp/tools/unified/context_helpers.py +98 -0
  130. foundry_mcp/tools/unified/documentation_helpers.py +268 -0
  131. foundry_mcp/tools/unified/environment.py +1341 -0
  132. foundry_mcp/tools/unified/error.py +479 -0
  133. foundry_mcp/tools/unified/health.py +225 -0
  134. foundry_mcp/tools/unified/journal.py +841 -0
  135. foundry_mcp/tools/unified/lifecycle.py +640 -0
  136. foundry_mcp/tools/unified/metrics.py +777 -0
  137. foundry_mcp/tools/unified/plan.py +876 -0
  138. foundry_mcp/tools/unified/pr.py +294 -0
  139. foundry_mcp/tools/unified/provider.py +589 -0
  140. foundry_mcp/tools/unified/research.py +1283 -0
  141. foundry_mcp/tools/unified/review.py +1042 -0
  142. foundry_mcp/tools/unified/review_helpers.py +314 -0
  143. foundry_mcp/tools/unified/router.py +102 -0
  144. foundry_mcp/tools/unified/server.py +565 -0
  145. foundry_mcp/tools/unified/spec.py +1283 -0
  146. foundry_mcp/tools/unified/task.py +3846 -0
  147. foundry_mcp/tools/unified/test.py +431 -0
  148. foundry_mcp/tools/unified/verification.py +520 -0
  149. foundry_mcp-0.8.22.dist-info/METADATA +344 -0
  150. foundry_mcp-0.8.22.dist-info/RECORD +153 -0
  151. foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
  152. foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
  153. foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,898 @@
1
+ """
2
+ Concurrency utilities for foundry-mcp.
3
+
4
+ Provides concurrency limiting, cancellation handling, and request context
5
+ management for async MCP tool operations.
6
+
7
+ See docs/mcp_best_practices/15-concurrency-patterns.md for guidance.
8
+
9
+ Example:
10
+ from foundry_mcp.core.concurrency import (
11
+ ConcurrencyLimiter, with_cancellation, request_context
12
+ )
13
+
14
+ # Limit concurrent operations
15
+ limiter = ConcurrencyLimiter(max_concurrent=10)
16
+ results = await limiter.gather([fetch(url) for url in urls])
17
+
18
+ # Handle cancellation gracefully
19
+ @with_cancellation
20
+ async def long_task():
21
+ ...
22
+
23
+ # Track request context
24
+ async with request_context(request_id="abc", client_id="client1"):
25
+ await process()
26
+ """
27
+
28
+ import asyncio
29
+ import logging
30
+ import time
31
+ import uuid
32
+ from contextlib import asynccontextmanager
33
+ from contextvars import ContextVar
34
+ from dataclasses import dataclass, field
35
+ from datetime import datetime, timezone
36
+ from functools import wraps
37
+ from typing import Any, Callable, Coroutine, Dict, List, Optional, TypeVar
38
+
39
+ logger = logging.getLogger(__name__)
40
+
41
# Schema version for concurrency module
SCHEMA_VERSION = "1.0.0"

# Context variables for request-scoped state. These are set/reset by
# request_context() below and read by the get_* accessor functions;
# the defaults ("" / "anonymous" / 0.0) signal "no context active".
request_id: ContextVar[str] = ContextVar("request_id", default="")
client_id: ContextVar[str] = ContextVar("client_id", default="anonymous")
start_time: ContextVar[float] = ContextVar("start_time", default=0.0)  # time.monotonic() value

# Type variable for async functions
T = TypeVar("T")
51
+
52
+
53
+ # -----------------------------------------------------------------------------
54
+ # Request Context Management
55
+ # -----------------------------------------------------------------------------
56
+
57
+
58
@dataclass
class RequestContext:
    """Immutable-ish snapshot of a request's identity and timing.

    Attributes:
        request_id: Unique identifier for this request
        client_id: Client making the request
        start_time: When the request started (monotonic clock)
        start_timestamp: When the request started (wall clock, UTC)
    """

    request_id: str
    client_id: str
    start_time: float
    start_timestamp: datetime = field(
        default_factory=lambda: datetime.now(timezone.utc)
    )

    @property
    def elapsed_seconds(self) -> float:
        """Seconds elapsed since the request began (monotonic clock)."""
        return time.monotonic() - self.start_time

    @property
    def elapsed_ms(self) -> float:
        """Milliseconds elapsed since the request began."""
        return self.elapsed_seconds * 1000

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the context for structured logging."""
        return dict(
            request_id=self.request_id,
            client_id=self.client_id,
            elapsed_ms=round(self.elapsed_ms, 2),
            start_timestamp=self.start_timestamp.isoformat(),
        )
94
+
95
+
96
@asynccontextmanager
async def request_context(
    req_id: Optional[str] = None,
    cli_id: Optional[str] = None,
):
    """Establish request-scoped context for async operations.

    Values set here propagate automatically through every nested coroutine
    via contextvars, and are restored to their previous values on exit.

    Args:
        req_id: Request ID (auto-generated UUID if not provided)
        cli_id: Client ID (defaults to "anonymous")

    Yields:
        RequestContext object with current context values

    Example:
        >>> async with request_context(cli_id="user123") as ctx:
        ...     print(f"Request {ctx.request_id} started")
        ...     await do_work()
        ...     print(f"Completed in {ctx.elapsed_ms:.2f}ms")
    """
    effective_req = req_id or str(uuid.uuid4())
    effective_cli = cli_id or "anonymous"
    started = time.monotonic()

    # Remember reset tokens so the previous values can be restored on exit.
    req_token = request_id.set(effective_req)
    cli_token = client_id.set(effective_cli)
    start_token = start_time.set(started)

    ctx = RequestContext(
        request_id=effective_req,
        client_id=effective_cli,
        start_time=started,
    )

    try:
        logger.debug(
            "Request context started",
            extra={"request_id": effective_req, "client_id": effective_cli},
        )
        yield ctx
    finally:
        logger.debug(
            "Request context ended",
            extra={
                "request_id": effective_req,
                "client_id": effective_cli,
                "elapsed_ms": round(ctx.elapsed_ms, 2),
            },
        )
        # Restore the previous context-variable values.
        request_id.reset(req_token)
        client_id.reset(cli_token)
        start_time.reset(start_token)
154
+
155
+
156
def get_current_context() -> RequestContext:
    """Return a snapshot of the active request context.

    Returns:
        RequestContext populated from the current context variables

    Raises:
        RuntimeError: If called outside of a request context
    """
    current = request_id.get()
    if not current:
        raise RuntimeError(
            "get_current_context() called outside of request_context"
        )
    return RequestContext(
        request_id=current,
        client_id=client_id.get(),
        start_time=start_time.get(),
    )
176
+
177
+
178
def get_current_context_or_none() -> Optional[RequestContext]:
    """Non-raising variant of get_current_context().

    Returns:
        RequestContext if in a request context, None otherwise
    """
    current = request_id.get()
    if not current:
        return None
    return RequestContext(
        request_id=current,
        client_id=client_id.get(),
        start_time=start_time.get(),
    )
193
+
194
+
195
def get_request_id() -> str:
    """Return the current request ID, or "" when no context is active."""
    return request_id.get()
202
+
203
+
204
def get_client_id() -> str:
    """Return the current client ID, or "anonymous" when no context is active."""
    return client_id.get()
211
+
212
+
213
def get_elapsed_time() -> float:
    """Return seconds elapsed since the current request started.

    Returns:
        Elapsed time in seconds, or 0.0 if not in a request context
    """
    started = start_time.get()
    # 0.0 is the context-var default, meaning "no context active".
    return time.monotonic() - started if started != 0.0 else 0.0
223
+
224
+
225
def log_with_context(
    level: int,
    message: str,
    **extra: Any,
) -> None:
    """Log a message, folding the current request context into `extra`.

    Args:
        level: Logging level (e.g. logging.INFO)
        message: Log message
        **extra: Additional fields for the log record; context fields
            (request_id, client_id, ...) take precedence on key collision
    """
    ctx = get_current_context_or_none()
    payload = {**extra, **ctx.to_dict()} if ctx else dict(extra)
    logger.log(level, message, extra=payload)
241
+
242
+
243
@dataclass
class ConcurrencyConfig:
    """Configuration for a concurrency limiter.

    Attributes:
        max_concurrent: Maximum number of concurrent operations
        name: Optional name for logging and identification
        timeout: Optional timeout per operation in seconds
    """

    max_concurrent: int = 10  # upper bound on simultaneously running operations
    name: str = ""  # identifier used in logs and get_stats() output
    timeout: Optional[float] = None  # None disables the per-operation timeout
256
+
257
+
258
@dataclass
class ConcurrencyStats:
    """Statistics from concurrent operation execution.

    Attributes:
        total: Total operations attempted
        succeeded: Operations completed successfully
        failed: Operations that raised exceptions (includes timed-out and
            cancelled operations, which are counted in both fields)
        cancelled: Operations that were cancelled
        timed_out: Operations that timed out
        elapsed_seconds: Total execution time
    """

    total: int = 0
    succeeded: int = 0
    failed: int = 0
    cancelled: int = 0
    timed_out: int = 0
    elapsed_seconds: float = 0.0
277
+
278
+
279
@dataclass
class GatherResult:
    """Outcome of a limited gather: results, per-slot errors, and stats.

    Attributes:
        results: List of results, positionally aligned with the input
            coroutines (None where the operation failed)
        errors: List of exceptions, positionally aligned (None on success)
        stats: Execution statistics
    """

    results: List[Any] = field(default_factory=list)
    errors: List[Optional[Exception]] = field(default_factory=list)
    stats: ConcurrencyStats = field(default_factory=ConcurrencyStats)

    @property
    def all_succeeded(self) -> bool:
        """True when no operation failed or was cancelled."""
        return not (self.stats.failed or self.stats.cancelled)

    def successful_results(self) -> List[Any]:
        """Return only the results whose slot recorded no error."""
        return [
            value
            for value, err in zip(self.results, self.errors)
            if err is None
        ]

    def failed_results(self) -> List[tuple[int, Exception]]:
        """Return (index, exception) pairs for the failed operations."""
        return [
            (idx, err)
            for idx, err in enumerate(self.errors)
            if err is not None
        ]
305
+
306
+
307
class ConcurrencyLimiter:
    """Limit concurrent async operations using a semaphore.

    Provides controlled concurrency for parallel operations like HTTP requests,
    database queries, or file operations to prevent resource exhaustion.

    Example:
        >>> limiter = ConcurrencyLimiter(max_concurrent=5)
        >>> results = await limiter.gather([fetch(url) for url in urls])
        >>> print(f"Completed {results.stats.succeeded}/{results.stats.total}")

        >>> # With timeout per operation
        >>> limiter = ConcurrencyLimiter(max_concurrent=3, timeout=30.0)
        >>> async with limiter.acquire():
        ...     await slow_operation()
    """

    def __init__(
        self,
        max_concurrent: int = 10,
        *,
        name: str = "",
        timeout: Optional[float] = None,
    ):
        """Initialize concurrency limiter.

        Args:
            max_concurrent: Maximum number of concurrent operations (default: 10)
            name: Optional name for logging
            timeout: Optional timeout per operation in seconds
        """
        self.config = ConcurrencyConfig(
            max_concurrent=max_concurrent,
            name=name,
            timeout=timeout,
        )
        self._semaphore = asyncio.Semaphore(max_concurrent)
        self._active_count = 0  # operations currently holding a slot
        self._total_count = 0  # operations ever admitted through acquire()

    @property
    def max_concurrent(self) -> int:
        """Get maximum concurrent operations."""
        return self.config.max_concurrent

    @property
    def active_count(self) -> int:
        """Get current number of active operations."""
        return self._active_count

    @asynccontextmanager
    async def acquire(self):
        """Acquire a slot for concurrent execution.

        Use as async context manager for single operations:

            async with limiter.acquire():
                await do_something()

        Yields:
            None (the slot is held until context exit)
        """
        async with self._semaphore:
            self._active_count += 1
            self._total_count += 1
            try:
                yield
            finally:
                self._active_count -= 1

    async def run(
        self,
        coro: Coroutine[Any, Any, T],
        *,
        timeout: Optional[float] = None,
    ) -> T:
        """Run a coroutine with concurrency limiting.

        Args:
            coro: The coroutine to run
            timeout: Optional timeout override (uses limiter default if not provided)

        Returns:
            The result of the coroutine

        Raises:
            asyncio.TimeoutError: If operation times out
            asyncio.CancelledError: If operation is cancelled
        """
        effective_timeout = timeout if timeout is not None else self.config.timeout

        async with self.acquire():
            # Compare against None (not truthiness) so an explicit timeout
            # of 0 is enforced instead of being treated as "no timeout".
            if effective_timeout is not None:
                return await asyncio.wait_for(coro, timeout=effective_timeout)
            return await coro

    async def gather(
        self,
        coros: List[Coroutine[Any, Any, T]],
        *,
        return_exceptions: bool = False,
        timeout: Optional[float] = None,
    ) -> GatherResult:
        """Run multiple coroutines with concurrency limiting.

        Unlike asyncio.gather, this limits how many operations run in parallel.

        Args:
            coros: List of coroutines to execute
            return_exceptions: If True, exceptions are captured in results;
                if False, first exception stops execution
            timeout: Optional timeout per operation

        Returns:
            GatherResult with results, errors, and statistics

        Example:
            >>> limiter = ConcurrencyLimiter(max_concurrent=5)
            >>> result = await limiter.gather([
            ...     fetch(url) for url in urls
            ... ])
            >>> if result.all_succeeded:
            ...     process(result.results)
            ... else:
            ...     handle_errors(result.failed_results())
        """
        start = time.monotonic()
        stats = ConcurrencyStats(total=len(coros))
        results: List[Any] = [None] * len(coros)
        errors: List[Optional[Exception]] = [None] * len(coros)

        async def run_one(index: int, coro: Coroutine[Any, Any, T]) -> None:
            # Record the outcome into the positionally-aligned slots and
            # the shared stats; re-raise when fail-fast semantics apply.
            try:
                results[index] = await self.run(coro, timeout=timeout)
                stats.succeeded += 1
            except asyncio.TimeoutError as e:
                errors[index] = e
                stats.timed_out += 1
                stats.failed += 1
                if not return_exceptions:
                    raise
            except asyncio.CancelledError as e:
                errors[index] = e
                stats.cancelled += 1
                stats.failed += 1
                if not return_exceptions:
                    raise
            except Exception as e:
                errors[index] = e
                stats.failed += 1
                if not return_exceptions:
                    raise

        # Create the tasks before entering the try block so the failure
        # path below can never reference an unbound name.
        tasks = [
            asyncio.create_task(run_one(i, coro))
            for i, coro in enumerate(coros)
        ]
        try:
            await asyncio.gather(*tasks, return_exceptions=return_exceptions)
        except Exception:
            # Cancel remaining tasks on failure
            for task in tasks:
                if not task.done():
                    task.cancel()
            raise
        finally:
            stats.elapsed_seconds = time.monotonic() - start

        return GatherResult(results=results, errors=errors, stats=stats)

    async def map(
        self,
        func: Callable[[T], Coroutine[Any, Any, Any]],
        items: List[T],
        *,
        return_exceptions: bool = False,
        timeout: Optional[float] = None,
    ) -> GatherResult:
        """Apply an async function to items with concurrency limiting.

        Convenience wrapper around gather for mapping operations.

        Args:
            func: Async function to apply to each item
            items: List of items to process
            return_exceptions: If True, capture exceptions in results
            timeout: Optional timeout per operation

        Returns:
            GatherResult with results

        Example:
            >>> async def fetch(url: str) -> dict:
            ...     async with aiohttp.get(url) as resp:
            ...         return await resp.json()
            >>> limiter = ConcurrencyLimiter(max_concurrent=10)
            >>> result = await limiter.map(fetch, urls)
        """
        coros = [func(item) for item in items]
        return await self.gather(
            coros,
            return_exceptions=return_exceptions,
            timeout=timeout,
        )

    def get_stats(self) -> Dict[str, Any]:
        """Get current limiter statistics.

        Returns:
            Dictionary with limiter state information
        """
        return {
            "max_concurrent": self.config.max_concurrent,
            "active_count": self._active_count,
            "total_processed": self._total_count,
            "name": self.config.name,
            "timeout": self.config.timeout,
        }
526
+
527
+
528
# Registry of per-tool concurrency limiters
_tool_limiters: Dict[str, ConcurrencyLimiter] = {}


def get_tool_limiter(
    tool_name: str,
    default_limit: int = 10,
) -> ConcurrencyLimiter:
    """Fetch the limiter registered for a tool, creating one lazily.

    Args:
        tool_name: Name of the tool
        default_limit: Default max concurrent if not configured

    Returns:
        ConcurrencyLimiter instance for the tool
    """
    try:
        return _tool_limiters[tool_name]
    except KeyError:
        limiter = ConcurrencyLimiter(
            max_concurrent=default_limit,
            name=tool_name,
        )
        _tool_limiters[tool_name] = limiter
        return limiter
551
+
552
+
553
def configure_tool_limiter(
    tool_name: str,
    max_concurrent: int,
    *,
    timeout: Optional[float] = None,
) -> ConcurrencyLimiter:
    """Register a freshly-configured limiter for a tool, replacing any
    existing one.

    Args:
        tool_name: Name of the tool
        max_concurrent: Maximum concurrent operations
        timeout: Optional timeout per operation

    Returns:
        Configured ConcurrencyLimiter instance
    """
    limiter = ConcurrencyLimiter(
        max_concurrent=max_concurrent,
        name=tool_name,
        timeout=timeout,
    )
    _tool_limiters[tool_name] = limiter
    logger.debug(
        f"Configured limiter for {tool_name}: max_concurrent={max_concurrent}"
    )
    return limiter
579
+
580
+
581
def get_all_limiter_stats() -> Dict[str, Dict[str, Any]]:
    """Snapshot statistics for every registered tool limiter.

    Returns:
        Dictionary mapping tool names to their limiter stats
    """
    snapshot: Dict[str, Dict[str, Any]] = {}
    for name, limiter in _tool_limiters.items():
        snapshot[name] = limiter.get_stats()
    return snapshot
588
+
589
+
590
+ # -----------------------------------------------------------------------------
591
+ # Cancellation Handling
592
+ # -----------------------------------------------------------------------------
593
+
594
+
595
@dataclass
class CancellationResult:
    """Result of a cancellable operation.

    Attributes:
        completed: Whether the operation completed successfully
        was_cancelled: Whether the operation was cancelled
        partial_results: Any partial results available if cancelled
        processed_count: Number of items processed before cancellation
        total_count: Total items that were to be processed
    """

    completed: bool = False
    was_cancelled: bool = False
    partial_results: List[Any] = field(default_factory=list)
    processed_count: int = 0  # equals len(partial_results)
    total_count: int = 0
612
+
613
+
614
@asynccontextmanager
async def cancellable_scope(
    cleanup_func: Optional[Callable[[], Coroutine[Any, Any, None]]] = None,
):
    """Async scope that runs a cleanup callback when its body is cancelled.

    The CancelledError is always re-raised after cleanup, so cancellation
    still propagates to the caller.

    Args:
        cleanup_func: Optional async function to call on cancellation

    Example:
        >>> async def cleanup():
        ...     await close_connections()
        ...
        >>> async with cancellable_scope(cleanup_func=cleanup):
        ...     await long_running_operation()

    Yields:
        None
    """
    try:
        yield
    except asyncio.CancelledError:
        logger.debug("Operation cancelled, performing cleanup")
        if cleanup_func is not None:
            try:
                await cleanup_func()
            except Exception as e:
                # Best-effort cleanup: a failing callback must not mask
                # the cancellation itself.
                logger.warning(f"Cleanup failed during cancellation: {e}")
        raise
645
+
646
+
647
def with_cancellation(
    cleanup_func: Optional[Callable[[], Coroutine[Any, Any, None]]] = None,
):
    """Decorator factory wrapping an async function in cancellable_scope.

    The wrapped function's cleanup callback runs if it is cancelled.

    Args:
        cleanup_func: Optional async function to call on cancellation

    Example:
        >>> async def close_db():
        ...     await db.close()
        ...
        >>> @with_cancellation(cleanup_func=close_db)
        ... async def query_database():
        ...     return await db.query("SELECT * FROM users")
    """

    def decorator(
        func: Callable[..., Coroutine[Any, Any, T]],
    ) -> Callable[..., Coroutine[Any, Any, T]]:
        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> T:
            async with cancellable_scope(cleanup_func=cleanup_func):
                return await func(*args, **kwargs)

        return wrapper

    return decorator
675
+
676
+
677
async def run_with_cancellation_checkpoints(
    items: List[T],
    process_func: Callable[[T], Coroutine[Any, Any, Any]],
    *,
    checkpoint_interval: int = 10,
    return_partial: bool = True,
) -> CancellationResult:
    """Process items sequentially with periodic cancellation checkpoints.

    Lets long-running batch operations be cancelled cleanly, optionally
    returning the results accumulated up to the point of cancellation.

    Args:
        items: List of items to process
        process_func: Async function to process each item
        checkpoint_interval: Check for cancellation every N items
        return_partial: If True, return partial results on cancellation

    Returns:
        CancellationResult with completion status and any partial results

    Example:
        >>> async def process_item(item: str) -> dict:
        ...     return {"item": item, "processed": True}
        ...
        >>> result = await run_with_cancellation_checkpoints(
        ...     items=["a", "b", "c"],
        ...     process_func=process_item,
        ...     checkpoint_interval=1,
        ... )
        >>> if result.was_cancelled:
        ...     print(f"Processed {result.processed_count}/{result.total_count}")
    """
    collected: List[Any] = []
    total = len(items)

    for i, current in enumerate(items):
        if i % checkpoint_interval == 0:
            # Explicit suspension point so a pending cancel can land here.
            await asyncio.sleep(0)

        try:
            collected.append(await process_func(current))
        except asyncio.CancelledError:
            logger.info(
                f"Operation cancelled at item {i}/{total}"
            )
            if not return_partial:
                raise
            return CancellationResult(
                completed=False,
                was_cancelled=True,
                partial_results=collected,
                processed_count=len(collected),
                total_count=total,
            )

    return CancellationResult(
        completed=True,
        was_cancelled=False,
        partial_results=collected,
        processed_count=len(collected),
        total_count=total,
    )
742
+
743
+
744
async def cancel_tasks_gracefully(
    tasks: List[asyncio.Task],
    *,
    timeout: float = 5.0,
) -> List[Optional[Exception]]:
    """Cancel tasks and wait (bounded by *timeout*) for them to settle.

    Requests cancellation of every unfinished task, then waits for all of
    them to complete so cleanup code inside the tasks can run.

    Args:
        tasks: List of asyncio.Task objects to cancel
        timeout: Maximum time to wait for tasks to finish cancelling

    Returns:
        List of exceptions from cancelled tasks (None if clean cancellation)

    Example:
        >>> tasks = [asyncio.create_task(op()) for op in operations]
        >>> # Later, need to cancel all
        >>> errors = await cancel_tasks_gracefully(tasks, timeout=10.0)
    """
    if not tasks:
        return []

    # Request cancellation for every task that has not already finished.
    for candidate in tasks:
        if not candidate.done():
            candidate.cancel()

    outcomes: List[Optional[Exception]] = []
    try:
        _, still_pending = await asyncio.wait(
            tasks,
            timeout=timeout,
            return_when=asyncio.ALL_COMPLETED,
        )

        # Collect per-task outcomes in the order the tasks were given.
        for task in tasks:
            if not task.done():
                # Task didn't settle within the timeout window.
                outcomes.append(
                    asyncio.TimeoutError("Task did not finish cancelling")
                )
                continue
            try:
                task.result()
            except asyncio.CancelledError:
                outcomes.append(None)  # clean cancellation
            except Exception as exc:
                outcomes.append(exc)
            else:
                outcomes.append(None)

        # Force-cancel anything that outlived the timeout.
        for task in still_pending:
            task.cancel()

    except Exception as e:
        logger.error(f"Error during graceful cancellation: {e}")
        outcomes = [e] * len(tasks)

    return outcomes
806
+
807
+
808
+ class CancellationToken:
809
+ """Token for cooperative cancellation of async operations.
810
+
811
+ Allows multiple operations to check for cancellation requests
812
+ without relying solely on asyncio.CancelledError.
813
+
814
+ Example:
815
+ >>> token = CancellationToken()
816
+ >>>
817
+ >>> async def worker():
818
+ ... while not token.is_cancelled:
819
+ ... await do_work()
820
+ ... await token.check() # Raises if cancelled
821
+ ...
822
+ >>> # Later, from another task:
823
+ >>> token.cancel()
824
+ """
825
+
826
+ def __init__(self):
827
+ """Initialize cancellation token."""
828
+ self._cancelled = False
829
+ self._cancel_event = asyncio.Event()
830
+
831
+ @property
832
+ def is_cancelled(self) -> bool:
833
+ """Check if cancellation was requested."""
834
+ return self._cancelled
835
+
836
+ def cancel(self) -> None:
837
+ """Request cancellation."""
838
+ self._cancelled = True
839
+ self._cancel_event.set()
840
+
841
+ async def check(self) -> None:
842
+ """Check for cancellation and raise if requested.
843
+
844
+ Raises:
845
+ asyncio.CancelledError: If cancellation was requested
846
+ """
847
+ if self._cancelled:
848
+ raise asyncio.CancelledError("Cancellation requested via token")
849
+
850
+ async def wait_for_cancel(self, timeout: Optional[float] = None) -> bool:
851
+ """Wait for cancellation to be requested.
852
+
853
+ Args:
854
+ timeout: Maximum time to wait (None for indefinite)
855
+
856
+ Returns:
857
+ True if cancelled, False if timeout reached
858
+ """
859
+ try:
860
+ await asyncio.wait_for(self._cancel_event.wait(), timeout=timeout)
861
+ return True
862
+ except asyncio.TimeoutError:
863
+ return False
864
+
865
+
866
# Export all public symbols; grouped to mirror the module's sections.
__all__ = [
    # Schema
    "SCHEMA_VERSION",
    # Concurrency limiting
    "ConcurrencyConfig",
    "ConcurrencyStats",
    "GatherResult",
    "ConcurrencyLimiter",
    "get_tool_limiter",
    "configure_tool_limiter",
    "get_all_limiter_stats",
    # Cancellation handling
    "CancellationResult",
    "CancellationToken",
    "cancellable_scope",
    "with_cancellation",
    "run_with_cancellation_checkpoints",
    "cancel_tasks_gracefully",
    # Request context management
    "RequestContext",
    "request_context",
    "get_current_context",
    "get_current_context_or_none",
    "get_request_id",
    "get_client_id",
    "get_elapsed_time",
    "log_with_context",
    # Context variables (raw access, e.g. for ContextVar.set/reset)
    "request_id",
    "client_id",
    "start_time",
]