foundry-mcp 0.8.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of foundry-mcp might be problematic. Click here for more details.

Files changed (153) hide show
  1. foundry_mcp/__init__.py +13 -0
  2. foundry_mcp/cli/__init__.py +67 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +640 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +667 -0
  15. foundry_mcp/cli/commands/session.py +472 -0
  16. foundry_mcp/cli/commands/specs.py +686 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +298 -0
  22. foundry_mcp/cli/logging.py +212 -0
  23. foundry_mcp/cli/main.py +44 -0
  24. foundry_mcp/cli/output.py +122 -0
  25. foundry_mcp/cli/registry.py +110 -0
  26. foundry_mcp/cli/resilience.py +178 -0
  27. foundry_mcp/cli/transcript.py +217 -0
  28. foundry_mcp/config.py +1454 -0
  29. foundry_mcp/core/__init__.py +144 -0
  30. foundry_mcp/core/ai_consultation.py +1773 -0
  31. foundry_mcp/core/batch_operations.py +1202 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/health.py +749 -0
  40. foundry_mcp/core/intake.py +933 -0
  41. foundry_mcp/core/journal.py +700 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1376 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +146 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +387 -0
  57. foundry_mcp/core/prometheus.py +564 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +691 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
  61. foundry_mcp/core/prompts/plan_review.py +627 -0
  62. foundry_mcp/core/providers/__init__.py +237 -0
  63. foundry_mcp/core/providers/base.py +515 -0
  64. foundry_mcp/core/providers/claude.py +472 -0
  65. foundry_mcp/core/providers/codex.py +637 -0
  66. foundry_mcp/core/providers/cursor_agent.py +630 -0
  67. foundry_mcp/core/providers/detectors.py +515 -0
  68. foundry_mcp/core/providers/gemini.py +426 -0
  69. foundry_mcp/core/providers/opencode.py +718 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +308 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +857 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/research/__init__.py +68 -0
  78. foundry_mcp/core/research/memory.py +528 -0
  79. foundry_mcp/core/research/models.py +1234 -0
  80. foundry_mcp/core/research/providers/__init__.py +40 -0
  81. foundry_mcp/core/research/providers/base.py +242 -0
  82. foundry_mcp/core/research/providers/google.py +507 -0
  83. foundry_mcp/core/research/providers/perplexity.py +442 -0
  84. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  85. foundry_mcp/core/research/providers/tavily.py +383 -0
  86. foundry_mcp/core/research/workflows/__init__.py +25 -0
  87. foundry_mcp/core/research/workflows/base.py +298 -0
  88. foundry_mcp/core/research/workflows/chat.py +271 -0
  89. foundry_mcp/core/research/workflows/consensus.py +539 -0
  90. foundry_mcp/core/research/workflows/deep_research.py +4142 -0
  91. foundry_mcp/core/research/workflows/ideate.py +682 -0
  92. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  93. foundry_mcp/core/resilience.py +600 -0
  94. foundry_mcp/core/responses.py +1624 -0
  95. foundry_mcp/core/review.py +366 -0
  96. foundry_mcp/core/security.py +438 -0
  97. foundry_mcp/core/spec.py +4119 -0
  98. foundry_mcp/core/task.py +2463 -0
  99. foundry_mcp/core/testing.py +839 -0
  100. foundry_mcp/core/validation.py +2357 -0
  101. foundry_mcp/dashboard/__init__.py +32 -0
  102. foundry_mcp/dashboard/app.py +119 -0
  103. foundry_mcp/dashboard/components/__init__.py +17 -0
  104. foundry_mcp/dashboard/components/cards.py +88 -0
  105. foundry_mcp/dashboard/components/charts.py +177 -0
  106. foundry_mcp/dashboard/components/filters.py +136 -0
  107. foundry_mcp/dashboard/components/tables.py +195 -0
  108. foundry_mcp/dashboard/data/__init__.py +11 -0
  109. foundry_mcp/dashboard/data/stores.py +433 -0
  110. foundry_mcp/dashboard/launcher.py +300 -0
  111. foundry_mcp/dashboard/views/__init__.py +12 -0
  112. foundry_mcp/dashboard/views/errors.py +217 -0
  113. foundry_mcp/dashboard/views/metrics.py +164 -0
  114. foundry_mcp/dashboard/views/overview.py +96 -0
  115. foundry_mcp/dashboard/views/providers.py +83 -0
  116. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  117. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  118. foundry_mcp/prompts/__init__.py +9 -0
  119. foundry_mcp/prompts/workflows.py +525 -0
  120. foundry_mcp/resources/__init__.py +9 -0
  121. foundry_mcp/resources/specs.py +591 -0
  122. foundry_mcp/schemas/__init__.py +38 -0
  123. foundry_mcp/schemas/intake-schema.json +89 -0
  124. foundry_mcp/schemas/sdd-spec-schema.json +414 -0
  125. foundry_mcp/server.py +150 -0
  126. foundry_mcp/tools/__init__.py +10 -0
  127. foundry_mcp/tools/unified/__init__.py +92 -0
  128. foundry_mcp/tools/unified/authoring.py +3620 -0
  129. foundry_mcp/tools/unified/context_helpers.py +98 -0
  130. foundry_mcp/tools/unified/documentation_helpers.py +268 -0
  131. foundry_mcp/tools/unified/environment.py +1341 -0
  132. foundry_mcp/tools/unified/error.py +479 -0
  133. foundry_mcp/tools/unified/health.py +225 -0
  134. foundry_mcp/tools/unified/journal.py +841 -0
  135. foundry_mcp/tools/unified/lifecycle.py +640 -0
  136. foundry_mcp/tools/unified/metrics.py +777 -0
  137. foundry_mcp/tools/unified/plan.py +876 -0
  138. foundry_mcp/tools/unified/pr.py +294 -0
  139. foundry_mcp/tools/unified/provider.py +589 -0
  140. foundry_mcp/tools/unified/research.py +1283 -0
  141. foundry_mcp/tools/unified/review.py +1042 -0
  142. foundry_mcp/tools/unified/review_helpers.py +314 -0
  143. foundry_mcp/tools/unified/router.py +102 -0
  144. foundry_mcp/tools/unified/server.py +565 -0
  145. foundry_mcp/tools/unified/spec.py +1283 -0
  146. foundry_mcp/tools/unified/task.py +3846 -0
  147. foundry_mcp/tools/unified/test.py +431 -0
  148. foundry_mcp/tools/unified/verification.py +520 -0
  149. foundry_mcp-0.8.22.dist-info/METADATA +344 -0
  150. foundry_mcp-0.8.22.dist-info/RECORD +153 -0
  151. foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
  152. foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
  153. foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1202 @@
1
+ """
2
+ Batch operations for parallel task execution in SDD workflows.
3
+
4
+ Provides functions to identify independent tasks that can run in parallel
5
+ and manage batch state transitions.
6
+
7
+ Security Note:
8
+ This module validates file paths to ensure they are within the project root.
9
+ See docs/mcp_best_practices/08-security-trust-boundaries.md for guidance.
10
+ """
11
+
12
+ import os
13
+ from pathlib import Path
14
+ from typing import Any, Dict, List, Optional, Tuple
15
+
16
+ from foundry_mcp.core.spec import load_spec, find_specs_directory
17
+ from foundry_mcp.core.task import is_unblocked
18
+
19
# Constants for batch operations.
# Tuning knobs for get_independent_tasks(); callers may override max_tasks
# per call, but the retry cap is applied unconditionally.
DEFAULT_MAX_TASKS = 3
"""Default maximum number of tasks to return in a batch."""

MAX_RETRY_COUNT = 3
"""Maximum retry count before excluding a task from batch selection."""
25
+
26
+
27
+ def _get_active_phases(spec_data: Dict[str, Any]) -> List[str]:
28
+ """
29
+ Get phases that are eligible for task selection.
30
+
31
+ Returns phases in priority order: in_progress first, then pending.
32
+ Phases with status 'completed' or 'blocked' are excluded.
33
+
34
+ Args:
35
+ spec_data: Loaded spec data dictionary
36
+
37
+ Returns:
38
+ List of phase IDs in priority order
39
+ """
40
+ hierarchy = spec_data.get("hierarchy", {})
41
+ spec_root = hierarchy.get("spec-root", {})
42
+ phase_order = spec_root.get("children", [])
43
+
44
+ active_phases: List[str] = []
45
+
46
+ # First pass: in_progress phases (highest priority)
47
+ for phase_id in phase_order:
48
+ phase = hierarchy.get(phase_id, {})
49
+ if phase.get("type") == "phase" and phase.get("status") == "in_progress":
50
+ active_phases.append(phase_id)
51
+
52
+ # Second pass: pending phases
53
+ for phase_id in phase_order:
54
+ phase = hierarchy.get(phase_id, {})
55
+ if phase.get("type") == "phase" and phase.get("status") == "pending":
56
+ active_phases.append(phase_id)
57
+
58
+ return active_phases
59
+
60
+
61
+ def _is_path_ancestor(parent_path: str, child_path: str) -> bool:
62
+ """
63
+ Check if parent_path is an ancestor directory of child_path.
64
+
65
+ Args:
66
+ parent_path: Potential parent directory path
67
+ child_path: Potential child path
68
+
69
+ Returns:
70
+ True if parent_path is an ancestor of child_path
71
+ """
72
+ # Normalize paths
73
+ parent_abs = os.path.abspath(parent_path)
74
+ child_abs = os.path.abspath(child_path)
75
+
76
+ # Ensure parent ends with separator for proper prefix matching
77
+ if not parent_abs.endswith(os.sep):
78
+ parent_abs += os.sep
79
+
80
+ return child_abs.startswith(parent_abs)
81
+
82
+
83
+ def _paths_conflict(path_a: Optional[str], path_b: Optional[str]) -> bool:
84
+ """
85
+ Check if two file paths conflict for parallel execution.
86
+
87
+ Conflicts occur when:
88
+ - Both paths are the same file
89
+ - One path is an ancestor directory of the other
90
+
91
+ Args:
92
+ path_a: First file path (may be None)
93
+ path_b: Second file path (may be None)
94
+
95
+ Returns:
96
+ True if paths conflict and tasks cannot run in parallel
97
+ """
98
+ if not path_a or not path_b:
99
+ # If either path is missing, handled by barrier logic elsewhere
100
+ return False
101
+
102
+ abs_a = os.path.abspath(path_a)
103
+ abs_b = os.path.abspath(path_b)
104
+
105
+ # Same file
106
+ if abs_a == abs_b:
107
+ return True
108
+
109
+ # Check ancestry in both directions
110
+ if _is_path_ancestor(abs_a, abs_b) or _is_path_ancestor(abs_b, abs_a):
111
+ return True
112
+
113
+ return False
114
+
115
+
116
+ def _is_within_project_root(file_path: str, project_root: Optional[Path] = None) -> bool:
117
+ """
118
+ Validate that a file path is within the project root.
119
+
120
+ Security measure to prevent path traversal attacks.
121
+
122
+ Args:
123
+ file_path: Path to validate
124
+ project_root: Project root directory (auto-detected if None)
125
+
126
+ Returns:
127
+ True if path is within project root
128
+ """
129
+ if project_root is None:
130
+ specs_dir = find_specs_directory()
131
+ if specs_dir:
132
+ # Project root is typically parent of specs directory
133
+ project_root = specs_dir.parent
134
+ else:
135
+ # Fall back to current working directory
136
+ project_root = Path.cwd()
137
+
138
+ abs_path = os.path.abspath(file_path)
139
+ abs_root = os.path.abspath(project_root)
140
+
141
+ # Ensure root ends with separator
142
+ if not abs_root.endswith(os.sep):
143
+ abs_root += os.sep
144
+
145
+ return abs_path.startswith(abs_root) or abs_path == abs_root.rstrip(os.sep)
146
+
147
+
148
+ def _get_task_file_path(task_data: Dict[str, Any]) -> Optional[str]:
149
+ """
150
+ Extract file_path from task metadata.
151
+
152
+ Args:
153
+ task_data: Task data dictionary
154
+
155
+ Returns:
156
+ File path string or None if not set
157
+ """
158
+ return task_data.get("metadata", {}).get("file_path")
159
+
160
+
161
+ def _get_retry_count(task_data: Dict[str, Any]) -> int:
162
+ """
163
+ Get retry count from task metadata.
164
+
165
+ Args:
166
+ task_data: Task data dictionary
167
+
168
+ Returns:
169
+ Retry count (0 if not set)
170
+ """
171
+ return task_data.get("metadata", {}).get("retry_count", 0)
172
+
173
+
174
+ def _has_direct_dependency(
175
+ hierarchy: Dict[str, Any],
176
+ task_a_id: str,
177
+ task_a_data: Dict[str, Any],
178
+ task_b_id: str,
179
+ task_b_data: Dict[str, Any],
180
+ ) -> bool:
181
+ """
182
+ Check if two tasks have a direct dependency relationship.
183
+
184
+ Args:
185
+ hierarchy: Spec hierarchy dictionary
186
+ task_a_id: First task ID
187
+ task_a_data: First task data
188
+ task_b_id: Second task ID
189
+ task_b_data: Second task data
190
+
191
+ Returns:
192
+ True if one task blocks or is blocked by the other
193
+ """
194
+ a_deps = task_a_data.get("dependencies", {})
195
+ b_deps = task_b_data.get("dependencies", {})
196
+
197
+ # Check if A blocks B or B blocks A
198
+ if task_b_id in a_deps.get("blocks", []):
199
+ return True
200
+ if task_a_id in b_deps.get("blocks", []):
201
+ return True
202
+
203
+ # Check blocked_by relationships
204
+ if task_b_id in a_deps.get("blocked_by", []):
205
+ return True
206
+ if task_a_id in b_deps.get("blocked_by", []):
207
+ return True
208
+
209
+ return False
210
+
211
+
212
+ def _is_in_active_phase(
213
+ spec_data: Dict[str, Any],
214
+ task_id: str,
215
+ task_data: Dict[str, Any],
216
+ active_phases: List[str],
217
+ ) -> bool:
218
+ """
219
+ Check if task belongs to one of the active phases.
220
+
221
+ Args:
222
+ spec_data: Loaded spec data
223
+ task_id: Task identifier
224
+ task_data: Task data dictionary
225
+ active_phases: List of active phase IDs
226
+
227
+ Returns:
228
+ True if task is in an active phase
229
+ """
230
+ hierarchy = spec_data.get("hierarchy", {})
231
+
232
+ # Walk up parent chain to find phase
233
+ current = task_data
234
+ while current:
235
+ parent_id = current.get("parent")
236
+ if not parent_id:
237
+ return False
238
+
239
+ if parent_id in active_phases:
240
+ return True
241
+
242
+ parent = hierarchy.get(parent_id)
243
+ if not parent:
244
+ return False
245
+
246
+ if parent.get("type") == "phase":
247
+ # Found phase but it's not in active list
248
+ return parent_id in active_phases
249
+
250
+ current = parent
251
+
252
+ return False
253
+
254
+
255
def get_independent_tasks(
    spec_id: str,
    max_tasks: int = DEFAULT_MAX_TASKS,
    specs_dir: Optional[Path] = None,
    project_root: Optional[Path] = None,
) -> Tuple[List[Tuple[str, Dict[str, Any]]], Optional[str]]:
    """
    Find multiple independent tasks that can be executed in parallel.

    Independent tasks have:
    - No dependency relationships between them (blocks/blocked_by)
    - Different file paths (or no path conflicts via ancestry)
    - No tasks without file_path (those are EXCLUSIVE BARRIERS)

    Tasks are filtered to:
    - Status = pending (not failed, in_progress, or completed)
    - Not blocked by incomplete dependencies
    - Not exceeding retry threshold
    - Within active phases only
    - Leaf tasks preferred (no children)

    Security:
    - All file paths are validated to be within project root

    Args:
        spec_id: Specification identifier
        max_tasks: Maximum number of tasks to return (default 3)
        specs_dir: Optional specs directory path
        project_root: Optional project root for path validation

    Returns:
        Tuple of:
        - List of (task_id, task_data) tuples for independent tasks
        - Error message string if operation failed, None on success

    Note:
        Independence is file-based only. Tasks may have logical coupling
        that this function cannot detect. The caller should be aware of
        this limitation.
    """
    # Load spec
    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return [], f"Spec '{spec_id}' not found"

    hierarchy = spec_data.get("hierarchy", {})
    if not hierarchy:
        return [], f"Spec '{spec_id}' has no hierarchy"

    # Get active phases
    active_phases = _get_active_phases(spec_data)
    if not active_phases:
        # No error - spec may be complete or all phases blocked
        # Caller should check spec_complete flag
        return [], None

    # Collect candidate tasks.  Each filter below is a hard exclusion;
    # a task must pass all of them to become a candidate.
    candidates: List[Tuple[str, Dict[str, Any]]] = []

    for task_id, task_data in hierarchy.items():
        # Must be a task type (not phase, spec, etc.)
        if task_data.get("type") not in ("task", "subtask", "verify"):
            continue

        # Must be pending status
        if task_data.get("status") != "pending":
            continue

        # Skip if retry count exceeded
        if _get_retry_count(task_data) >= MAX_RETRY_COUNT:
            continue

        # Must be unblocked
        if not is_unblocked(spec_data, task_id, task_data):
            continue

        # Must be in an active phase
        if not _is_in_active_phase(spec_data, task_id, task_data, active_phases):
            continue

        # Prefer leaf tasks (no children)
        children = task_data.get("children", [])
        if children:
            # Skip parent tasks - their children should be selected instead
            continue

        # Validate file path if present
        file_path = _get_task_file_path(task_data)
        if file_path and not _is_within_project_root(file_path, project_root):
            # Skip tasks with invalid paths (security measure)
            continue

        candidates.append((task_id, task_data))

    if not candidates:
        return [], None  # No error, just no candidates

    # Sort candidates by task_id for deterministic ordering
    candidates.sort(key=lambda x: x[0])

    # Greedy selection of independent tasks: walk candidates in sorted
    # order and keep each one that does not conflict with anything
    # already selected.
    selected: List[Tuple[str, Dict[str, Any]]] = []

    for task_id, task_data in candidates:
        if len(selected) >= max_tasks:
            break

        file_path = _get_task_file_path(task_data)

        # CRITICAL: Tasks without file_path are EXCLUSIVE BARRIERS
        # They cannot run in parallel with anything
        # NOTE(review): because candidates are ordered by task_id, a
        # barrier encountered mid-scan also stops selection of any later
        # non-barrier candidates — presumably intentional; confirm.
        if not file_path:
            if not selected:
                # If nothing selected yet, this barrier task can be the only one
                selected.append((task_id, task_data))
            # Either way, stop selecting more tasks
            break

        # Check independence against all already-selected tasks
        is_independent = True

        for sel_id, sel_data in selected:
            sel_path = _get_task_file_path(sel_data)

            # Check for direct dependency
            if _has_direct_dependency(hierarchy, task_id, task_data, sel_id, sel_data):
                is_independent = False
                break

            # Check for file path conflict
            if _paths_conflict(file_path, sel_path):
                is_independent = False
                break

        if is_independent:
            selected.append((task_id, task_data))

    return selected, None
393
+
394
+
395
# Token budget constants — used by _estimate_tokens and prepare_batch_context.
DEFAULT_TOKEN_BUDGET = 50000
"""Default token budget for batch context preparation."""

TOKEN_SAFETY_MARGIN = 0.15
"""15% safety margin for token estimation."""

CHARS_PER_TOKEN = 3.0
"""Conservative character-to-token ratio."""

STALE_TASK_THRESHOLD_HOURS = 1.0
"""Hours before an in_progress task is considered stale."""

# Autonomous mode guardrail constants — used by _check_autonomous_limits.
MAX_CONSECUTIVE_ERRORS = 3
"""Maximum consecutive errors before autonomous mode pauses."""

CONTEXT_LIMIT_PERCENTAGE = 85.0
"""Context usage percentage that triggers autonomous mode pause."""
415
+
416
def _estimate_tokens(text: str) -> int:
    """
    Estimate token count for *text* with a conservative heuristic.

    The estimate is ``len(text) / CHARS_PER_TOKEN`` inflated by
    ``TOKEN_SAFETY_MARGIN`` and truncated to an int.

    Args:
        text: Text to estimate tokens for

    Returns:
        Estimated token count
    """
    if not text:
        return 0
    padded = (len(text) / CHARS_PER_TOKEN) * (1 + TOKEN_SAFETY_MARGIN)
    return int(padded)
432
+
433
+
434
def _get_stale_in_progress_tasks(
    spec_data: Dict[str, Any],
    threshold_hours: float = STALE_TASK_THRESHOLD_HOURS,
) -> List[Tuple[str, Dict[str, Any]]]:
    """
    Find in_progress tasks that have been running for too long.

    A task counts as stale when its ``metadata.started_at`` timestamp is
    missing, unparseable, or older than *threshold_hours*.

    Args:
        spec_data: Loaded spec data
        threshold_hours: Hours before a task is considered stale

    Returns:
        List of (task_id, task_data) tuples for stale tasks
    """
    from datetime import datetime, timezone

    now = datetime.now(timezone.utc)
    stale: List[Tuple[str, Dict[str, Any]]] = []

    for task_id, task_data in spec_data.get("hierarchy", {}).items():
        if task_data.get("type") not in ("task", "subtask", "verify"):
            continue
        if task_data.get("status") != "in_progress":
            continue

        started_raw = task_data.get("metadata", {}).get("started_at")
        if started_raw:
            try:
                # Accept both "Z" and explicit "+00:00" UTC suffixes.
                started = datetime.fromisoformat(started_raw.replace("Z", "+00:00"))
                fresh = (now - started).total_seconds() / 3600 < threshold_hours
            except (ValueError, TypeError):
                # Unparseable timestamp: err on the side of reporting it.
                stale.append((task_id, task_data))
                continue
            if fresh:
                continue
        # Missing timestamp, or elapsed time at/over the threshold.
        stale.append((task_id, task_data))

    return stale
478
+
479
+
480
+ def _build_dependency_graph(
481
+ spec_data: Dict[str, Any],
482
+ task_ids: List[str],
483
+ ) -> Dict[str, Any]:
484
+ """
485
+ Build a dependency graph for the given tasks.
486
+
487
+ Includes immediate upstream dependencies for context.
488
+
489
+ Args:
490
+ spec_data: Loaded spec data
491
+ task_ids: List of task IDs to include
492
+
493
+ Returns:
494
+ Dependency graph with nodes and edges
495
+ """
496
+ hierarchy = spec_data.get("hierarchy", {})
497
+ nodes: Dict[str, Dict[str, Any]] = {}
498
+ edges: List[Dict[str, str]] = []
499
+
500
+ # Add target tasks
501
+ for task_id in task_ids:
502
+ task_data = hierarchy.get(task_id, {})
503
+ nodes[task_id] = {
504
+ "id": task_id,
505
+ "title": task_data.get("title", ""),
506
+ "status": task_data.get("status", ""),
507
+ "file_path": task_data.get("metadata", {}).get("file_path"),
508
+ "is_target": True,
509
+ }
510
+
511
+ # Add upstream dependencies
512
+ deps = task_data.get("dependencies", {})
513
+ for dep_id in deps.get("blocked_by", []):
514
+ dep_data = hierarchy.get(dep_id, {})
515
+ if dep_id not in nodes:
516
+ nodes[dep_id] = {
517
+ "id": dep_id,
518
+ "title": dep_data.get("title", ""),
519
+ "status": dep_data.get("status", ""),
520
+ "file_path": dep_data.get("metadata", {}).get("file_path"),
521
+ "is_target": False,
522
+ }
523
+ edges.append({"from": dep_id, "to": task_id, "type": "blocks"})
524
+
525
+ return {
526
+ "nodes": list(nodes.values()),
527
+ "edges": edges,
528
+ }
529
+
530
+
531
def _check_all_blocked(spec_data: Dict[str, Any]) -> bool:
    """
    Report whether every remaining pending task is blocked.

    Args:
        spec_data: Loaded spec data

    Returns:
        True if all pending tasks are blocked
    """
    from foundry_mcp.core.task import is_unblocked

    hierarchy = spec_data.get("hierarchy", {})

    pending = (
        (task_id, task_data)
        for task_id, task_data in hierarchy.items()
        if task_data.get("type") in ("task", "subtask", "verify")
        and task_data.get("status") == "pending"
    )

    # One unblocked pending task is enough to keep the spec movable.
    return not any(is_unblocked(spec_data, tid, tdata) for tid, tdata in pending)
555
+
556
+
557
def _check_autonomous_limits(
    autonomous_session: Optional[Any] = None,
    session_stats: Optional[Any] = None,
    session_limits: Optional[Any] = None,
    spec_data: Optional[Dict[str, Any]] = None,
    max_errors: int = MAX_CONSECUTIVE_ERRORS,
    context_limit_pct: float = CONTEXT_LIMIT_PERCENTAGE,
) -> Optional[str]:
    """
    Decide whether autonomous mode must pause due to resource limits.

    Inspects error counts, context/token usage, consultation counts, and
    the spec's blocked state. All session attributes are read defensively
    with ``getattr`` so partially-populated session objects are tolerated.

    Args:
        autonomous_session: AutonomousSession instance (from cli.context)
        session_stats: SessionStats instance with error/consultation counts
        session_limits: SessionLimits instance with max thresholds
        spec_data: Loaded spec data for checking blocked tasks
        max_errors: Maximum consecutive errors before pause (default 3)
        context_limit_pct: Context usage % that triggers pause (default 85.0)

    Returns:
        pause_reason string if limits hit, None if OK to continue:
        - "error": Too many consecutive errors
        - "context": Context/token budget nearing limit
        - "blocked": All remaining tasks are blocked
        - "limit": Session consultation/token limit reached
        - None: OK to continue autonomous execution

    Note:
        Updates autonomous_session.pause_reason in-place when limits are hit.
        The caller should check the return value and act accordingly.
    """
    if autonomous_session is None:
        return None

    # A previously recorded pause sticks until cleared by the caller.
    if autonomous_session.pause_reason is not None:
        return autonomous_session.pause_reason

    if not autonomous_session.enabled:
        return None

    reason: Optional[str] = None

    # 1. Error budget.
    if session_stats is not None:
        if getattr(session_stats, "errors_encountered", 0) >= max_errors:
            reason = "error"

    # 2. Context/token and consultation budgets.
    if reason is None and session_stats is not None and session_limits is not None:
        token_cap = getattr(session_limits, "max_context_tokens", 0)
        tokens_used = getattr(session_stats, "estimated_tokens_used", 0)
        if token_cap > 0 and (tokens_used / token_cap) * 100 >= context_limit_pct:
            reason = "context"

        consult_cap = getattr(session_limits, "max_consultations", 0)
        consults = getattr(session_stats, "consultation_count", 0)
        if consult_cap > 0 and consults >= consult_cap:
            # The consultation cap overrides a context warning set above.
            reason = "limit"

    # 3. Nothing runnable remains.
    if reason is None and spec_data is not None and _check_all_blocked(spec_data):
        reason = "blocked"

    if reason is not None:
        autonomous_session.pause_reason = reason

    return reason
638
+
639
+
640
def prepare_batch_context(
    spec_id: str,
    max_tasks: int = DEFAULT_MAX_TASKS,
    token_budget: int = DEFAULT_TOKEN_BUDGET,
    specs_dir: Optional[Path] = None,
    project_root: Optional[Path] = None,
) -> Tuple[Dict[str, Any], Optional[str]]:
    """
    Prepare context for batch parallel task execution.

    Finds independent tasks and prepares minimal context for each,
    staying within token budget.

    Args:
        spec_id: Specification identifier
        max_tasks: Maximum tasks to include
        token_budget: Maximum tokens for combined context
        specs_dir: Optional specs directory path
        project_root: Optional project root for path validation

    Returns:
        Tuple of:
        - Batch context dict with tasks, warnings, and metadata
        - Error message string if operation failed, None on success
    """
    from foundry_mcp.core.task import (
        check_dependencies,
        get_parent_context,
        get_phase_context,
    )

    # Get independent tasks
    tasks, error = get_independent_tasks(
        spec_id=spec_id,
        max_tasks=max_tasks,
        specs_dir=specs_dir,
        project_root=project_root,
    )

    if error:
        return {}, error

    # Load spec for additional context
    # NOTE(review): this reloads the spec that get_independent_tasks
    # already loaded — a second disk read per call; confirm acceptable.
    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return {}, f"Spec '{spec_id}' not found"

    # Check for spec completion
    hierarchy = spec_data.get("hierarchy", {})
    all_tasks = [
        node for node in hierarchy.values()
        if node.get("type") in ("task", "subtask", "verify")
    ]
    completed_count = sum(1 for t in all_tasks if t.get("status") == "completed")
    pending_count = sum(1 for t in all_tasks if t.get("status") == "pending")
    # Complete = no pending work and at least one completed task.
    spec_complete = pending_count == 0 and completed_count > 0

    # Check if all remaining are blocked
    all_blocked = _check_all_blocked(spec_data) if not spec_complete else False

    if not tasks:
        # No candidates: return an empty-but-shaped payload so callers can
        # still read spec_complete/all_blocked flags.
        return {
            "tasks": [],
            "task_count": 0,
            "spec_complete": spec_complete,
            "all_blocked": all_blocked,
            "warnings": [],
            "stale_tasks": [],
            "dependency_graph": {"nodes": [], "edges": []},
        }, None

    # Prepare context for each task with token budgeting
    task_contexts: List[Dict[str, Any]] = []
    used_tokens = 0
    # NOTE(review): despite the name, this set holds phase IDs — only the
    # first task in each phase carries the phase context; confirm intended.
    seen_files: set = set()  # For deduplication
    warnings: List[str] = []

    task_ids = [t[0] for t in tasks]

    for task_id, task_data in tasks:
        task_context: Dict[str, Any] = {
            "task_id": task_id,
            "title": task_data.get("title", ""),
            "type": task_data.get("type", "task"),
            "status": task_data.get("status", "pending"),
            "metadata": task_data.get("metadata", {}),
        }

        # Add dependencies
        deps = check_dependencies(spec_data, task_id)
        task_context["dependencies"] = deps

        # Add phase context (shared across batch, deduplicated)
        phase_context = get_phase_context(spec_data, task_id)
        if phase_context:
            phase_id = phase_context.get("id")
            if phase_id not in seen_files:
                task_context["phase"] = phase_context
                seen_files.add(phase_id)

        # Add parent context
        parent_context = get_parent_context(spec_data, task_id)
        if parent_context:
            task_context["parent"] = {
                "id": parent_context.get("id"),
                "title": parent_context.get("title"),
                "position_label": parent_context.get("position_label"),
            }

        # Estimate tokens for this task context
        # (the repeated `import json` is harmless — Python caches modules)
        import json
        context_json = json.dumps(task_context)
        context_tokens = _estimate_tokens(context_json)

        # Enforce the budget before committing this task's context.
        if used_tokens + context_tokens > token_budget:
            warnings.append(
                f"Token budget exceeded at task {len(task_contexts) + 1}. "
                f"Returning {len(task_contexts)} tasks."
            )
            break

        used_tokens += context_tokens
        task_contexts.append(task_context)

    # Check for stale in_progress tasks
    stale_tasks = _get_stale_in_progress_tasks(spec_data)
    stale_info = [
        {"task_id": t[0], "title": t[1].get("title", "")}
        for t in stale_tasks
    ]
    if stale_info:
        warnings.append(
            f"Found {len(stale_info)} stale in_progress task(s) (>1hr). "
            "Consider resetting them."
        )

    # Build dependency graph (only for the tasks that fit the budget)
    dep_graph = _build_dependency_graph(spec_data, task_ids[:len(task_contexts)])

    # Add logical coupling warning
    warnings.append(
        "Note: Tasks are file-independent but may have logical coupling "
        "that cannot be detected automatically."
    )

    return {
        "tasks": task_contexts,
        "task_count": len(task_contexts),
        "spec_complete": spec_complete,
        "all_blocked": all_blocked,
        "warnings": warnings,
        "stale_tasks": stale_info,
        "dependency_graph": dep_graph,
        "token_estimate": used_tokens,
    }, None
795
+
796
+
797
def start_batch(
    spec_id: str,
    task_ids: List[str],
    specs_dir: Optional[Path] = None,
) -> Tuple[Dict[str, Any], Optional[str]]:
    """
    Atomically start multiple tasks as in_progress.

    Validates all tasks can be started before making any changes
    (all-or-nothing: a single invalid task rejects the whole batch).
    Uses atomic file write to prevent corruption on crash.

    Args:
        spec_id: Specification identifier
        task_ids: List of task IDs to start
        specs_dir: Optional specs directory path

    Returns:
        Tuple of:
            - Result dict with started task IDs and any warnings
            - Error message string if operation failed, None on success
    """
    from datetime import datetime, timezone
    from foundry_mcp.core.spec import load_spec, save_spec
    from foundry_mcp.core.task import is_unblocked

    if not task_ids:
        return {}, "No task IDs provided"

    # Load spec
    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return {}, f"Spec '{spec_id}' not found"

    hierarchy = spec_data.get("hierarchy", {})

    # Phase 1: Validate ALL tasks can be started (no changes yet)
    validation_errors: List[str] = []
    tasks_to_start: List[Tuple[str, Dict[str, Any]]] = []
    # Duplicate IDs would pass individual validation (same hierarchy entry)
    # and could slip past the pairwise conflict check when the task has no
    # file path, starting the same task twice — reject them explicitly.
    seen_ids: set = set()

    for task_id in task_ids:
        if task_id in seen_ids:
            validation_errors.append(f"Task '{task_id}' listed more than once")
            continue
        seen_ids.add(task_id)

        task_data = hierarchy.get(task_id)
        if not task_data:
            validation_errors.append(f"Task '{task_id}' not found")
            continue

        task_type = task_data.get("type")
        if task_type not in ("task", "subtask", "verify"):
            validation_errors.append(f"'{task_id}' is not a task type (is {task_type})")
            continue

        status = task_data.get("status")
        if status == "in_progress":
            validation_errors.append(f"Task '{task_id}' is already in_progress")
            continue
        if status == "completed":
            validation_errors.append(f"Task '{task_id}' is already completed")
            continue
        if status == "blocked":
            validation_errors.append(f"Task '{task_id}' is blocked")
            continue

        # Check if unblocked (all dependencies resolved)
        if not is_unblocked(spec_data, task_id, task_data):
            validation_errors.append(f"Task '{task_id}' has unresolved dependencies")
            continue

        tasks_to_start.append((task_id, task_data))

    # Re-validate independence between selected tasks (pairwise)
    for i, (task_id_a, task_data_a) in enumerate(tasks_to_start):
        for task_id_b, task_data_b in tasks_to_start[i + 1:]:
            # Check for direct dependency between the pair
            if _has_direct_dependency(hierarchy, task_id_a, task_data_a, task_id_b, task_data_b):
                validation_errors.append(
                    f"Tasks '{task_id_a}' and '{task_id_b}' have dependencies between them"
                )
                # Already an error for this pair; skip the path check
                continue

            # Check for file path conflict (both tasks touching the same file)
            path_a = _get_task_file_path(task_data_a)
            path_b = _get_task_file_path(task_data_b)
            if _paths_conflict(path_a, path_b):
                validation_errors.append(
                    f"Tasks '{task_id_a}' and '{task_id_b}' target conflicting paths"
                )

    # Fail if any validation errors (all-or-nothing)
    if validation_errors:
        return {
            "started": [],
            "errors": validation_errors,
        }, f"Validation failed: {len(validation_errors)} error(s)"

    # Phase 2: Apply changes atomically
    now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    started_ids: List[str] = []

    for task_id, _ in tasks_to_start:
        task = hierarchy[task_id]
        task["status"] = "in_progress"
        # Track when task was started for stale detection
        metadata = task.get("metadata")
        if metadata is None:
            metadata = {}
            task["metadata"] = metadata
        metadata["started_at"] = now
        started_ids.append(task_id)

    # Save atomically (save_spec uses temp file + rename)
    if not save_spec(spec_id, spec_data, specs_dir):
        return {}, "Failed to save spec file atomically"

    return {
        "started": started_ids,
        "started_count": len(started_ids),
        "started_at": now,
    }, None
914
+
915
+
916
def complete_batch(
    spec_id: str,
    completions: List[Dict[str, Any]],
    specs_dir: Optional[Path] = None,
) -> Tuple[Dict[str, Any], Optional[str]]:
    """
    Complete multiple tasks with individual completion notes.

    Handles partial success where some tasks complete and others fail.
    Failed tasks get 'failed' status with retry_count incremented.

    Args:
        spec_id: Specification identifier
        completions: List of completion dicts with:
            - task_id: Task ID to complete
            - completion_note: Note describing what was accomplished
            - success: True for completed, False for failed
        specs_dir: Optional specs directory path

    Returns:
        Tuple of:
            - Result dict with per-task results and summary
            - Error message string if entire operation failed, None on success
    """
    from datetime import datetime, timezone
    from foundry_mcp.core.spec import load_spec, save_spec
    from foundry_mcp.core.journal import add_journal_entry
    from foundry_mcp.core.progress import sync_computed_fields, update_parent_status

    if not completions:
        return {}, "No completions provided"

    # Validate completions structure up front, before any mutation
    for i, completion in enumerate(completions):
        if not isinstance(completion, dict):
            return {}, f"Completion {i} must be a dict"
        if "task_id" not in completion:
            return {}, f"Completion {i} missing required 'task_id'"
        if "success" not in completion:
            return {}, f"Completion {i} missing required 'success' flag"

    # Load spec
    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return {}, f"Spec '{spec_id}' not found"

    hierarchy = spec_data.get("hierarchy", {})
    now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

    # Process each completion; per-task outcomes collected in `results`
    results: Dict[str, Dict[str, Any]] = {}
    completed_count = 0
    failed_count = 0

    for completion in completions:
        task_id = completion["task_id"]
        success = completion.get("success", True)
        completion_note = completion.get("completion_note", "")

        task_data = hierarchy.get(task_id)
        if not task_data:
            results[task_id] = {
                "status": "error",
                "error": f"Task '{task_id}' not found",
            }
            failed_count += 1
            continue

        task_type = task_data.get("type")
        if task_type not in ("task", "subtask", "verify"):
            results[task_id] = {
                "status": "error",
                "error": f"'{task_id}' is not a task type (is {task_type})",
            }
            failed_count += 1
            continue

        current_status = task_data.get("status")
        if current_status == "completed":
            results[task_id] = {
                "status": "skipped",
                "error": "Task already completed",
            }
            continue

        if success:
            # Mark as completed
            task_data["status"] = "completed"
            metadata = task_data.get("metadata")
            if metadata is None:
                metadata = {}
                task_data["metadata"] = metadata
            metadata["completed_at"] = now

            # Auto-calculate actual_hours if started_at exists and not manually set.
            # A malformed started_at must not abort the whole batch after earlier
            # tasks have already been mutated in memory, so parse defensively.
            if "started_at" in metadata and "actual_hours" not in metadata:
                try:
                    started_at = datetime.fromisoformat(metadata["started_at"].replace("Z", "+00:00"))
                    completed_at = datetime.fromisoformat(now.replace("Z", "+00:00"))
                    metadata["actual_hours"] = round((completed_at - started_at).total_seconds() / 3600, 2)
                except ValueError:
                    # Unparseable timestamp: skip auto-calculation, leave unset
                    pass

            # Add journal entry for completion
            add_journal_entry(
                spec_data=spec_data,
                title=f"Completed: {task_data.get('title', task_id)}",
                content=completion_note or "Task completed",
                entry_type="status_change",
                task_id=task_id,
            )

            # Update parent status (may roll a phase/parent to completed)
            update_parent_status(spec_data, task_id)

            results[task_id] = {
                "status": "completed",
                "completed_at": now,
            }
            completed_count += 1
        else:
            # Mark as failed and increment retry count
            task_data["status"] = "failed"
            metadata = task_data.get("metadata")
            if metadata is None:
                metadata = {}
                task_data["metadata"] = metadata

            retry_count = metadata.get("retry_count", 0)
            metadata["retry_count"] = retry_count + 1
            metadata["failed_at"] = now
            metadata["failure_reason"] = completion_note or "Task failed"

            # Add journal entry for failure
            add_journal_entry(
                spec_data=spec_data,
                title=f"Failed: {task_data.get('title', task_id)}",
                content=completion_note or "Task failed",
                entry_type="blocker",
                task_id=task_id,
                metadata={"retry_count": metadata["retry_count"]},
            )

            results[task_id] = {
                "status": "failed",
                "retry_count": metadata["retry_count"],
                "failed_at": now,
            }
            failed_count += 1

    # Recalculate progress counters from the updated hierarchy
    sync_computed_fields(spec_data)

    # Save atomically
    if not save_spec(spec_id, spec_data, specs_dir):
        return {}, "Failed to save spec file atomically"

    return {
        "results": results,
        "completed_count": completed_count,
        "failed_count": failed_count,
        "total_processed": len(completions),
    }, None
1076
+
1077
+
1078
def reset_batch(
    spec_id: str,
    task_ids: Optional[List[str]] = None,
    threshold_hours: float = STALE_TASK_THRESHOLD_HOURS,
    specs_dir: Optional[Path] = None,
) -> Tuple[Dict[str, Any], Optional[str]]:
    """
    Reset stale or specified in_progress tasks back to pending.

    When task_ids is given, those specific tasks are reset; otherwise
    in_progress tasks older than threshold_hours are discovered and reset.

    Args:
        spec_id: Specification identifier
        task_ids: Optional list of specific task IDs to reset
        threshold_hours: Hours before a task is considered stale (default 1.0)
        specs_dir: Optional specs directory path

    Returns:
        Tuple of:
            - Result dict with reset task IDs and count
            - Error message string if operation failed, None on success
    """
    from foundry_mcp.core.spec import load_spec, save_spec

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return {}, f"Spec '{spec_id}' not found"

    hierarchy = spec_data.get("hierarchy", {})

    # Build the reset candidate list: explicit IDs, or auto-detected stale tasks.
    validation_errors: List[str] = []
    tasks_to_reset: List[Tuple[str, Dict[str, Any]]]

    if not task_ids:
        # Auto-discovery path: anything in_progress beyond the threshold
        tasks_to_reset = _get_stale_in_progress_tasks(spec_data, threshold_hours)
    else:
        tasks_to_reset = []
        for task_id in task_ids:
            task_data = hierarchy.get(task_id)

            # Guard clauses: unknown ID, wrong node type, wrong status
            if not task_data:
                validation_errors.append(f"Task '{task_id}' not found")
                continue

            task_type = task_data.get("type")
            if task_type not in ("task", "subtask", "verify"):
                validation_errors.append(f"'{task_id}' is not a task type (is {task_type})")
                continue

            status = task_data.get("status")
            if status != "in_progress":
                validation_errors.append(
                    f"Task '{task_id}' is not in_progress (status: {status})"
                )
                continue

            tasks_to_reset.append((task_id, task_data))

        # All requested IDs failed validation -> nothing to do, report errors
        if validation_errors and not tasks_to_reset:
            return {
                "reset": [],
                "errors": validation_errors,
            }, f"No valid tasks to reset: {len(validation_errors)} error(s)"

    if not tasks_to_reset:
        return {
            "reset": [],
            "reset_count": 0,
            "message": "No stale in_progress tasks found",
        }, None

    # Flip each candidate back to pending and drop its start timestamp
    reset_ids = []
    for task_id, task_data in tasks_to_reset:
        task_data["status"] = "pending"
        metadata = task_data.get("metadata")
        if metadata:
            metadata.pop("started_at", None)  # stale-detection clock restarts
        reset_ids.append(task_id)

    # Persist via atomic write
    if not save_spec(spec_id, spec_data, specs_dir):
        return {}, "Failed to save spec file atomically"

    result: Dict[str, Any] = {
        "reset": reset_ids,
        "reset_count": len(reset_ids),
    }
    if validation_errors:
        result["errors"] = validation_errors

    return result, None
1178
+
1179
+
1180
# Public API of the batch-operations module. Underscore-prefixed names are
# deliberately exported so the test suite can import them directly.
__all__ = [
    "get_independent_tasks",
    "prepare_batch_context",
    "start_batch",
    "complete_batch",
    "reset_batch",
    "DEFAULT_MAX_TASKS",
    "MAX_RETRY_COUNT",
    "DEFAULT_TOKEN_BUDGET",
    "STALE_TASK_THRESHOLD_HOURS",
    # Autonomous mode guardrails
    "MAX_CONSECUTIVE_ERRORS",
    "CONTEXT_LIMIT_PERCENTAGE",
    "_check_autonomous_limits",
    # Private helpers exposed for testing
    "_get_active_phases",
    "_paths_conflict",
    "_is_within_project_root",
    "_has_direct_dependency",
    "_estimate_tokens",
    "_get_stale_in_progress_tasks",
    "_check_all_blocked",
]