foundry-mcp 0.7.0__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. foundry_mcp/cli/__init__.py +0 -13
  2. foundry_mcp/cli/commands/session.py +1 -8
  3. foundry_mcp/cli/context.py +39 -0
  4. foundry_mcp/config.py +381 -7
  5. foundry_mcp/core/batch_operations.py +1196 -0
  6. foundry_mcp/core/discovery.py +1 -1
  7. foundry_mcp/core/llm_config.py +8 -0
  8. foundry_mcp/core/naming.py +25 -2
  9. foundry_mcp/core/prometheus.py +0 -13
  10. foundry_mcp/core/providers/__init__.py +12 -0
  11. foundry_mcp/core/providers/base.py +39 -0
  12. foundry_mcp/core/providers/claude.py +45 -1
  13. foundry_mcp/core/providers/codex.py +64 -3
  14. foundry_mcp/core/providers/cursor_agent.py +22 -3
  15. foundry_mcp/core/providers/detectors.py +34 -7
  16. foundry_mcp/core/providers/gemini.py +63 -1
  17. foundry_mcp/core/providers/opencode.py +95 -71
  18. foundry_mcp/core/providers/package-lock.json +4 -4
  19. foundry_mcp/core/providers/package.json +1 -1
  20. foundry_mcp/core/providers/validation.py +128 -0
  21. foundry_mcp/core/research/memory.py +103 -0
  22. foundry_mcp/core/research/models.py +783 -0
  23. foundry_mcp/core/research/providers/__init__.py +40 -0
  24. foundry_mcp/core/research/providers/base.py +242 -0
  25. foundry_mcp/core/research/providers/google.py +507 -0
  26. foundry_mcp/core/research/providers/perplexity.py +442 -0
  27. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  28. foundry_mcp/core/research/providers/tavily.py +383 -0
  29. foundry_mcp/core/research/workflows/__init__.py +5 -2
  30. foundry_mcp/core/research/workflows/base.py +106 -12
  31. foundry_mcp/core/research/workflows/consensus.py +160 -17
  32. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  33. foundry_mcp/core/responses.py +240 -0
  34. foundry_mcp/core/spec.py +1 -0
  35. foundry_mcp/core/task.py +141 -12
  36. foundry_mcp/core/validation.py +6 -1
  37. foundry_mcp/server.py +0 -52
  38. foundry_mcp/tools/unified/__init__.py +37 -18
  39. foundry_mcp/tools/unified/authoring.py +0 -33
  40. foundry_mcp/tools/unified/environment.py +202 -29
  41. foundry_mcp/tools/unified/plan.py +20 -1
  42. foundry_mcp/tools/unified/provider.py +0 -40
  43. foundry_mcp/tools/unified/research.py +644 -19
  44. foundry_mcp/tools/unified/review.py +5 -2
  45. foundry_mcp/tools/unified/review_helpers.py +16 -1
  46. foundry_mcp/tools/unified/server.py +9 -24
  47. foundry_mcp/tools/unified/task.py +528 -9
  48. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +2 -1
  49. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/RECORD +52 -46
  50. foundry_mcp/cli/flags.py +0 -266
  51. foundry_mcp/core/feature_flags.py +0 -592
  52. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  53. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  54. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/tools/unified/task.py
@@ -57,6 +57,19 @@ from foundry_mcp.core.task import (
     update_task_metadata,
     update_task_requirements,
 )
+from foundry_mcp.core.batch_operations import (
+    prepare_batch_context,
+    start_batch,
+    complete_batch,
+    reset_batch,
+    DEFAULT_MAX_TASKS,
+    DEFAULT_TOKEN_BUDGET,
+    STALE_TASK_THRESHOLD_HOURS,
+)
+from foundry_mcp.cli.context import (
+    AutonomousSession,
+    get_context_tracker,
+)
 from foundry_mcp.core.validation import (
     VALID_VERIFICATION_TYPES,
     VERIFICATION_TYPE_MAPPING,
@@ -258,6 +271,303 @@ def _handle_prepare(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     return _attach_meta(result, request_id=request_id, duration_ms=elapsed_ms)


+def _handle_prepare_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """
+    Handle prepare-batch action for parallel task execution.
+
+    Returns multiple independent tasks with context for parallel implementation.
+    """
+    request_id = _request_id()
+    action = "prepare-batch"
+    spec_id = payload.get("spec_id")
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+
+    # Optional parameters with defaults
+    max_tasks = payload.get("max_tasks", DEFAULT_MAX_TASKS)
+    if not isinstance(max_tasks, int) or max_tasks < 1:
+        return _validation_error(
+            field="max_tasks",
+            action=action,
+            message="max_tasks must be a positive integer",
+            request_id=request_id,
+            code=ErrorCode.VALIDATION_ERROR,
+        )
+
+    token_budget = payload.get("token_budget", DEFAULT_TOKEN_BUDGET)
+    if not isinstance(token_budget, int) or token_budget < 1000:
+        return _validation_error(
+            field="token_budget",
+            action=action,
+            message="token_budget must be an integer >= 1000",
+            request_id=request_id,
+            code=ErrorCode.VALIDATION_ERROR,
+        )
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    result, error = prepare_batch_context(
+        spec_id=spec_id.strip(),
+        max_tasks=max_tasks,
+        token_budget=token_budget,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error:
+        _metrics.counter(_metric(action), labels={"status": "error"})
+        return asdict(
+            error_response(
+                error,
+                error_code=ErrorCode.OPERATION_FAILED,
+                error_type=ErrorType.VALIDATION,
+                request_id=request_id,
+            )
+        )
+
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+
+    # Build response with batch context
+    response = success_response(
+        spec_id=spec_id.strip(),
+        tasks=result.get("tasks", []),
+        task_count=result.get("task_count", 0),
+        spec_complete=result.get("spec_complete", False),
+        all_blocked=result.get("all_blocked", False),
+        stale_tasks=result.get("stale_tasks", []),
+        dependency_graph=result.get("dependency_graph", {}),
+        token_estimate=result.get("token_estimate", 0),
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+
+    warnings = result.get("warnings", [])
+    return _attach_meta(
+        asdict(response),
+        request_id=request_id,
+        duration_ms=elapsed_ms,
+        warnings=warnings if warnings else None,
+    )
+
+
+def _handle_start_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """
+    Handle start-batch action for atomically starting multiple tasks.
+
+    Validates all tasks can be started before making any changes.
+    """
+    request_id = _request_id()
+    action = "start-batch"
+    spec_id = payload.get("spec_id")
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+
+    task_ids = payload.get("task_ids")
+    if not isinstance(task_ids, list) or not task_ids:
+        return _validation_error(
+            field="task_ids",
+            action=action,
+            message="Provide a non-empty list of task IDs",
+            request_id=request_id,
+        )
+
+    # Validate all task_ids are strings
+    for i, tid in enumerate(task_ids):
+        if not isinstance(tid, str) or not tid.strip():
+            return _validation_error(
+                field=f"task_ids[{i}]",
+                action=action,
+                message="Each task ID must be a non-empty string",
+                request_id=request_id,
+                code=ErrorCode.VALIDATION_ERROR,
+            )
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    result, error = start_batch(
+        spec_id=spec_id.strip(),
+        task_ids=[tid.strip() for tid in task_ids],
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error:
+        _metrics.counter(_metric(action), labels={"status": "error"})
+        # Include partial results in error response
+        return asdict(
+            error_response(
+                error,
+                error_code=ErrorCode.OPERATION_FAILED,
+                error_type=ErrorType.VALIDATION,
+                request_id=request_id,
+                details=result if result else None,
+            )
+        )
+
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+
+    response = success_response(
+        spec_id=spec_id.strip(),
+        started=result.get("started", []),
+        started_count=result.get("started_count", 0),
+        started_at=result.get("started_at"),
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    return _attach_meta(asdict(response), request_id=request_id, duration_ms=elapsed_ms)
+
+
+def _handle_complete_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """Handle complete-batch action for completing multiple tasks with partial failure support."""
+    request_id = _request_id()
+    action = "complete-batch"
+    spec_id = payload.get("spec_id")
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(field="spec_id", action=action, message="Provide a non-empty spec identifier", request_id=request_id)
+
+    completions = payload.get("completions")
+    if not isinstance(completions, list) or not completions:
+        return _validation_error(field="completions", action=action, message="Provide a non-empty list of completions", request_id=request_id)
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    result, error = complete_batch(spec_id=spec_id.strip(), completions=completions, specs_dir=specs_dir)
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error:
+        _metrics.counter(_metric(action), labels={"status": "error"})
+        return asdict(error_response(error, error_code=ErrorCode.OPERATION_FAILED, error_type=ErrorType.VALIDATION, request_id=request_id, details=result if result else None))
+
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+
+    response = success_response(
+        spec_id=spec_id.strip(),
+        results=result.get("results", {}),
+        completed_count=result.get("completed_count", 0),
+        failed_count=result.get("failed_count", 0),
+        total_processed=result.get("total_processed", 0),
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    return _attach_meta(asdict(response), request_id=request_id, duration_ms=elapsed_ms)
+
+
+def _handle_reset_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """
+    Handle reset-batch action for resetting stale or specified in_progress tasks.
+
+    Resets tasks back to pending status and clears started_at timestamp.
+    If task_ids not provided, finds stale tasks automatically based on threshold.
+    """
+    request_id = _request_id()
+    action = "reset-batch"
+    spec_id = payload.get("spec_id")
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+
+    # Optional: specific task IDs to reset
+    task_ids = payload.get("task_ids")
+    if task_ids is not None:
+        if not isinstance(task_ids, list):
+            return _validation_error(
+                field="task_ids",
+                action=action,
+                message="task_ids must be a list of strings",
+                request_id=request_id,
+            )
+        # Validate all task_ids are strings
+        for i, tid in enumerate(task_ids):
+            if not isinstance(tid, str) or not tid.strip():
+                return _validation_error(
+                    field=f"task_ids[{i}]",
+                    action=action,
+                    message="Each task ID must be a non-empty string",
+                    request_id=request_id,
+                    code=ErrorCode.VALIDATION_ERROR,
+                )
+        task_ids = [tid.strip() for tid in task_ids]
+
+    # Optional: threshold in hours for stale detection
+    threshold_hours = payload.get("threshold_hours", STALE_TASK_THRESHOLD_HOURS)
+    if not isinstance(threshold_hours, (int, float)) or threshold_hours <= 0:
+        return _validation_error(
+            field="threshold_hours",
+            action=action,
+            message="threshold_hours must be a positive number",
+            request_id=request_id,
+        )
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    result, error = reset_batch(
+        spec_id=spec_id.strip(),
+        task_ids=task_ids,
+        threshold_hours=float(threshold_hours),
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error:
+        _metrics.counter(_metric(action), labels={"status": "error"})
+        return asdict(
+            error_response(
+                error,
+                error_code=ErrorCode.OPERATION_FAILED,
+                error_type=ErrorType.VALIDATION,
+                request_id=request_id,
+                details=result if result else None,
+            )
+        )
+
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+
+    response = success_response(
+        spec_id=spec_id.strip(),
+        reset=result.get("reset", []),
+        reset_count=result.get("reset_count", 0),
+        errors=result.get("errors"),
+        message=result.get("message"),
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    return _attach_meta(asdict(response), request_id=request_id, duration_ms=elapsed_ms)
+
+
 def _handle_next(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     request_id = _request_id()
     action = "next"
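Taken together, these four handlers give the unified task tool a batch workflow: prepare a set of independent tasks, start them atomically, complete them with partial-failure reporting, and reset stale ones. A minimal sketch of the payloads each handler validates, inferred from the checks above; the spec and task identifiers are hypothetical, and the defaults come from foundry_mcp.core.batch_operations (DEFAULT_MAX_TASKS, DEFAULT_TOKEN_BUDGET, STALE_TASK_THRESHOLD_HOURS):

# Hypothetical payloads for the new batch actions (identifiers are examples only).
prepare_batch_payload = {
    "spec_id": "example-spec",   # required non-empty string
    "max_tasks": 3,              # optional; positive int, defaults to DEFAULT_MAX_TASKS
    "token_budget": 20000,       # optional; int >= 1000, defaults to DEFAULT_TOKEN_BUDGET
}

start_batch_payload = {
    "spec_id": "example-spec",
    "task_ids": ["task-a", "task-b"],   # required non-empty list; all start or none do
}

complete_batch_payload = {
    "spec_id": "example-spec",
    "completions": [                    # required non-empty list; the entry shape is
        {"task_id": "task-a"},          # defined by core.batch_operations.complete_batch
    ],
}

reset_batch_payload = {
    "spec_id": "example-spec",
    # omit task_ids to auto-detect stale in_progress tasks
    "threshold_hours": 2.0,      # optional; positive number, defaults to STALE_TASK_THRESHOLD_HOURS
}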
@@ -1084,6 +1394,35 @@ def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     sync_computed_fields(spec_data)

     task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})
+
+    # Determine if commit is suggested based on git cadence config
+    suggest_commit = False
+    commit_scope: Optional[str] = None
+    commit_message_hint: Optional[str] = None
+
+    if config.git.enabled:
+        cadence = config.git.commit_cadence
+        hierarchy = spec_data.get("hierarchy", {})
+
+        if cadence == "task":
+            suggest_commit = True
+            commit_scope = "task"
+            commit_message_hint = f"task: {task_data.get('title', task_id.strip())}"
+        elif cadence == "phase":
+            # Check if parent phase just completed
+            parent_id = task_data.get("parent")
+            if parent_id:
+                parent_data = hierarchy.get(parent_id, {})
+                # Only suggest commit if parent is a phase and is now completed
+                if (
+                    parent_data.get("type") == "phase"
+                    and parent_data.get("status") == "completed"
+                ):
+                    suggest_commit = True
+                    commit_scope = "phase"
+                    commit_message_hint = (
+                        f"phase: {parent_data.get('title', parent_id)}"
+                    )
     add_journal_entry(
         spec_data,
         title=f"Task Completed: {task_data.get('title', task_id.strip())}",
@@ -1116,6 +1455,9 @@ def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
             "total_tasks": progress.get("total_tasks", 0),
             "percentage": progress.get("percentage", 0),
         },
+        suggest_commit=suggest_commit,
+        commit_scope=commit_scope,
+        commit_message_hint=commit_message_hint,
         request_id=request_id,
         telemetry={"duration_ms": round(elapsed_ms, 2)},
     )
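With both hunks applied, a successful complete response can carry commit guidance driven by the git cadence config. An illustrative slice of those fields, assuming config.git.enabled is true and commit_cadence is "task"; the task title is hypothetical and the full response envelope comes from success_response:

# Example commit-hint fields in a complete response (values are hypothetical).
commit_hint_fields = {
    "suggest_commit": True,
    "commit_scope": "task",   # becomes "phase" when the parent phase just completed
    "commit_message_hint": "task: Add retry logic to provider client",
}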
@@ -1433,6 +1775,12 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     action = "add"
     spec_id = payload.get("spec_id")
     parent = payload.get("parent")
+    phase_id = payload.get("phase_id")  # Alias for parent
+
+    # Use phase_id as parent if parent not provided
+    if parent is None and phase_id is not None:
+        parent = phase_id
+
     title = payload.get("title")
     description = payload.get("description")
     task_type = payload.get("task_type", "task")
@@ -1440,6 +1788,11 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     position = payload.get("position")
     file_path = payload.get("file_path")

+    # Research-specific parameters
+    research_type = payload.get("research_type")
+    blocking_mode = payload.get("blocking_mode")
+    query = payload.get("query")
+
     if not isinstance(spec_id, str) or not spec_id.strip():
         return _validation_error(
             field="spec_id",
@@ -1502,6 +1855,49 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
             code=ErrorCode.INVALID_FORMAT,
         )

+    # Validate research-specific parameters when task_type is "research"
+    if task_type == "research":
+        from foundry_mcp.core.validation import VALID_RESEARCH_TYPES, RESEARCH_BLOCKING_MODES
+
+        if research_type is not None and not isinstance(research_type, str):
+            return _validation_error(
+                field="research_type",
+                action=action,
+                message="research_type must be a string",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        if research_type and research_type not in VALID_RESEARCH_TYPES:
+            return _validation_error(
+                field="research_type",
+                action=action,
+                message=f"Must be one of: {', '.join(sorted(VALID_RESEARCH_TYPES))}",
+                request_id=request_id,
+            )
+        if blocking_mode is not None and not isinstance(blocking_mode, str):
+            return _validation_error(
+                field="blocking_mode",
+                action=action,
+                message="blocking_mode must be a string",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        if blocking_mode and blocking_mode not in RESEARCH_BLOCKING_MODES:
+            return _validation_error(
+                field="blocking_mode",
+                action=action,
+                message=f"Must be one of: {', '.join(sorted(RESEARCH_BLOCKING_MODES))}",
+                request_id=request_id,
+            )
+        if query is not None and not isinstance(query, str):
+            return _validation_error(
+                field="query",
+                action=action,
+                message="query must be a string",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+
     dry_run = payload.get("dry_run", False)
     if dry_run is not None and not isinstance(dry_run, bool):
         return _validation_error(
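Combined with the phase_id alias and the research parameters read earlier in _handle_add, a research task can now be added in a single call. A hedged sketch of such a payload with hypothetical values; the accepted research_type and blocking_mode values come from VALID_RESEARCH_TYPES and RESEARCH_BLOCKING_MODES in foundry_mcp.core.validation, which this diff does not list:

# Hypothetical "add" payload for a research task; placeholder values must match
# VALID_RESEARCH_TYPES / RESEARCH_BLOCKING_MODES, which are not shown in this diff.
add_research_payload = {
    "spec_id": "example-spec",
    "phase_id": "phase-1",               # accepted as an alias for "parent"
    "title": "Survey retry strategies",
    "task_type": "research",
    "research_type": "<one of VALID_RESEARCH_TYPES>",
    "blocking_mode": "<one of RESEARCH_BLOCKING_MODES>",
    "query": "best practices for retrying idempotent HTTP requests",
    "dry_run": True,                     # previews the task without writing the spec
}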
@@ -1542,16 +1938,22 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
            )

        elapsed_ms = (time.perf_counter() - start) * 1000
+        dry_run_data: Dict[str, Any] = {
+            "spec_id": spec_id.strip(),
+            "parent": parent.strip(),
+            "title": title.strip(),
+            "task_type": task_type,
+            "position": position,
+            "file_path": file_path.strip() if file_path else None,
+            "dry_run": True,
+        }
+        # Include research parameters in dry_run response
+        if task_type == "research":
+            dry_run_data["research_type"] = research_type
+            dry_run_data["blocking_mode"] = blocking_mode
+            dry_run_data["query"] = query
        response = success_response(
-            data={
-                "spec_id": spec_id.strip(),
-                "parent": parent.strip(),
-                "title": title.strip(),
-                "task_type": task_type,
-                "position": position,
-                "file_path": file_path.strip() if file_path else None,
-                "dry_run": True,
-            },
+            data=dry_run_data,
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
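For a dry run, the handler now echoes the research fields alongside the usual preview data. An illustrative dry_run_data value for the hypothetical research payload sketched above:

# What dry_run_data might contain for a research task (values are hypothetical).
dry_run_data_example = {
    "spec_id": "example-spec",
    "parent": "phase-1",
    "title": "Survey retry strategies",
    "task_type": "research",
    "position": None,
    "file_path": None,
    "dry_run": True,
    "research_type": "<one of VALID_RESEARCH_TYPES>",
    "blocking_mode": "<one of RESEARCH_BLOCKING_MODES>",
    "query": "best practices for retrying idempotent HTTP requests",
}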
@@ -1571,6 +1973,10 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
        position=position,
        file_path=file_path,
        specs_dir=specs_dir,
+        # Research-specific parameters
+        research_type=research_type,
+        blocking_mode=blocking_mode,
+        query=query,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

@@ -3087,12 +3493,114 @@ def _handle_fix_verification_types(
     return asdict(response)


+def _handle_session_config(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """
+    Handle session-config action: get/set autonomous mode preferences.
+
+    This action manages the ephemeral autonomous session state, allowing
+    agents to enable/disable autonomous mode and track task completion
+    during autonomous execution.
+
+    Parameters:
+        get: If true, just return current session config without changes
+        auto_mode: Set autonomous mode enabled (true) or disabled (false)
+
+    Returns:
+        Current session configuration including autonomous state
+    """
+    from datetime import datetime, timezone
+
+    request_id = _request_id()
+    action = "session-config"
+    start = time.perf_counter()
+
+    # Get parameters
+    get_only = payload.get("get", False)
+    auto_mode = payload.get("auto_mode")
+
+    # Get the context tracker and session
+    tracker = get_context_tracker()
+    session = tracker.get_or_create_session()
+
+    # Initialize autonomous if not present
+    if session.autonomous is None:
+        session.autonomous = AutonomousSession()
+
+    # If just getting, return current state
+    if get_only:
+        elapsed_ms = (time.perf_counter() - start) * 1000
+        response = success_response(
+            session_id=session.session_id,
+            autonomous=session.autonomous.to_dict(),
+            message="Current session configuration",
+            request_id=request_id,
+            telemetry={"duration_ms": round(elapsed_ms, 2)},
+        )
+        _metrics.counter(_metric(action), labels={"status": "success", "operation": "get"})
+        return asdict(response)
+
+    # Handle auto_mode setting
+    if auto_mode is not None:
+        if not isinstance(auto_mode, bool):
+            return _validation_error(
+                field="auto_mode",
+                action=action,
+                message="auto_mode must be a boolean (true/false)",
+                request_id=request_id,
+            )
+
+        previous_enabled = session.autonomous.enabled
+        session.autonomous.enabled = auto_mode
+
+        if auto_mode and not previous_enabled:
+            # Starting autonomous mode
+            session.autonomous.started_at = (
+                datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+            )
+            session.autonomous.tasks_completed = 0
+            session.autonomous.pause_reason = None
+        elif not auto_mode and previous_enabled:
+            # Stopping autonomous mode
+            session.autonomous.pause_reason = "user"
+
+    elapsed_ms = (time.perf_counter() - start) * 1000
+    response = success_response(
+        session_id=session.session_id,
+        autonomous=session.autonomous.to_dict(),
+        message="Autonomous mode enabled" if session.autonomous.enabled else "Autonomous mode disabled",
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.counter(_metric(action), labels={"status": "success", "operation": "set"})
+    return asdict(response)
+
+
 _ACTION_DEFINITIONS = [
     ActionDefinition(
         name="prepare",
         handler=_handle_prepare,
         summary="Prepare next actionable task context",
     ),
+    ActionDefinition(
+        name="prepare-batch",
+        handler=_handle_prepare_batch,
+        summary="Prepare multiple independent tasks for parallel execution",
+    ),
+    ActionDefinition(
+        name="start-batch",
+        handler=_handle_start_batch,
+        summary="Atomically start multiple tasks as in_progress",
+    ),
+    ActionDefinition(
+        name="complete-batch",
+        handler=_handle_complete_batch,
+        summary="Complete multiple tasks with partial failure support",
+    ),
+    ActionDefinition(
+        name="reset-batch",
+        handler=_handle_reset_batch,
+        summary="Reset stale or specified in_progress tasks to pending",
+    ),
     ActionDefinition(
         name="next", handler=_handle_next, summary="Return the next actionable task"
     ),
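The session-config handler is stateful but ephemeral: it reads and mutates the per-process session held by the context tracker rather than anything on disk. A minimal sketch of the payload shapes it accepts, based on the parameter handling above:

# Read the current autonomous-session state without changing it.
session_config_get = {"get": True}

# Enable autonomous mode; on the disabled-to-enabled transition the handler stamps
# started_at, resets tasks_completed, and clears pause_reason.
session_config_enable = {"auto_mode": True}

# Disable autonomous mode; pause_reason is recorded as "user".
session_config_disable = {"auto_mode": False}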
@@ -3182,6 +3690,11 @@ _ACTION_DEFINITIONS = [
         handler=_handle_hierarchy,
         summary="Return paginated hierarchy slices",
     ),
+    ActionDefinition(
+        name="session-config",
+        handler=_handle_session_config,
+        summary="Get/set autonomous session configuration",
+    ),
 ]

 _TASK_ROUTER = ActionRouter(tool_name="task", actions=_ACTION_DEFINITIONS)
@@ -3260,6 +3773,9 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
        category: Optional[str] = None,
        parent_filter: Optional[str] = None,
        update_metadata: Optional[Dict[str, Any]] = None,
+        # session-config specific parameters
+        get: bool = False,
+        auto_mode: Optional[bool] = None,
    ) -> dict:
        payload = {
            "spec_id": spec_id,
@@ -3306,6 +3822,9 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
            "category": category,
            "parent_filter": parent_filter,
            "update_metadata": update_metadata,
+            # session-config specific
+            "get": get,
+            "auto_mode": auto_mode,
        }
        return _dispatch_task_action(action=action, payload=payload, config=config)

{foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: foundry-mcp
-Version: 0.7.0
+Version: 0.8.10
 Summary: MCP server for SDD toolkit spec management
 Project-URL: Homepage, https://github.com/tylerburleigh/foundry-mcp
 Project-URL: Repository, https://github.com/tylerburleigh/foundry-mcp
@@ -18,6 +18,7 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.10
 Requires-Dist: click>=8.0.0
 Requires-Dist: fastmcp>=0.1.0
+Requires-Dist: filelock>=3.20.1
 Requires-Dist: mcp>=1.0.0
 Requires-Dist: tomli>=2.0.0; python_version < '3.11'
 Provides-Extra: dashboard