foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
@@ -9,6 +9,7 @@ from __future__ import annotations
 import json
 import logging
 import time
+from datetime import datetime
 from dataclasses import asdict
 from pathlib import Path
 from typing import Any, Dict, List, Optional
@@ -19,9 +20,14 @@ from foundry_mcp.config import ServerConfig
 from foundry_mcp.core.ai_consultation import (
     ConsultationOrchestrator,
     ConsultationRequest,
+    ConsultationResult,
     ConsultationWorkflow,
     ConsensusResult,
 )
+from foundry_mcp.core.prompts.fidelity_review import (
+    FIDELITY_SYNTHESIZED_RESPONSE_SCHEMA,
+)
+from foundry_mcp.core.llm_config import get_consultation_config, load_consultation_config
 from foundry_mcp.core.naming import canonical_tool
 from foundry_mcp.core.observability import get_metrics, mcp_tool
 from foundry_mcp.core.providers import get_provider_statuses
@@ -82,7 +88,11 @@ def _parse_json_content(content: str) -> Optional[dict]:

 def _handle_spec_review(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     spec_id = payload.get("spec_id")
-    review_type = payload.get("review_type", "quick")
+    # Get default review_type from consultation config (used when not provided or None)
+    consultation_config = get_consultation_config()
+    workflow_config = consultation_config.get_workflow_config("plan_review")
+    default_review_type = workflow_config.default_review_type
+    review_type = payload.get("review_type") or default_review_type

     if not isinstance(spec_id, str) or not spec_id.strip():
         return asdict(
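The hunk above replaces the hard-coded `"quick"` default with the workflow default taken from the consultation config, so an explicit `review_type` still wins while missing or `None` values fall back to configuration. A minimal sketch of that fallback, using a hypothetical stand-in for the workflow config object (the real one comes from `foundry_mcp.core.llm_config`):

```python
from dataclasses import dataclass


@dataclass
class WorkflowConfig:
    # Stand-in for the real workflow config; "full" is an assumed default value.
    default_review_type: str = "full"


def resolve_review_type(payload: dict, workflow_config: WorkflowConfig) -> str:
    # `or` falls through to the configured default when the key is absent, None, or empty.
    return payload.get("review_type") or workflow_config.default_review_type


print(resolve_review_type({"review_type": "quick"}, WorkflowConfig()))  # quick
print(resolve_review_type({}, WorkflowConfig()))                        # full
print(resolve_review_type({"review_type": None}, WorkflowConfig()))     # full
```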
@@ -360,6 +370,159 @@ def _handle_parse_feedback(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     )


+def _format_fidelity_markdown(
+    parsed: Dict[str, Any],
+    spec_id: str,
+    spec_title: str,
+    scope: str,
+    task_id: Optional[str] = None,
+    phase_id: Optional[str] = None,
+    provider_id: Optional[str] = None,
+) -> str:
+    """Format fidelity review JSON as human-readable markdown."""
+    # Build scope detail
+    scope_detail = scope
+    if task_id:
+        scope_detail += f" (task: {task_id})"
+    elif phase_id:
+        scope_detail += f" (phase: {phase_id})"
+
+    lines = [
+        f"# Fidelity Review: {spec_title}",
+        "",
+        f"**Spec ID:** {spec_id}",
+        f"**Scope:** {scope_detail}",
+        f"**Verdict:** {parsed.get('verdict', 'unknown')}",
+        f"**Date:** {datetime.now().isoformat()}",
+    ]
+    if provider_id:
+        lines.append(f"**Provider:** {provider_id}")
+    lines.append("")
+
+    # Summary section
+    if parsed.get("summary"):
+        lines.extend(["## Summary", "", parsed["summary"], ""])
+
+    # Requirement Alignment
+    req_align = parsed.get("requirement_alignment", {})
+    if req_align:
+        lines.extend([
+            "## Requirement Alignment",
+            f"**Status:** {req_align.get('answer', 'unknown')}",
+            "",
+            req_align.get("details", ""),
+            "",
+        ])
+
+    # Success Criteria
+    success = parsed.get("success_criteria", {})
+    if success:
+        lines.extend([
+            "## Success Criteria",
+            f"**Status:** {success.get('met', 'unknown')}",
+            "",
+            success.get("details", ""),
+            "",
+        ])
+
+    # Deviations
+    deviations = parsed.get("deviations", [])
+    if deviations:
+        lines.extend(["## Deviations", ""])
+        for dev in deviations:
+            severity = dev.get("severity", "unknown")
+            description = dev.get("description", "")
+            justification = dev.get("justification", "")
+            lines.append(f"- **[{severity.upper()}]** {description}")
+            if justification:
+                lines.append(f" - Justification: {justification}")
+        lines.append("")
+
+    # Test Coverage
+    test_cov = parsed.get("test_coverage", {})
+    if test_cov:
+        lines.extend([
+            "## Test Coverage",
+            f"**Status:** {test_cov.get('status', 'unknown')}",
+            "",
+            test_cov.get("details", ""),
+            "",
+        ])
+
+    # Code Quality
+    code_quality = parsed.get("code_quality", {})
+    if code_quality:
+        lines.extend(["## Code Quality", ""])
+        if code_quality.get("details"):
+            lines.append(code_quality["details"])
+            lines.append("")
+        for issue in code_quality.get("issues", []):
+            lines.append(f"- {issue}")
+        lines.append("")
+
+    # Documentation
+    doc = parsed.get("documentation", {})
+    if doc:
+        lines.extend([
+            "## Documentation",
+            f"**Status:** {doc.get('status', 'unknown')}",
+            "",
+            doc.get("details", ""),
+            "",
+        ])
+
+    # Issues
+    issues = parsed.get("issues", [])
+    if issues:
+        lines.extend(["## Issues", ""])
+        for issue in issues:
+            lines.append(f"- {issue}")
+        lines.append("")
+
+    # Recommendations
+    recommendations = parsed.get("recommendations", [])
+    if recommendations:
+        lines.extend(["## Recommendations", ""])
+        for rec in recommendations:
+            lines.append(f"- {rec}")
+        lines.append("")
+
+    # Verdict consensus (if synthesized)
+    verdict_consensus = parsed.get("verdict_consensus", {})
+    if verdict_consensus:
+        lines.extend(["## Verdict Consensus", ""])
+        votes = verdict_consensus.get("votes", {})
+        for verdict_type, models in votes.items():
+            if models:
+                lines.append(f"- **{verdict_type}:** {', '.join(models)}")
+        agreement = verdict_consensus.get("agreement_level", "")
+        if agreement:
+            lines.append(f"\n**Agreement Level:** {agreement}")
+        notes = verdict_consensus.get("notes", "")
+        if notes:
+            lines.extend(["", notes])
+        lines.append("")
+
+    # Synthesis metadata
+    synth_meta = parsed.get("synthesis_metadata", {})
+    if synth_meta:
+        lines.extend(["## Synthesis Metadata", ""])
+        if synth_meta.get("models_consulted"):
+            lines.append(f"- Models consulted: {', '.join(synth_meta['models_consulted'])}")
+        if synth_meta.get("models_succeeded"):
+            lines.append(f"- Models succeeded: {', '.join(synth_meta['models_succeeded'])}")
+        if synth_meta.get("synthesis_provider"):
+            lines.append(f"- Synthesis provider: {synth_meta['synthesis_provider']}")
+        lines.append("")
+
+    lines.extend([
+        "---",
+        "*Generated by Foundry MCP Fidelity Review*",
+    ])
+
+    return "\n".join(lines)
+
+
 def _handle_fidelity(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     """Best-effort fidelity review.

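For orientation, here is an illustrative `parsed` payload of the kind `_format_fidelity_markdown` consumes. The key names mirror what the function reads above; every value is made up for the example, and the import path in the comment is only inferred from the files-changed list:

```python
# Illustrative input for the formatter above; all values are invented for the example.
parsed = {
    "verdict": "approve",
    "summary": "Implementation matches the spec apart from two minor deviations.",
    "requirement_alignment": {"answer": "yes", "details": "All tasks trace to requirements."},
    "success_criteria": {"met": "partial", "details": "One criterion still lacks a test."},
    "deviations": [
        {"severity": "minor", "description": "Helper module renamed", "justification": "clarity"},
    ],
    "recommendations": ["Add a regression test for the renamed helper."],
}

# Hypothetical call (module path inferred from the files-changed list, not stated in the diff):
# from foundry_mcp.tools.unified.review import _format_fidelity_markdown
# print(_format_fidelity_markdown(parsed, "spec-001", "Auth Refactor", "spec"))
```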
@@ -512,9 +675,25 @@ def _handle_fidelity(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:

     scope = "task" if task_id else ("phase" if phase_id else "spec")

+    # Setup fidelity reviews directory and file naming
+    fidelity_reviews_dir = Path(specs_dir) / ".fidelity-reviews"
+    base_name = f"{spec_id}-{scope}"
+    if task_id:
+        base_name += f"-{task_id}"
+    elif phase_id:
+        base_name += f"-{phase_id}"
+    provider_review_paths: List[Dict[str, Any]] = []
+    review_path: Optional[str] = None
+
     spec_requirements = _build_spec_requirements(spec_data, task_id, phase_id)
     implementation_artifacts = _build_implementation_artifacts(
-        spec_data, task_id, phase_id, files, incremental, base_branch
+        spec_data,
+        task_id,
+        phase_id,
+        files,
+        incremental,
+        base_branch,
+        workspace_root=ws_path,
     )
     test_results = (
         _build_test_results(spec_data, task_id, phase_id) if include_tests else ""
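The naming scheme introduced here writes reviews under `<specs_dir>/.fidelity-reviews/` as `<spec_id>-<scope>[-<task_id|phase_id>].md`, with a per-provider suffix added later in the diff. A small sketch of the resulting paths; the IDs and directory are illustrative:

```python
from pathlib import Path


def review_base_name(spec_id: str, scope: str, task_id=None, phase_id=None) -> str:
    # Mirrors the naming logic in the hunk above.
    base_name = f"{spec_id}-{scope}"
    if task_id:
        base_name += f"-{task_id}"
    elif phase_id:
        base_name += f"-{phase_id}"
    return base_name


specs_dir = Path("specs")  # illustrative location
reviews_dir = specs_dir / ".fidelity-reviews"
print(reviews_dir / f"{review_base_name('spec-001', 'task', task_id='T3')}.md")
# specs/.fidelity-reviews/spec-001-task-T3.md
print(reviews_dir / f"{review_base_name('spec-001', 'spec')}-gemini.md")
# specs/.fidelity-reviews/spec-001-spec-gemini.md  (per-provider file)
```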
@@ -524,7 +703,10 @@ def _handle_fidelity(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     preferred_providers = ai_tools if isinstance(ai_tools, list) else []
     first_provider = preferred_providers[0] if preferred_providers else None

-    orchestrator = ConsultationOrchestrator()
+    # Load consultation config from workspace path to get provider priority list
+    config_file = ws_path / "foundry-mcp.toml"
+    consultation_config = load_consultation_config(config_file=config_file)
+    orchestrator = ConsultationOrchestrator(config=consultation_config)
     if not orchestrator.is_available(provider_id=first_provider):
         return asdict(
             error_response(
@@ -555,27 +737,202 @@ def _handle_fidelity(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:

     result = orchestrator.consult(request, use_cache=True)
     is_consensus = isinstance(result, ConsensusResult)
-    content = result.primary_content if is_consensus else result.content
+    synthesis_performed = False
+    synthesis_error = None
+    successful_providers: List[str] = []
+    failed_providers: List[Dict[str, Any]] = []
+
+    if is_consensus:
+        # Extract provider details for visibility
+        failed_providers = [
+            {"provider_id": r.provider_id, "error": r.error}
+            for r in result.responses
+            if not r.success
+        ]
+        # Filter for truly successful responses (success=True AND non-empty content)
+        successful_responses = [
+            r for r in result.responses if r.success and r.content.strip()
+        ]
+        successful_providers = [r.provider_id for r in successful_responses]
+
+        if len(successful_responses) >= 2:
+            # Multi-model mode: run synthesis to consolidate reviews
+            model_reviews_json = ""
+            for response in successful_responses:
+                model_reviews_json += (
+                    f"\n---\n## Review by {response.provider_id}\n\n"
+                    f"```json\n{response.content}\n```\n"
+                )
+
+            # Write individual provider review files
+            try:
+                fidelity_reviews_dir.mkdir(parents=True, exist_ok=True)
+                for response in successful_responses:
+                    provider_parsed = _parse_json_content(response.content)
+                    provider_file = fidelity_reviews_dir / f"{base_name}-{response.provider_id}.md"
+                    if provider_parsed:
+                        provider_md = _format_fidelity_markdown(
+                            provider_parsed,
+                            spec_id,
+                            spec_data.get("title", spec_id),
+                            scope,
+                            task_id=task_id,
+                            phase_id=phase_id,
+                            provider_id=response.provider_id,
+                        )
+                        provider_file.write_text(provider_md, encoding="utf-8")
+                        provider_review_paths.append({
+                            "provider_id": response.provider_id,
+                            "path": str(provider_file),
+                        })
+                    else:
+                        # JSON parsing failed - write raw content as fallback
+                        logger.warning(
+                            "Provider %s returned non-JSON content, writing raw response",
+                            response.provider_id,
+                        )
+                        raw_md = (
+                            f"# Fidelity Review (Raw): {spec_id}\n\n"
+                            f"**Provider:** {response.provider_id}\n"
+                            f"**Note:** Response could not be parsed as JSON\n\n"
+                            f"## Raw Response\n\n```\n{response.content}\n```\n"
+                        )
+                        provider_file.write_text(raw_md, encoding="utf-8")
+                        provider_review_paths.append({
+                            "provider_id": response.provider_id,
+                            "path": str(provider_file),
+                            "parse_error": True,
+                        })
+            except Exception as e:
+                logger.warning("Failed to write provider review files: %s", e)
+
+            logger.info(
+                "Running fidelity synthesis for %d provider reviews: %s",
+                len(successful_responses),
+                successful_providers,
+            )
+
+            synthesis_request = ConsultationRequest(
+                workflow=ConsultationWorkflow.FIDELITY_REVIEW,
+                prompt_id="FIDELITY_SYNTHESIS_PROMPT_V1",
+                context={
+                    "spec_id": spec_id,
+                    "spec_title": spec_data.get("title", spec_id),
+                    "review_scope": scope,
+                    "num_models": len(successful_responses),
+                    "model_reviews": model_reviews_json,
+                    "response_schema": FIDELITY_SYNTHESIZED_RESPONSE_SCHEMA,
+                },
+                provider_id=successful_providers[0],
+                model=model,
+            )
+
+            try:
+                synthesis_result = orchestrator.consult(synthesis_request, use_cache=True)
+            except Exception as e:
+                logger.error("Fidelity synthesis call crashed: %s", e, exc_info=True)
+                synthesis_result = None
+
+            # Handle both ConsultationResult and ConsensusResult from synthesis
+            synthesis_success = False
+            synthesis_content = None
+            if synthesis_result:
+                if isinstance(synthesis_result, ConsultationResult) and synthesis_result.success:
+                    synthesis_content = synthesis_result.content
+                    synthesis_success = bool(synthesis_content and synthesis_content.strip())
+                elif isinstance(synthesis_result, ConsensusResult) and synthesis_result.success:
+                    synthesis_content = synthesis_result.primary_content
+                    synthesis_success = bool(synthesis_content and synthesis_content.strip())
+
+            if synthesis_success and synthesis_content:
+                content = synthesis_content
+                synthesis_performed = True
+            else:
+                # Synthesis failed - fall back to first provider's content
+                error_detail = "unknown"
+                if synthesis_result is None:
+                    error_detail = "synthesis crashed (see logs)"
+                elif isinstance(synthesis_result, ConsultationResult):
+                    error_detail = synthesis_result.error or "empty response"
+                elif isinstance(synthesis_result, ConsensusResult):
+                    error_detail = "empty synthesis content"
+                logger.warning(
+                    "Fidelity synthesis call failed (%s), falling back to first provider's content",
+                    error_detail,
+                )
+                content = result.primary_content
+                synthesis_error = error_detail
+        else:
+            # Single successful provider - use its content directly (no synthesis needed)
+            content = result.primary_content
+    else:
+        content = result.content

     parsed = _parse_json_content(content)
     verdict = parsed.get("verdict") if parsed else "unknown"

+    # Write main fidelity review file
+    if parsed:
+        try:
+            fidelity_reviews_dir.mkdir(parents=True, exist_ok=True)
+            main_md = _format_fidelity_markdown(
+                parsed,
+                spec_id,
+                spec_data.get("title", spec_id),
+                scope,
+                task_id=task_id,
+                phase_id=phase_id,
+            )
+            review_file = fidelity_reviews_dir / f"{base_name}.md"
+            review_file.write_text(main_md, encoding="utf-8")
+            review_path = str(review_file)
+        except Exception as e:
+            logger.warning("Failed to write main fidelity review file: %s", e)
+
     duration_ms = (time.perf_counter() - start_time) * 1000

+    # Build consensus info with synthesis details
+    consensus_info: Dict[str, Any] = {
+        "mode": "multi_model" if is_consensus else "single_model",
+        "threshold": consensus_threshold,
+        "provider_id": getattr(result, "provider_id", None),
+        "model_used": getattr(result, "model_used", None),
+        "synthesis_performed": synthesis_performed,
+    }
+
+    if is_consensus:
+        consensus_info["successful_providers"] = successful_providers
+        consensus_info["failed_providers"] = failed_providers
+        if synthesis_error:
+            consensus_info["synthesis_error"] = synthesis_error
+
+    # Include additional synthesized fields if available
+    response_data: Dict[str, Any] = {
+        "spec_id": spec_id,
+        "title": spec_data.get("title", spec_id),
+        "scope": scope,
+        "verdict": verdict,
+        "deviations": parsed.get("deviations") if parsed else [],
+        "recommendations": parsed.get("recommendations") if parsed else [],
+        "consensus": consensus_info,
+    }
+
+    # Add file paths if reviews were written
+    if review_path:
+        response_data["review_path"] = review_path
+    if provider_review_paths:
+        response_data["provider_reviews"] = provider_review_paths
+
+    # Add synthesis-specific fields if synthesis was performed
+    if synthesis_performed and parsed:
+        if "verdict_consensus" in parsed:
+            response_data["verdict_consensus"] = parsed["verdict_consensus"]
+        if "synthesis_metadata" in parsed:
+            response_data["synthesis_metadata"] = parsed["synthesis_metadata"]
+
     return asdict(
         success_response(
-            spec_id=spec_id,
-            title=spec_data.get("title", spec_id),
-            scope=scope,
-            verdict=verdict,
-            deviations=(parsed.get("deviations") if parsed else []),
-            recommendations=(parsed.get("recommendations") if parsed else []),
-            consensus={
-                "mode": "multi_model" if is_consensus else "single_model",
-                "threshold": consensus_threshold,
-                "provider_id": getattr(result, "provider_id", None),
-                "model_used": getattr(result, "model_used", None),
-            },
+            **response_data,
             telemetry={"duration_ms": round(duration_ms, 2)},
         )
     )
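In short, the hunk above changes how the review content is chosen: with two or more usable provider responses the tool writes per-provider files and asks one provider to synthesize them; with a single usable response, or a failed synthesis, it falls back to the primary provider's content; a plain single-model consultation is used as before. A condensed sketch of that selection order, using stand-in types rather than the real `ConsultationResult`/`ConsensusResult` classes:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class ProviderResponse:        # stand-in, not the real foundry_mcp class
    provider_id: str
    success: bool
    content: str = ""


@dataclass
class Consensus:               # stand-in for ConsensusResult
    responses: List[ProviderResponse] = field(default_factory=list)
    primary_content: str = ""


@dataclass
class Single:                  # stand-in for ConsultationResult
    content: str = ""


def select_content(result, synthesized: Optional[str] = None) -> str:
    """Mirror the fallback order: synthesized review -> primary provider -> single-model content."""
    if isinstance(result, Consensus):
        usable = [r for r in result.responses if r.success and r.content.strip()]
        if len(usable) >= 2 and synthesized and synthesized.strip():
            return synthesized            # multi-model: prefer the synthesis output
        return result.primary_content     # one usable provider, or synthesis failed
    return result.content                 # single-model consultation
```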
@@ -633,7 +990,7 @@ def register_unified_review_tool(mcp: FastMCP, config: ServerConfig) -> None:
     def review(
         action: str,
         spec_id: Optional[str] = None,
-        review_type: str = "quick",
+        review_type: Optional[str] = None,
         tools: Optional[str] = None,
         model: Optional[str] = None,
         ai_provider: Optional[str] = None,
@@ -193,6 +193,7 @@ def _run_ai_review(
             ConsultationRequest,
             ConsultationWorkflow,
         )
+        from foundry_mcp.core.llm_config import load_consultation_config
     except ImportError:
         return asdict(
             error_response(
@@ -203,7 +204,21 @@ def _run_ai_review(
             )
         )

-    orchestrator = ConsultationOrchestrator(default_timeout=ai_timeout)
+    # Load consultation config from workspace (fixes config discovery issue)
+    # Derive workspace from specs_dir - check parent directories for config
+    config_file = None
+    if specs_dir:
+        ws_path = specs_dir.parent if specs_dir.name == "specs" else specs_dir
+        for _ in range(5):  # Search up to 5 levels for foundry-mcp.toml
+            candidate = ws_path / "foundry-mcp.toml"
+            if candidate.exists():
+                config_file = candidate
+                break
+            if ws_path.parent == ws_path:  # Reached root
+                break
+            ws_path = ws_path.parent
+    consultation_config = load_consultation_config(config_file=config_file)
+    orchestrator = ConsultationOrchestrator(config=consultation_config, default_timeout=ai_timeout)

     if not orchestrator.is_available(provider_id=ai_provider):
         return asdict(
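The orchestrator setup now resolves its configuration by walking upward from the specs directory until it finds a `foundry-mcp.toml`, checking at most five levels. A standalone sketch of that search; the directory layout in the closing comment is illustrative:

```python
from pathlib import Path
from typing import Optional


def find_config(specs_dir: Path, max_levels: int = 5) -> Optional[Path]:
    # Start from the workspace that contains specs_dir, then climb toward the root.
    ws_path = specs_dir.parent if specs_dir.name == "specs" else specs_dir
    for _ in range(max_levels):
        candidate = ws_path / "foundry-mcp.toml"
        if candidate.exists():
            return candidate
        if ws_path.parent == ws_path:  # filesystem root reached
            break
        ws_path = ws_path.parent
    return None


# For example, find_config(Path("/repo/packages/service/specs")) returns
# /repo/foundry-mcp.toml when that is the nearest ancestor holding the file.
```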
@@ -18,7 +18,6 @@ from mcp.server.fastmcp import FastMCP
 from foundry_mcp.config import ServerConfig
 from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
 from foundry_mcp.core.discovery import get_capabilities, get_tool_registry
-from foundry_mcp.core.feature_flags import get_flag_service
 from foundry_mcp.core.naming import canonical_tool
 from foundry_mcp.core.observability import (
     get_metrics,
@@ -256,26 +255,15 @@ def _handle_tools(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:

     start_time = time.perf_counter()

-    categories_list: list[str]
-    flag_service = get_flag_service()
-    if flag_service.is_enabled("unified_manifest"):
-        all_tools = _build_unified_manifest_tools()
-        if category:
-            all_tools = [tool for tool in all_tools if tool.get("category") == category]
-        if tag:
-            all_tools = [tool for tool in all_tools if tag in (tool.get("tags") or [])]
-        categories_list = sorted(
-            {tool.get("category", "general") for tool in all_tools}
-        )
-    else:
-        registry = get_tool_registry()
-        all_tools = registry.list_tools(
-            category=category,
-            tag=tag,
-            include_deprecated=include_deprecated,
-        )
-        categories = registry.list_categories()
-        categories_list = [c["name"] for c in categories]
+    # Always use unified manifest (feature flags removed)
+    all_tools = _build_unified_manifest_tools()
+    if category:
+        all_tools = [tool for tool in all_tools if tool.get("category") == category]
+    if tag:
+        all_tools = [tool for tool in all_tools if tag in (tool.get("tags") or [])]
+    categories_list = sorted(
+        {tool.get("category", "general") for tool in all_tools}
+    )

     start_idx = 0
     if cursor:
@@ -360,9 +348,6 @@
         tokens=manifest_tokens,
         tool_count=len(all_tools),
     )
-    exporter.record_feature_flag_state(
-        "unified_manifest", flag_service.is_enabled("unified_manifest")
-    )

     response["meta"]["request_id"] = request_id
     return response