foundry-mcp 0.3.3__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/commands/plan.py +10 -3
  3. foundry_mcp/cli/commands/review.py +19 -4
  4. foundry_mcp/cli/commands/specs.py +38 -208
  5. foundry_mcp/cli/output.py +3 -3
  6. foundry_mcp/config.py +235 -5
  7. foundry_mcp/core/ai_consultation.py +146 -9
  8. foundry_mcp/core/discovery.py +6 -6
  9. foundry_mcp/core/error_store.py +2 -2
  10. foundry_mcp/core/intake.py +933 -0
  11. foundry_mcp/core/llm_config.py +20 -2
  12. foundry_mcp/core/metrics_store.py +2 -2
  13. foundry_mcp/core/progress.py +70 -0
  14. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  15. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  16. foundry_mcp/core/prompts/plan_review.py +5 -1
  17. foundry_mcp/core/providers/claude.py +6 -47
  18. foundry_mcp/core/providers/codex.py +6 -57
  19. foundry_mcp/core/providers/cursor_agent.py +3 -44
  20. foundry_mcp/core/providers/gemini.py +6 -57
  21. foundry_mcp/core/providers/opencode.py +35 -5
  22. foundry_mcp/core/research/__init__.py +68 -0
  23. foundry_mcp/core/research/memory.py +425 -0
  24. foundry_mcp/core/research/models.py +437 -0
  25. foundry_mcp/core/research/workflows/__init__.py +22 -0
  26. foundry_mcp/core/research/workflows/base.py +204 -0
  27. foundry_mcp/core/research/workflows/chat.py +271 -0
  28. foundry_mcp/core/research/workflows/consensus.py +396 -0
  29. foundry_mcp/core/research/workflows/ideate.py +682 -0
  30. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  31. foundry_mcp/core/responses.py +450 -0
  32. foundry_mcp/core/spec.py +2438 -236
  33. foundry_mcp/core/task.py +1064 -19
  34. foundry_mcp/core/testing.py +512 -123
  35. foundry_mcp/core/validation.py +313 -42
  36. foundry_mcp/dashboard/components/charts.py +0 -57
  37. foundry_mcp/dashboard/launcher.py +11 -0
  38. foundry_mcp/dashboard/views/metrics.py +25 -35
  39. foundry_mcp/dashboard/views/overview.py +1 -65
  40. foundry_mcp/resources/specs.py +25 -25
  41. foundry_mcp/schemas/intake-schema.json +89 -0
  42. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  43. foundry_mcp/server.py +38 -0
  44. foundry_mcp/tools/unified/__init__.py +4 -2
  45. foundry_mcp/tools/unified/authoring.py +2423 -267
  46. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  47. foundry_mcp/tools/unified/environment.py +235 -6
  48. foundry_mcp/tools/unified/error.py +18 -1
  49. foundry_mcp/tools/unified/lifecycle.py +8 -0
  50. foundry_mcp/tools/unified/plan.py +113 -1
  51. foundry_mcp/tools/unified/research.py +658 -0
  52. foundry_mcp/tools/unified/review.py +370 -16
  53. foundry_mcp/tools/unified/spec.py +367 -0
  54. foundry_mcp/tools/unified/task.py +1163 -48
  55. foundry_mcp/tools/unified/test.py +69 -8
  56. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
  57. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
  58. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
  59. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
  60. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/config.py CHANGED
@@ -10,6 +10,7 @@ Environment variables:
10
10
  - FOUNDRY_MCP_WORKSPACE_ROOTS: Comma-separated list of workspace root paths
11
11
  - FOUNDRY_MCP_SPECS_DIR: Path to specs directory
12
12
  - FOUNDRY_MCP_JOURNALS_PATH: Path to journals directory
13
+ - FOUNDRY_MCP_BIKELANE_DIR: Path to bikelane intake queue directory (default: specs/.bikelane)
13
14
  - FOUNDRY_MCP_LOG_LEVEL: Logging level (DEBUG, INFO, WARNING, ERROR)
14
15
  - FOUNDRY_MCP_API_KEYS: Comma-separated list of valid API keys (optional)
15
16
  - FOUNDRY_MCP_REQUIRE_AUTH: Whether to require API key authentication (true/false)
@@ -27,6 +28,7 @@ import logging
27
28
  import functools
28
29
  import time
29
30
  from dataclasses import dataclass, field
31
+ from importlib.metadata import version as get_package_version, PackageNotFoundError
30
32
  from pathlib import Path
31
33
  from typing import Optional, List, Dict, Any, Callable, TypeVar
32
34
 
@@ -38,6 +40,17 @@ except ImportError:
38
40
 
39
41
  logger = logging.getLogger(__name__)
40
42
 
43
+
44
+ def _get_version() -> str:
45
+ """Get package version from metadata (single source of truth: pyproject.toml)."""
46
+ try:
47
+ return get_package_version("foundry-mcp")
48
+ except PackageNotFoundError:
49
+ return "0.5.0" # Fallback for dev without install
50
+
51
+
52
+ _PACKAGE_VERSION = _get_version()
53
+
41
54
  T = TypeVar("T")
42
55
 
43
56
 
@@ -149,7 +162,7 @@ class ErrorCollectionConfig:
149
162
 
150
163
  Attributes:
151
164
  enabled: Whether error collection is enabled
152
- storage_path: Directory path for error storage (default: .cache/foundry-mcp/errors)
165
+ storage_path: Directory path for error storage (default: ~/.foundry-mcp/errors)
153
166
  retention_days: Delete records older than this many days
154
167
  max_errors: Maximum number of error records to keep
155
168
  include_stack_traces: Whether to include stack traces in error records
@@ -190,7 +203,7 @@ class ErrorCollectionConfig:
190
203
  """
191
204
  if self.storage_path:
192
205
  return Path(self.storage_path).expanduser()
193
- return Path.home() / ".cache" / "foundry-mcp" / "errors"
206
+ return Path.home() / ".foundry-mcp" / "errors"
194
207
 
195
208
 
196
209
  @dataclass
@@ -203,7 +216,7 @@ class MetricsPersistenceConfig:
203
216
 
204
217
  Attributes:
205
218
  enabled: Whether metrics persistence is enabled
206
- storage_path: Directory path for metrics storage (default: .cache/foundry-mcp/metrics)
219
+ storage_path: Directory path for metrics storage (default: ~/.foundry-mcp/metrics)
207
220
  retention_days: Delete records older than this many days
208
221
  max_records: Maximum number of metric data points to keep
209
222
  bucket_interval_seconds: Aggregation bucket interval (default: 60s = 1 minute)
@@ -262,7 +275,7 @@ class MetricsPersistenceConfig:
262
275
  """
263
276
  if self.storage_path:
264
277
  return Path(self.storage_path).expanduser()
265
- return Path.home() / ".cache" / "foundry-mcp" / "metrics"
278
+ return Path.home() / ".foundry-mcp" / "metrics"
266
279
 
267
280
  def should_persist_metric(self, metric_name: str) -> bool:
268
281
  """Check if a metric should be persisted.
@@ -319,6 +332,180 @@ class DashboardConfig:
319
332
  )
320
333
 
321
334
 
335
@dataclass
class RunnerConfig:
    """Configuration for a single test runner (pytest, go, npm, etc.).

    Attributes:
        command: Command to execute (e.g., ["go", "test"] or ["python", "-m", "pytest"])
        run_args: Additional arguments for running tests
        discover_args: Arguments for test discovery
        pattern: File pattern for test discovery (e.g., "*_test.go", "test_*.py")
        timeout: Default timeout in seconds
    """

    command: List[str] = field(default_factory=list)
    run_args: List[str] = field(default_factory=list)
    discover_args: List[str] = field(default_factory=list)
    pattern: str = "*"
    timeout: int = 300

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "RunnerConfig":
        """Create config from TOML dict.

        Args:
            data: Dict from TOML parsing

        Returns:
            RunnerConfig instance
        """

        def as_arg_list(value: Any) -> List[str]:
            # TOML may supply either a whitespace-separated command string
            # or a native list; normalize both forms to a list.
            return value.split() if isinstance(value, str) else value

        return cls(
            command=as_arg_list(data.get("command", [])),
            run_args=as_arg_list(data.get("run_args", [])),
            discover_args=as_arg_list(data.get("discover_args", [])),
            pattern=str(data.get("pattern", "*")),
            timeout=int(data.get("timeout", 300)),
        )
383
+
384
+
385
@dataclass
class TestConfig:
    """Configuration for test runners.

    Multiple runners (pytest, go, npm, etc.) may be declared in TOML with
    their own commands and arguments; one is picked at runtime via the
    'runner' parameter, falling back to `default_runner`.

    Attributes:
        default_runner: Default runner to use when none specified
        runners: Dict of runner name to RunnerConfig
    """

    default_runner: str = "pytest"
    runners: Dict[str, RunnerConfig] = field(default_factory=dict)

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "TestConfig":
        """Create config from TOML dict (typically [test] section).

        Args:
            data: Dict from TOML parsing

        Returns:
            TestConfig instance
        """
        parsed_runners = {
            runner_name: RunnerConfig.from_toml_dict(runner_cfg)
            for runner_name, runner_cfg in data.get("runners", {}).items()
        }
        return cls(
            default_runner=str(data.get("default_runner", "pytest")),
            runners=parsed_runners,
        )

    def get_runner(self, name: Optional[str] = None) -> Optional[RunnerConfig]:
        """Get runner config by name.

        Args:
            name: Runner name, or None to use default

        Returns:
            RunnerConfig if found, None otherwise
        """
        return self.runners.get(name or self.default_runner)
432
+
433
+
434
@dataclass
class ResearchConfig:
    """Configuration for research workflows (CHAT, CONSENSUS, THINKDEEP, IDEATE).

    Attributes:
        enabled: Master switch for research tools
        storage_path: Directory for research state persistence (default: ~/.foundry-mcp/research)
        storage_backend: Storage backend type (currently only 'file' supported)
        ttl_hours: Time-to-live for stored states in hours
        max_messages_per_thread: Maximum messages retained in a conversation thread
        default_provider: Default LLM provider for single-model workflows
        consensus_providers: List of provider IDs for CONSENSUS workflow
        thinkdeep_max_depth: Maximum investigation depth for THINKDEEP workflow
        ideate_perspectives: List of perspectives for IDEATE brainstorming
    """

    enabled: bool = True
    storage_path: str = ""  # empty string means "use the default location"
    storage_backend: str = "file"
    ttl_hours: int = 24
    max_messages_per_thread: int = 100
    default_provider: str = "gemini"
    consensus_providers: List[str] = field(
        default_factory=lambda: ["gemini", "claude"]
    )
    thinkdeep_max_depth: int = 5
    ideate_perspectives: List[str] = field(
        default_factory=lambda: ["technical", "creative", "practical", "visionary"]
    )

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "ResearchConfig":
        """Build a ResearchConfig from a parsed [research] TOML section.

        Args:
            data: Dict from TOML parsing

        Returns:
            ResearchConfig instance
        """

        def as_name_list(value: Any) -> List[str]:
            # Accept either a comma-separated string or a native TOML list.
            if isinstance(value, str):
                return [item.strip() for item in value.split(",")]
            return value

        return cls(
            enabled=_parse_bool(data.get("enabled", True)),
            storage_path=str(data.get("storage_path", "")),
            storage_backend=str(data.get("storage_backend", "file")),
            ttl_hours=int(data.get("ttl_hours", 24)),
            max_messages_per_thread=int(data.get("max_messages_per_thread", 100)),
            default_provider=str(data.get("default_provider", "gemini")),
            consensus_providers=as_name_list(
                data.get("consensus_providers", ["gemini", "claude"])
            ),
            thinkdeep_max_depth=int(data.get("thinkdeep_max_depth", 5)),
            ideate_perspectives=as_name_list(
                data.get(
                    "ideate_perspectives",
                    ["technical", "creative", "practical", "visionary"],
                )
            ),
        )

    def get_storage_path(self) -> Path:
        """Return the resolved storage directory for research state.

        Returns:
            Path to the configured directory, or ~/.foundry-mcp/research
            when no explicit storage_path is set.
        """
        if self.storage_path:
            return Path(self.storage_path).expanduser()
        return Path.home() / ".foundry-mcp" / "research"
507
+
508
+
322
509
  _VALID_COMMIT_CADENCE = {"manual", "task", "phase"}
323
510
 
324
511
 
@@ -348,6 +535,7 @@ class ServerConfig:
348
535
  workspace_roots: List[Path] = field(default_factory=list)
349
536
  specs_dir: Optional[Path] = None
350
537
  journals_path: Optional[Path] = None
538
+ bikelane_dir: Optional[Path] = None # Intake queue storage (default: specs/.bikelane)
351
539
 
352
540
  # Logging configuration
353
541
  log_level: str = "INFO"
@@ -359,7 +547,7 @@ class ServerConfig:
359
547
 
360
548
  # Server configuration
361
549
  server_name: str = "foundry-mcp"
362
- server_version: str = "0.1.0"
550
+ server_version: str = field(default_factory=lambda: _PACKAGE_VERSION)
363
551
 
364
552
  # Git workflow configuration
365
553
  git: GitSettings = field(default_factory=GitSettings)
@@ -379,6 +567,12 @@ class ServerConfig:
379
567
  # Dashboard configuration
380
568
  dashboard: DashboardConfig = field(default_factory=DashboardConfig)
381
569
 
570
+ # Test runner configuration
571
+ test: TestConfig = field(default_factory=TestConfig)
572
+
573
+ # Research workflows configuration
574
+ research: ResearchConfig = field(default_factory=ResearchConfig)
575
+
382
576
  @classmethod
383
577
  def from_env(cls, config_file: Optional[str] = None) -> "ServerConfig":
384
578
  """
@@ -426,6 +620,8 @@ class ServerConfig:
426
620
  self.specs_dir = Path(ws["specs_dir"])
427
621
  if "journals_path" in ws:
428
622
  self.journals_path = Path(ws["journals_path"])
623
+ if "bikelane_dir" in ws:
624
+ self.bikelane_dir = Path(ws["bikelane_dir"])
429
625
 
430
626
  # Logging settings
431
627
  if "logging" in data:
@@ -499,6 +695,14 @@ class ServerConfig:
499
695
  if "dashboard" in data:
500
696
  self.dashboard = DashboardConfig.from_toml_dict(data["dashboard"])
501
697
 
698
+ # Test runner settings
699
+ if "test" in data:
700
+ self.test = TestConfig.from_toml_dict(data["test"])
701
+
702
+ # Research workflows settings
703
+ if "research" in data:
704
+ self.research = ResearchConfig.from_toml_dict(data["research"])
705
+
502
706
  except Exception as e:
503
707
  logger.error(f"Error loading config file {path}: {e}")
504
708
 
@@ -516,6 +720,10 @@ class ServerConfig:
516
720
  if journals := os.environ.get("FOUNDRY_MCP_JOURNALS_PATH"):
517
721
  self.journals_path = Path(journals)
518
722
 
723
+ # Bikelane directory (intake queue storage)
724
+ if bikelane := os.environ.get("FOUNDRY_MCP_BIKELANE_DIR"):
725
+ self.bikelane_dir = Path(bikelane)
726
+
519
727
  # Log level
520
728
  if level := os.environ.get("FOUNDRY_MCP_LOG_LEVEL"):
521
729
  self.log_level = level.upper()
@@ -689,6 +897,28 @@ class ServerConfig:
689
897
 
690
898
  return key in self.api_keys
691
899
 
900
def get_bikelane_dir(self, specs_dir: Optional[Path] = None) -> Path:
    """
    Resolve the directory used for the bikelane intake queue.

    Resolution order:
    1. Explicitly configured bikelane_dir (from TOML or env var)
    2. Default: <specs_dir>/.bikelane

    Args:
        specs_dir: Optional specs directory to use for the default path.
            Falls back to self.specs_dir and finally "./specs".

    Returns:
        Path to the bikelane directory
    """
    configured = self.bikelane_dir
    if configured is not None:
        return configured.expanduser()

    # No explicit setting: derive the default from whichever specs
    # directory is known (argument, config, or the ./specs convention).
    specs_root = specs_dir or self.specs_dir or Path("./specs")
    return specs_root / ".bikelane"
921
+
692
922
  def setup_logging(self) -> None:
693
923
  """Configure logging based on settings."""
694
924
  level = getattr(logging, self.log_level, logging.INFO)
@@ -44,7 +44,7 @@ import hashlib
44
44
  import json
45
45
  import logging
46
46
  import time
47
- from dataclasses import dataclass, field
47
+ from dataclasses import dataclass, field, replace
48
48
  from enum import Enum
49
49
  from pathlib import Path
50
50
  from typing import Any, Dict, List, Optional, Sequence, Union
@@ -1305,6 +1305,143 @@ class ConsultationOrchestrator:
1305
1305
  warnings=warnings,
1306
1306
  )
1307
1307
 
1308
async def _execute_parallel_providers_with_fallback_async(
    self,
    request: ConsultationRequest,
    prompt: str,
    all_providers: List[ResolvedProvider],
    min_models: int = 1,
) -> ConsensusResult:
    """
    Execute providers in parallel with sequential fallback on failures.

    Uses a two-phase approach:
    1. Execute first min_models providers in parallel
    2. If any fail and fallback_enabled, try remaining providers sequentially
       until min_models succeed or providers exhausted

    Args:
        request: The consultation request
        prompt: The rendered prompt
        all_providers: Complete priority list of providers to try
        min_models: Minimum successful models required

    Returns:
        ConsensusResult with all attempted provider responses
    """
    start_time = time.time()
    warnings: List[str] = []
    all_responses: List[ProviderResponse] = []

    # Degenerate case: nothing to run — return an empty result with a warning.
    if not all_providers:
        return ConsensusResult(
            workflow=request.workflow,
            responses=[],
            duration_ms=0.0,
            warnings=["No providers available for parallel execution"],
        )

    # Phase 1: Initial parallel execution of first min_models providers
    initial_providers = all_providers[:min_models]
    logger.debug(
        f"Phase 1: Executing {len(initial_providers)} providers in parallel"
    )

    # NOTE(review): gather() is used without return_exceptions=True, which
    # assumes _execute_single_provider_async reports failures via
    # ProviderResponse.success rather than raising — confirm that contract.
    tasks = [
        self._execute_single_provider_async(request, prompt, resolved)
        for resolved in initial_providers
    ]
    initial_responses: List[ProviderResponse] = await asyncio.gather(*tasks)
    all_responses.extend(initial_responses)

    # Count successes and log failures
    # A response is only truly successful if it has non-empty content
    successful_count = sum(
        1 for r in initial_responses if r.success and r.content.strip()
    )
    for response in initial_responses:
        if not response.success:
            warnings.append(
                f"Provider {response.provider_id} failed: {response.error}"
            )
        elif not response.content.strip():
            warnings.append(
                f"Provider {response.provider_id} returned empty content"
            )

    # Phase 2: Sequential fallback if needed and enabled
    if successful_count < min_models and self._config.fallback_enabled:
        needed = min_models - successful_count
        remaining_providers = all_providers[min_models:]

        if remaining_providers:
            warnings.append(
                f"Initial parallel execution yielded {successful_count}/{min_models} "
                f"successes, attempting fallback for {needed} more"
            )

        for fallback_provider in remaining_providers:
            # Skip if already tried (shouldn't happen, but safety check)
            if any(
                r.provider_id == fallback_provider.provider_id
                for r in all_responses
            ):
                continue

            # Check if provider is available
            if not check_provider_available(fallback_provider.provider_id):
                warnings.append(
                    f"Fallback provider {fallback_provider.provider_id} "
                    "is not available, skipping"
                )
                continue

            logger.debug(
                f"Fallback attempt: trying provider {fallback_provider.provider_id}"
            )

            # Fallback is deliberately sequential: one provider at a time
            # until the success quota is met.
            response = await self._execute_single_provider_async(
                request, prompt, fallback_provider
            )
            all_responses.append(response)

            if response.success and response.content.strip():
                successful_count += 1
                warnings.append(
                    f"Fallback provider {fallback_provider.provider_id} succeeded"
                )
                if successful_count >= min_models:
                    logger.debug(
                        f"Reached {min_models} successful providers via fallback"
                    )
                    break
            elif response.success and not response.content.strip():
                # Same empty-content rule as Phase 1: success flag alone
                # does not count toward the quota.
                warnings.append(
                    f"Fallback provider {fallback_provider.provider_id} "
                    "returned empty content"
                )
            else:
                warnings.append(
                    f"Fallback provider {fallback_provider.provider_id} "
                    f"failed: {response.error}"
                )

    duration_ms = (time.time() - start_time) * 1000

    # Final warning if still insufficient
    if successful_count < min_models:
        warnings.append(
            f"Only {successful_count} of {min_models} required models succeeded "
            f"after trying {len(all_responses)} provider(s)"
        )

    # All attempted responses (including failures) are returned so callers
    # can inspect per-provider outcomes alongside the warnings.
    return ConsensusResult(
        workflow=request.workflow,
        responses=all_responses,
        duration_ms=duration_ms,
        warnings=warnings,
    )
1444
+
1308
1445
  def _execute_with_fallback(
1309
1446
  self,
1310
1447
  request: ConsultationRequest,
@@ -1490,6 +1627,10 @@ class ConsultationOrchestrator:
1490
1627
  workflow_config = self._config.get_workflow_config(effective_workflow)
1491
1628
  min_models = workflow_config.min_models
1492
1629
 
1630
+ # Apply workflow-specific timeout override if configured
1631
+ if workflow_config.timeout_override is not None:
1632
+ request = replace(request, timeout=workflow_config.timeout_override)
1633
+
1493
1634
  # Generate cache key
1494
1635
  cache_key = self._generate_cache_key(request)
1495
1636
 
@@ -1533,14 +1674,10 @@ class ConsultationOrchestrator:
1533
1674
  providers = self._get_providers_to_try(request)
1534
1675
 
1535
1676
  if min_models > 1:
1536
- # Multi-model mode: execute providers in parallel
1537
- # Limit to min_models providers (or all available if fewer)
1538
- providers_to_use = (
1539
- providers[:min_models] if len(providers) >= min_models else providers
1540
- )
1541
-
1542
- result = await self._execute_parallel_providers_async(
1543
- request, prompt, providers_to_use, min_models
1677
+ # Multi-model mode: execute providers in parallel with fallback support
1678
+ # Pass full provider list - fallback will try additional providers if needed
1679
+ result = await self._execute_parallel_providers_with_fallback_async(
1680
+ request, prompt, providers, min_models
1544
1681
  )
1545
1682
  return result
1546
1683
  else:
@@ -925,11 +925,11 @@ LLM_TOOL_METADATA: Dict[str, ToolMetadata] = {
925
925
  ParameterMetadata(
926
926
  name="review_type",
927
927
  type=ParameterType.STRING,
928
- description="Type of review to perform",
928
+ description="Type of review to perform (defaults to config value, typically 'full')",
929
929
  required=False,
930
- default="quick",
930
+ default="full",
931
931
  constraints={"enum": ["quick", "full", "security", "feasibility"]},
932
- examples=["quick", "full", "security"],
932
+ examples=["full", "quick", "security"],
933
933
  ),
934
934
  ParameterMetadata(
935
935
  name="tools",
@@ -965,13 +965,13 @@ LLM_TOOL_METADATA: Dict[str, ToolMetadata] = {
965
965
  related_tools=["review-list-tools", "review-list-plan-tools", "spec-review-fidelity"],
966
966
  examples=[
967
967
  {
968
- "description": "Quick review of a specification",
969
- "input": {"spec_id": "feature-auth-001", "review_type": "quick"},
968
+ "description": "Full review of a specification",
969
+ "input": {"spec_id": "feature-auth-001", "review_type": "full"},
970
970
  "output": {
971
971
  "success": True,
972
972
  "data": {
973
973
  "spec_id": "feature-auth-001",
974
- "review_type": "quick",
974
+ "review_type": "full",
975
975
  "findings": [],
976
976
  "suggestions": ["Consider adding error handling"],
977
977
  },
@@ -137,7 +137,7 @@ class FileErrorStore(ErrorStore):
137
137
  for efficient querying. Thread-safe with file locking for concurrent access.
138
138
 
139
139
  Directory structure:
140
- .cache/foundry-mcp/errors/
140
+ ~/.foundry-mcp/errors/
141
141
  errors.jsonl - Append-only error log
142
142
  index.json - Fingerprint -> metadata mapping
143
143
  stats.json - Pre-computed statistics (updated periodically)
@@ -579,7 +579,7 @@ def get_error_store(storage_path: Optional[str | Path] = None) -> ErrorStore:
579
579
  if _error_store is None:
580
580
  if storage_path is None:
581
581
  # Default path
582
- storage_path = Path.home() / ".cache" / "foundry-mcp" / "errors"
582
+ storage_path = Path.home() / ".foundry-mcp" / "errors"
583
583
  _error_store = FileErrorStore(storage_path)
584
584
 
585
585
  return _error_store