foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/__init__.py +0 -13
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/session.py +1 -8
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/context.py +39 -0
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +615 -11
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/batch_operations.py +1196 -0
- foundry_mcp/core/discovery.py +7 -7
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +28 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/naming.py +25 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prometheus.py +0 -13
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/__init__.py +12 -0
- foundry_mcp/core/providers/base.py +39 -0
- foundry_mcp/core/providers/claude.py +51 -48
- foundry_mcp/core/providers/codex.py +70 -60
- foundry_mcp/core/providers/cursor_agent.py +25 -47
- foundry_mcp/core/providers/detectors.py +34 -7
- foundry_mcp/core/providers/gemini.py +69 -58
- foundry_mcp/core/providers/opencode.py +101 -47
- foundry_mcp/core/providers/package-lock.json +4 -4
- foundry_mcp/core/providers/package.json +1 -1
- foundry_mcp/core/providers/validation.py +128 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1220 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4020 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +690 -0
- foundry_mcp/core/spec.py +2439 -236
- foundry_mcp/core/task.py +1205 -31
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +319 -43
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +0 -14
- foundry_mcp/tools/unified/__init__.py +39 -18
- foundry_mcp/tools/unified/authoring.py +2371 -248
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +434 -32
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +133 -2
- foundry_mcp/tools/unified/provider.py +0 -40
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +374 -17
- foundry_mcp/tools/unified/review_helpers.py +16 -1
- foundry_mcp/tools/unified/server.py +9 -24
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1664 -30
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
- foundry_mcp-0.8.10.dist-info/RECORD +153 -0
- foundry_mcp/cli/flags.py +0 -266
- foundry_mcp/core/feature_flags.py +0 -592
- foundry_mcp-0.3.3.dist-info/RECORD +0 -135
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/config.py
CHANGED
@@ -10,11 +10,19 @@ Environment variables:
     - FOUNDRY_MCP_WORKSPACE_ROOTS: Comma-separated list of workspace root paths
     - FOUNDRY_MCP_SPECS_DIR: Path to specs directory
     - FOUNDRY_MCP_JOURNALS_PATH: Path to journals directory
+    - FOUNDRY_MCP_BIKELANE_DIR: Path to bikelane intake queue directory (default: specs/.bikelane)
     - FOUNDRY_MCP_LOG_LEVEL: Logging level (DEBUG, INFO, WARNING, ERROR)
     - FOUNDRY_MCP_API_KEYS: Comma-separated list of valid API keys (optional)
     - FOUNDRY_MCP_REQUIRE_AUTH: Whether to require API key authentication (true/false)
     - FOUNDRY_MCP_CONFIG_FILE: Path to TOML config file

+Search Provider API Keys (for deep research workflow):
+    - TAVILY_API_KEY: API key for Tavily web search (https://tavily.com/)
+    - PERPLEXITY_API_KEY: API key for Perplexity Search (https://docs.perplexity.ai/)
+    - GOOGLE_API_KEY: API key for Google Custom Search (https://console.cloud.google.com/)
+    - GOOGLE_CSE_ID: Google Custom Search Engine ID (https://cse.google.com/)
+    - SEMANTIC_SCHOLAR_API_KEY: API key for Semantic Scholar academic search (optional for basic tier)
+
 API Key Security:
     - Keys should be rotated regularly (recommended: every 90 days)
     - To revoke a key: remove it from FOUNDRY_MCP_API_KEYS and restart server
@@ -27,8 +35,9 @@ import logging
 import functools
 import time
 from dataclasses import dataclass, field
+from importlib.metadata import version as get_package_version, PackageNotFoundError
 from pathlib import Path
-from typing import Optional, List, Dict, Any, Callable, TypeVar
+from typing import Optional, List, Dict, Any, Callable, TypeVar, Tuple

 try:
     import tomllib
@@ -38,6 +47,17 @@ except ImportError:

 logger = logging.getLogger(__name__)

+
+def _get_version() -> str:
+    """Get package version from metadata (single source of truth: pyproject.toml)."""
+    try:
+        return get_package_version("foundry-mcp")
+    except PackageNotFoundError:
+        return "0.5.0"  # Fallback for dev without install
+
+
+_PACKAGE_VERSION = _get_version()
+
 T = TypeVar("T")


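The `_get_version` helper added above is the standard `importlib.metadata` pattern for keeping the version string in one place (`pyproject.toml`). A minimal standalone sketch of the same pattern; the distribution name and fallback string here are placeholders, not foundry-mcp's actual values:

```python
# Sketch of the version-lookup pattern above; "pip" and the fallback
# string are illustrative placeholders, not foundry-mcp values.
from importlib.metadata import PackageNotFoundError, version


def package_version(dist_name: str, fallback: str = "0.0.0.dev0") -> str:
    """Return the installed version of dist_name, or a fallback when
    running from a source checkout that was never pip-installed."""
    try:
        return version(dist_name)
    except PackageNotFoundError:
        return fallback


print(package_version("pip"))  # prints the installed pip version, e.g. "24.0"
```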
@@ -46,7 +66,6 @@ class GitSettings:
     """Git workflow preferences for CLI + MCP surfaces."""

     enabled: bool = False
-    auto_branch: bool = False
     auto_commit: bool = False
     auto_push: bool = False
     auto_pr: bool = False
@@ -149,7 +168,7 @@ class ErrorCollectionConfig:

     Attributes:
         enabled: Whether error collection is enabled
-        storage_path: Directory path for error storage (default:
+        storage_path: Directory path for error storage (default: ~/.foundry-mcp/errors)
         retention_days: Delete records older than this many days
         max_errors: Maximum number of error records to keep
         include_stack_traces: Whether to include stack traces in error records
@@ -190,7 +209,7 @@ class ErrorCollectionConfig:
         """
         if self.storage_path:
             return Path(self.storage_path).expanduser()
-        return Path.home() / ".
+        return Path.home() / ".foundry-mcp" / "errors"


 @dataclass
@@ -203,7 +222,7 @@ class MetricsPersistenceConfig:

     Attributes:
         enabled: Whether metrics persistence is enabled
-        storage_path: Directory path for metrics storage (default:
+        storage_path: Directory path for metrics storage (default: ~/.foundry-mcp/metrics)
         retention_days: Delete records older than this many days
         max_records: Maximum number of metric data points to keep
         bucket_interval_seconds: Aggregation bucket interval (default: 60s = 1 minute)
@@ -262,7 +281,7 @@ class MetricsPersistenceConfig:
         """
         if self.storage_path:
             return Path(self.storage_path).expanduser()
-        return Path.home() / ".
+        return Path.home() / ".foundry-mcp" / "metrics"

     def should_persist_metric(self, metric_name: str) -> bool:
         """Check if a metric should be persisted.
@@ -319,6 +338,484 @@ class DashboardConfig:
         )


+@dataclass
+class RunnerConfig:
+    """Configuration for a test runner (pytest, go, npm, etc.).
+
+    Attributes:
+        command: Command to execute (e.g., ["go", "test"] or ["python", "-m", "pytest"])
+        run_args: Additional arguments for running tests
+        discover_args: Arguments for test discovery
+        pattern: File pattern for test discovery (e.g., "*_test.go", "test_*.py")
+        timeout: Default timeout in seconds
+    """
+
+    command: List[str] = field(default_factory=list)
+    run_args: List[str] = field(default_factory=list)
+    discover_args: List[str] = field(default_factory=list)
+    pattern: str = "*"
+    timeout: int = 300
+
+    @classmethod
+    def from_toml_dict(cls, data: Dict[str, Any]) -> "RunnerConfig":
+        """Create config from TOML dict.
+
+        Args:
+            data: Dict from TOML parsing
+
+        Returns:
+            RunnerConfig instance
+        """
+        command = data.get("command", [])
+        # Handle string command (convert to list)
+        if isinstance(command, str):
+            command = command.split()
+
+        run_args = data.get("run_args", [])
+        if isinstance(run_args, str):
+            run_args = run_args.split()
+
+        discover_args = data.get("discover_args", [])
+        if isinstance(discover_args, str):
+            discover_args = discover_args.split()
+
+        return cls(
+            command=command,
+            run_args=run_args,
+            discover_args=discover_args,
+            pattern=str(data.get("pattern", "*")),
+            timeout=int(data.get("timeout", 300)),
+        )
+
+
+@dataclass
+class TestConfig:
+    """Configuration for test runners.
+
+    Supports multiple test runners (pytest, go, npm, etc.) with configurable
+    commands and arguments. Runners can be defined in TOML config and selected
+    at runtime via the 'runner' parameter.
+
+    Attributes:
+        default_runner: Default runner to use when none specified
+        runners: Dict of runner name to RunnerConfig
+    """
+
+    default_runner: str = "pytest"
+    runners: Dict[str, RunnerConfig] = field(default_factory=dict)
+
+    @classmethod
+    def from_toml_dict(cls, data: Dict[str, Any]) -> "TestConfig":
+        """Create config from TOML dict (typically [test] section).
+
+        Args:
+            data: Dict from TOML parsing
+
+        Returns:
+            TestConfig instance
+        """
+        runners = {}
+        runners_data = data.get("runners", {})
+        for name, runner_data in runners_data.items():
+            runners[name] = RunnerConfig.from_toml_dict(runner_data)
+
+        return cls(
+            default_runner=str(data.get("default_runner", "pytest")),
+            runners=runners,
+        )
+
+    def get_runner(self, name: Optional[str] = None) -> Optional[RunnerConfig]:
+        """Get runner config by name.
+
+        Args:
+            name: Runner name, or None to use default
+
+        Returns:
+            RunnerConfig if found, None otherwise
+        """
+        runner_name = name or self.default_runner
+        return self.runners.get(runner_name)
+
+
+@dataclass
+class ResearchConfig:
+    """Configuration for research workflows (CHAT, CONSENSUS, THINKDEEP, IDEATE, DEEP_RESEARCH).
+
+    Attributes:
+        enabled: Master switch for research tools
+        storage_path: Directory for research state persistence (default: ~/.foundry-mcp/research)
+        storage_backend: Storage backend type (currently only 'file' supported)
+        ttl_hours: Time-to-live for stored states in hours
+        max_messages_per_thread: Maximum messages retained in a conversation thread
+        default_provider: Default LLM provider for single-model workflows
+        consensus_providers: List of provider IDs for CONSENSUS workflow
+        thinkdeep_max_depth: Maximum investigation depth for THINKDEEP workflow
+        ideate_perspectives: List of perspectives for IDEATE brainstorming
+        default_timeout: Default timeout in seconds for provider calls (thinkdeep uses 2x)
+        deep_research_max_iterations: Maximum refinement iterations for DEEP_RESEARCH
+        deep_research_max_sub_queries: Maximum sub-queries for query decomposition
+        deep_research_max_sources: Maximum sources per sub-query
+        deep_research_follow_links: Whether to follow and extract content from links
+        deep_research_timeout: Default timeout per operation in seconds
+        deep_research_max_concurrent: Maximum concurrent operations
+        deep_research_providers: Ordered list of search providers for deep research
+        deep_research_audit_artifacts: Whether to write per-run audit artifacts
+        search_rate_limit: Global rate limit for search APIs (requests per minute)
+        max_concurrent_searches: Maximum concurrent search requests (for asyncio.Semaphore)
+        per_provider_rate_limits: Per-provider rate limits in requests per minute
+        tavily_api_key: API key for Tavily search provider (optional, reads from TAVILY_API_KEY env var)
+        perplexity_api_key: API key for Perplexity Search (optional, reads from PERPLEXITY_API_KEY env var)
+        google_api_key: API key for Google Custom Search (optional, reads from GOOGLE_API_KEY env var)
+        google_cse_id: Google Custom Search Engine ID (optional, reads from GOOGLE_CSE_ID env var)
+        semantic_scholar_api_key: API key for Semantic Scholar (optional, reads from SEMANTIC_SCHOLAR_API_KEY env var)
+    """
+
+    enabled: bool = True
+    storage_path: str = ""  # Empty = use default (~/.foundry-mcp/research)
+    storage_backend: str = "file"
+    ttl_hours: int = 24
+    max_messages_per_thread: int = 100
+    default_provider: str = "gemini"
+    consensus_providers: List[str] = field(
+        default_factory=lambda: ["gemini", "claude"]
+    )
+    thinkdeep_max_depth: int = 5
+    ideate_perspectives: List[str] = field(
+        default_factory=lambda: ["technical", "creative", "practical", "visionary"]
+    )
+    default_timeout: float = 60.0  # 60 seconds default, configurable
+    # Deep research configuration
+    deep_research_max_iterations: int = 3
+    deep_research_max_sub_queries: int = 5
+    deep_research_max_sources: int = 5
+    deep_research_follow_links: bool = True
+    deep_research_timeout: float = 120.0
+    deep_research_max_concurrent: int = 3
+    # Per-phase timeout overrides (seconds) - uses deep_research_timeout if not set
+    deep_research_planning_timeout: float = 60.0
+    deep_research_analysis_timeout: float = 90.0
+    deep_research_synthesis_timeout: float = 180.0
+    deep_research_refinement_timeout: float = 60.0
+    # Per-phase provider overrides - uses default_provider if not set
+    deep_research_planning_provider: Optional[str] = None
+    deep_research_analysis_provider: Optional[str] = None
+    deep_research_synthesis_provider: Optional[str] = None
+    deep_research_refinement_provider: Optional[str] = None
+    deep_research_providers: List[str] = field(
+        default_factory=lambda: ["tavily", "google", "semantic_scholar"]
+    )
+    deep_research_audit_artifacts: bool = True
+    # Research mode: "general" | "academic" | "technical"
+    deep_research_mode: str = "general"
+    # Search rate limiting configuration
+    search_rate_limit: int = 60  # requests per minute (global default)
+    max_concurrent_searches: int = 3  # for asyncio.Semaphore in gathering phase
+    per_provider_rate_limits: Dict[str, int] = field(
+        default_factory=lambda: {
+            "tavily": 60,  # Tavily free tier: ~1 req/sec
+            "perplexity": 60,  # Perplexity: ~1 req/sec (pricing: $5/1k requests)
+            "google": 100,  # Google CSE: 100 queries/day free, ~100/min paid
+            "semantic_scholar": 100,  # Semantic Scholar: 100 req/5min unauthenticated
+        }
+    )
+    # Search provider API keys (all optional, read from env vars if not set)
+    tavily_api_key: Optional[str] = None
+    perplexity_api_key: Optional[str] = None
+    google_api_key: Optional[str] = None
+    google_cse_id: Optional[str] = None
+    semantic_scholar_api_key: Optional[str] = None
+
+    @classmethod
+    def from_toml_dict(cls, data: Dict[str, Any]) -> "ResearchConfig":
+        """Create config from TOML dict (typically [research] section).
+
+        Args:
+            data: Dict from TOML parsing
+
+        Returns:
+            ResearchConfig instance
+        """
+        # Parse consensus_providers - handle both string and list
+        consensus_providers = data.get("consensus_providers", ["gemini", "claude"])
+        if isinstance(consensus_providers, str):
+            consensus_providers = [p.strip() for p in consensus_providers.split(",")]
+
+        # Parse ideate_perspectives - handle both string and list
+        ideate_perspectives = data.get(
+            "ideate_perspectives", ["technical", "creative", "practical", "visionary"]
+        )
+        if isinstance(ideate_perspectives, str):
+            ideate_perspectives = [p.strip() for p in ideate_perspectives.split(",")]
+
+        # Parse deep_research_providers - handle both string and list
+        deep_research_providers = data.get(
+            "deep_research_providers", ["tavily", "google", "semantic_scholar"]
+        )
+        if isinstance(deep_research_providers, str):
+            deep_research_providers = [
+                p.strip() for p in deep_research_providers.split(",") if p.strip()
+            ]
+
+        # Parse per_provider_rate_limits - handle dict from TOML
+        per_provider_rate_limits = data.get("per_provider_rate_limits", {
+            "tavily": 60,
+            "perplexity": 60,
+            "google": 100,
+            "semantic_scholar": 100,
+        })
+        if isinstance(per_provider_rate_limits, dict):
+            # Convert values to int
+            per_provider_rate_limits = {
+                k: int(v) for k, v in per_provider_rate_limits.items()
+            }
+
+        return cls(
+            enabled=_parse_bool(data.get("enabled", True)),
+            storage_path=str(data.get("storage_path", "")),
+            storage_backend=str(data.get("storage_backend", "file")),
+            ttl_hours=int(data.get("ttl_hours", 24)),
+            max_messages_per_thread=int(data.get("max_messages_per_thread", 100)),
+            default_provider=str(data.get("default_provider", "gemini")),
+            consensus_providers=consensus_providers,
+            thinkdeep_max_depth=int(data.get("thinkdeep_max_depth", 5)),
+            ideate_perspectives=ideate_perspectives,
+            default_timeout=float(data.get("default_timeout", 60.0)),
+            # Deep research configuration
+            deep_research_max_iterations=int(data.get("deep_research_max_iterations", 3)),
+            deep_research_max_sub_queries=int(data.get("deep_research_max_sub_queries", 5)),
+            deep_research_max_sources=int(data.get("deep_research_max_sources", 5)),
+            deep_research_follow_links=_parse_bool(data.get("deep_research_follow_links", True)),
+            deep_research_timeout=float(data.get("deep_research_timeout", 120.0)),
+            deep_research_max_concurrent=int(data.get("deep_research_max_concurrent", 3)),
+            # Per-phase timeout overrides
+            deep_research_planning_timeout=float(data.get("deep_research_planning_timeout", 60.0)),
+            deep_research_analysis_timeout=float(data.get("deep_research_analysis_timeout", 90.0)),
+            deep_research_synthesis_timeout=float(data.get("deep_research_synthesis_timeout", 180.0)),
+            deep_research_refinement_timeout=float(data.get("deep_research_refinement_timeout", 60.0)),
+            # Per-phase provider overrides
+            deep_research_planning_provider=data.get("deep_research_planning_provider"),
+            deep_research_analysis_provider=data.get("deep_research_analysis_provider"),
+            deep_research_synthesis_provider=data.get("deep_research_synthesis_provider"),
+            deep_research_refinement_provider=data.get("deep_research_refinement_provider"),
+            deep_research_providers=deep_research_providers,
+            deep_research_audit_artifacts=_parse_bool(
+                data.get("deep_research_audit_artifacts", True)
+            ),
+            # Research mode
+            deep_research_mode=str(data.get("deep_research_mode", "general")),
+            # Search rate limiting configuration
+            search_rate_limit=int(data.get("search_rate_limit", 60)),
+            max_concurrent_searches=int(data.get("max_concurrent_searches", 3)),
+            per_provider_rate_limits=per_provider_rate_limits,
+            # Search provider API keys (None means not set in TOML, will check env vars)
+            tavily_api_key=data.get("tavily_api_key"),
+            perplexity_api_key=data.get("perplexity_api_key"),
+            google_api_key=data.get("google_api_key"),
+            google_cse_id=data.get("google_cse_id"),
+            semantic_scholar_api_key=data.get("semantic_scholar_api_key"),
+        )
+
+    def get_storage_path(self) -> Path:
+        """Get resolved storage path.
+
+        Returns:
+            Path to storage directory (creates if needed)
+        """
+        if self.storage_path:
+            return Path(self.storage_path).expanduser()
+        return Path.home() / ".foundry-mcp" / "research"
+
+    def get_provider_rate_limit(self, provider: str) -> int:
+        """Get rate limit for a specific provider.
+
+        Returns the provider-specific rate limit if configured,
+        otherwise falls back to the global search_rate_limit.
+
+        Args:
+            provider: Provider name (e.g., "tavily", "google", "semantic_scholar")
+
+        Returns:
+            Rate limit in requests per minute
+        """
+        return self.per_provider_rate_limits.get(provider, self.search_rate_limit)
+
+    def get_phase_timeout(self, phase: str) -> float:
+        """Get timeout for a specific deep research phase.
+
+        Returns the phase-specific timeout if configured, otherwise
+        falls back to deep_research_timeout.
+
+        Args:
+            phase: Phase name ("planning", "analysis", "synthesis", "refinement", "gathering")
+
+        Returns:
+            Timeout in seconds for the phase
+        """
+        phase_timeouts = {
+            "planning": self.deep_research_planning_timeout,
+            "analysis": self.deep_research_analysis_timeout,
+            "synthesis": self.deep_research_synthesis_timeout,
+            "refinement": self.deep_research_refinement_timeout,
+            "gathering": self.deep_research_timeout,  # Gathering uses default
+        }
+        return phase_timeouts.get(phase.lower(), self.deep_research_timeout)
+
+    def get_phase_provider(self, phase: str) -> str:
+        """Get LLM provider ID for a specific deep research phase.
+
+        Returns the phase-specific provider if configured, otherwise
+        falls back to default_provider. Supports both simple names ("gemini")
+        and ProviderSpec format ("[cli]gemini:pro").
+
+        Args:
+            phase: Phase name ("planning", "analysis", "synthesis", "refinement")
+
+        Returns:
+            Provider ID for the phase (e.g., "gemini", "opencode")
+        """
+        provider_id, _ = self.resolve_phase_provider(phase)
+        return provider_id
+
+    def resolve_phase_provider(self, phase: str) -> Tuple[str, Optional[str]]:
+        """Resolve provider ID and model for a deep research phase.
+
+        Parses ProviderSpec format ("[cli]gemini:pro") or simple names ("gemini").
+        Returns (provider_id, model) tuple for use with the provider registry.
+
+        Args:
+            phase: Phase name ("planning", "analysis", "synthesis", "refinement")
+
+        Returns:
+            Tuple of (provider_id, model) where model may be None
+        """
+        phase_providers = {
+            "planning": self.deep_research_planning_provider,
+            "analysis": self.deep_research_analysis_provider,
+            "synthesis": self.deep_research_synthesis_provider,
+            "refinement": self.deep_research_refinement_provider,
+        }
+        spec_str = phase_providers.get(phase.lower()) or self.default_provider
+        return _parse_provider_spec(spec_str)
+
+    def get_search_provider_api_key(
+        self,
+        provider: str,
+        required: bool = True,
+    ) -> Optional[str]:
+        """Get API key for a search provider with fallback to environment variables.
+
+        Checks config value first, then falls back to environment variable.
+        Raises ValueError with clear error message if required and not found.
+
+        Args:
+            provider: Provider name ("tavily", "google", "semantic_scholar")
+            required: If True, raises ValueError when key is missing (default: True)
+
+        Returns:
+            API key string, or None if not required and not found
+
+        Raises:
+            ValueError: If required=True and no API key is found
+
+        Example:
+            # Get Tavily API key (will raise if missing)
+            api_key = config.research.get_search_provider_api_key("tavily")
+
+            # Get Semantic Scholar API key (optional, returns None if missing)
+            api_key = config.research.get_search_provider_api_key(
+                "semantic_scholar", required=False
+            )
+        """
+        # Map provider names to config attributes and env vars
+        provider_config = {
+            "tavily": {
+                "config_key": "tavily_api_key",
+                "env_var": "TAVILY_API_KEY",
+                "setup_url": "https://tavily.com/",
+            },
+            "perplexity": {
+                "config_key": "perplexity_api_key",
+                "env_var": "PERPLEXITY_API_KEY",
+                "setup_url": "https://docs.perplexity.ai/",
+            },
+            "google": {
+                "config_key": "google_api_key",
+                "env_var": "GOOGLE_API_KEY",
+                "setup_url": "https://console.cloud.google.com/apis/credentials",
+            },
+            "google_cse": {
+                "config_key": "google_cse_id",
+                "env_var": "GOOGLE_CSE_ID",
+                "setup_url": "https://cse.google.com/",
+            },
+            "semantic_scholar": {
+                "config_key": "semantic_scholar_api_key",
+                "env_var": "SEMANTIC_SCHOLAR_API_KEY",
+                "setup_url": "https://www.semanticscholar.org/product/api",
+            },
+        }
+
+        provider_lower = provider.lower()
+        if provider_lower not in provider_config:
+            raise ValueError(
+                f"Unknown search provider: '{provider}'. "
+                f"Valid providers: {', '.join(provider_config.keys())}"
+            )
+
+        config_info = provider_config[provider_lower]
+        config_key = config_info["config_key"]
+        env_var = config_info["env_var"]
+
+        # Check config value first
+        api_key = getattr(self, config_key, None)
+
+        # Fall back to environment variable
+        if not api_key:
+            api_key = os.environ.get(env_var)
+
+        # Handle missing key
+        if not api_key:
+            if required:
+                raise ValueError(
+                    f"{provider.title()} API key not configured. "
+                    f"Set via {env_var} environment variable or "
+                    f"'research.{config_key}' in foundry-mcp.toml. "
+                    f"Get an API key at: {config_info['setup_url']}"
+                )
+            return None
+
+        return api_key
+
+    def get_google_credentials(self, required: bool = True) -> tuple[Optional[str], Optional[str]]:
+        """Get both Google API key and CSE ID for Google Custom Search.
+
+        Convenience method that retrieves both required credentials for
+        Google Custom Search API.
+
+        Args:
+            required: If True, raises ValueError when either credential is missing
+
+        Returns:
+            Tuple of (api_key, cse_id)
+
+        Raises:
+            ValueError: If required=True and either credential is missing
+        """
+        api_key = self.get_search_provider_api_key("google", required=required)
+        cse_id = self.get_search_provider_api_key("google_cse", required=required)
+        return api_key, cse_id
+
+    def get_default_provider_spec(self) -> "ProviderSpec":
+        """Parse default_provider into a ProviderSpec."""
+        from foundry_mcp.core.llm_config import ProviderSpec
+        return ProviderSpec.parse_flexible(self.default_provider)
+
+    def get_consensus_provider_specs(self) -> List["ProviderSpec"]:
+        """Parse consensus_providers into ProviderSpec list."""
+        from foundry_mcp.core.llm_config import ProviderSpec
+        return [ProviderSpec.parse_flexible(p) for p in self.consensus_providers]
+
+
 _VALID_COMMIT_CADENCE = {"manual", "task", "phase"}


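A sketch of how the new `[test]` and `[research]` tables might look in `foundry-mcp.toml`, and how the `from_toml_dict` constructors above coerce them. The concrete runner and values are illustrative; note that string commands are whitespace-split and comma-separated provider strings are accepted in place of lists:

```python
import tomllib  # stdlib on Python 3.11+

from foundry_mcp.config import ResearchConfig, TestConfig  # classes defined above

# Hypothetical foundry-mcp.toml fragment; keys match the parsers above.
SAMPLE_TOML = """
[test]
default_runner = "go"

[test.runners.go]
command = "go test"        # string form is split() into ["go", "test"]
run_args = ["-count=1"]
pattern = "*_test.go"
timeout = 120

[research]
default_provider = "gemini"
consensus_providers = "gemini, claude"  # comma-separated string also accepted
deep_research_synthesis_timeout = 240.0
"""

data = tomllib.loads(SAMPLE_TOML)
test_cfg = TestConfig.from_toml_dict(data["test"])
research_cfg = ResearchConfig.from_toml_dict(data["research"])

assert test_cfg.get_runner().command == ["go", "test"]
assert research_cfg.consensus_providers == ["gemini", "claude"]
assert research_cfg.get_phase_timeout("synthesis") == 240.0
```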
@@ -334,6 +831,45 @@ def _normalize_commit_cadence(value: str) -> str:
     return normalized


+def _parse_provider_spec(spec: str) -> Tuple[str, Optional[str]]:
+    """Parse a provider specification into (provider_id, model).
+
+    Supports both simple names and ProviderSpec bracket notation:
+    - "gemini" -> ("gemini", None)
+    - "[cli]gemini:pro" -> ("gemini", "pro")
+    - "[cli]opencode:openai/gpt-5.2" -> ("opencode", "openai/gpt-5.2")
+    - "[api]openai/gpt-4.1" -> ("openai", "gpt-4.1")
+
+    Args:
+        spec: Provider specification string
+
+    Returns:
+        Tuple of (provider_id, model) where model may be None
+    """
+    spec = spec.strip()
+
+    # Simple name (no brackets) - backward compatible
+    if not spec.startswith("["):
+        return (spec, None)
+
+    # Try to parse with ProviderSpec
+    try:
+        from foundry_mcp.core.llm_config import ProviderSpec
+
+        parsed = ProviderSpec.parse(spec)
+        # Build model string with backend routing if present
+        model = None
+        if parsed.backend and parsed.model:
+            model = f"{parsed.backend}/{parsed.model}"
+        elif parsed.model:
+            model = parsed.model
+        return (parsed.provider, model)
+    except (ValueError, ImportError) as e:
+        logger.warning("Failed to parse provider spec '%s': %s", spec, e)
+        # Fall back to treating as simple name (strip brackets)
+        return (spec.split("]")[-1].split(":")[0], None)
+
+
 def _parse_bool(value: Any) -> bool:
     if isinstance(value, bool):
         return value
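Doctest-style illustrations of the mappings the docstring promises; the failure case is hedged, since it applies only when `ProviderSpec.parse` rejects the spec:

```python
# Expected results per the docstring above (illustrative, not a test suite):
_parse_provider_spec("gemini")               # ("gemini", None)
_parse_provider_spec("[cli]gemini:pro")      # ("gemini", "pro")
_parse_provider_spec("[api]openai/gpt-4.1")  # ("openai", "gpt-4.1")

# If ProviderSpec.parse raises on a malformed bracket spec, the fallback
# strips "[...]" and any ":model" suffix instead of propagating the error:
_parse_provider_spec("[bogus]x:y")           # ("x", None), assuming parse rejects it
```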
@@ -348,6 +884,7 @@ class ServerConfig:
     workspace_roots: List[Path] = field(default_factory=list)
     specs_dir: Optional[Path] = None
     journals_path: Optional[Path] = None
+    bikelane_dir: Optional[Path] = None  # Intake queue storage (default: specs/.bikelane)

     # Logging configuration
     log_level: str = "INFO"
@@ -359,7 +896,7 @@ class ServerConfig:

     # Server configuration
     server_name: str = "foundry-mcp"
-    server_version: str =
+    server_version: str = field(default_factory=lambda: _PACKAGE_VERSION)

     # Git workflow configuration
     git: GitSettings = field(default_factory=GitSettings)
@@ -379,6 +916,15 @@ class ServerConfig:
     # Dashboard configuration
     dashboard: DashboardConfig = field(default_factory=DashboardConfig)

+    # Test runner configuration
+    test: TestConfig = field(default_factory=TestConfig)
+
+    # Research workflows configuration
+    research: ResearchConfig = field(default_factory=ResearchConfig)
+
+    # Tool registration control
+    disabled_tools: List[str] = field(default_factory=list)
+
     @classmethod
     def from_env(cls, config_file: Optional[str] = None) -> "ServerConfig":
         """
@@ -426,6 +972,8 @@ class ServerConfig:
                     self.specs_dir = Path(ws["specs_dir"])
                 if "journals_path" in ws:
                     self.journals_path = Path(ws["journals_path"])
+                if "bikelane_dir" in ws:
+                    self.bikelane_dir = Path(ws["bikelane_dir"])

             # Logging settings
             if "logging" in data:
@@ -450,14 +998,21 @@ class ServerConfig:
                    self.server_name = srv["name"]
                if "version" in srv:
                    self.server_version = srv["version"]
+                # Legacy: disabled_tools under [server] (deprecated)
+                if "disabled_tools" in srv:
+                    self.disabled_tools = srv["disabled_tools"]
+
+            # Tools configuration (preferred location for disabled_tools)
+            if "tools" in data:
+                tools_cfg = data["tools"]
+                if "disabled_tools" in tools_cfg:
+                    self.disabled_tools = tools_cfg["disabled_tools"]

             # Git workflow settings
             if "git" in data:
                 git_cfg = data["git"]
                 if "enabled" in git_cfg:
                     self.git.enabled = _parse_bool(git_cfg["enabled"])
-                if "auto_branch" in git_cfg:
-                    self.git.auto_branch = _parse_bool(git_cfg["auto_branch"])
                 if "auto_commit" in git_cfg:
                     self.git.auto_commit = _parse_bool(git_cfg["auto_commit"])
                 if "auto_push" in git_cfg:
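A sketch showing why the `[tools]` table wins when both locations are present: the loader reads the deprecated `[server].disabled_tools` first, then overwrites it from `[tools]`. Tool names here are illustrative:

```python
import tomllib

doc = tomllib.loads("""
[server]
disabled_tools = ["research"]           # legacy location (deprecated)

[tools]
disabled_tools = ["research", "test"]   # preferred location
""")

# Mirrors the loader order above: legacy key first, preferred key overrides.
disabled = doc.get("server", {}).get("disabled_tools", [])
if "tools" in doc and "disabled_tools" in doc["tools"]:
    disabled = doc["tools"]["disabled_tools"]
assert disabled == ["research", "test"]
```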
@@ -499,6 +1054,14 @@ class ServerConfig:
             if "dashboard" in data:
                 self.dashboard = DashboardConfig.from_toml_dict(data["dashboard"])

+            # Test runner settings
+            if "test" in data:
+                self.test = TestConfig.from_toml_dict(data["test"])
+
+            # Research workflows settings
+            if "research" in data:
+                self.research = ResearchConfig.from_toml_dict(data["research"])
+
         except Exception as e:
             logger.error(f"Error loading config file {path}: {e}")

@@ -516,6 +1079,10 @@ class ServerConfig:
         if journals := os.environ.get("FOUNDRY_MCP_JOURNALS_PATH"):
             self.journals_path = Path(journals)

+        # Bikelane directory (intake queue storage)
+        if bikelane := os.environ.get("FOUNDRY_MCP_BIKELANE_DIR"):
+            self.bikelane_dir = Path(bikelane)
+
         # Log level
         if level := os.environ.get("FOUNDRY_MCP_LOG_LEVEL"):
             self.log_level = level.upper()
@@ -531,8 +1098,6 @@ class ServerConfig:
         # Git settings
         if git_enabled := os.environ.get("FOUNDRY_MCP_GIT_ENABLED"):
             self.git.enabled = _parse_bool(git_enabled)
-        if git_auto_branch := os.environ.get("FOUNDRY_MCP_GIT_AUTO_BRANCH"):
-            self.git.auto_branch = _parse_bool(git_auto_branch)
         if git_auto_commit := os.environ.get("FOUNDRY_MCP_GIT_AUTO_COMMIT"):
             self.git.auto_commit = _parse_bool(git_auto_commit)
         if git_auto_push := os.environ.get("FOUNDRY_MCP_GIT_AUTO_PUSH"):
@@ -671,6 +1236,23 @@ class ServerConfig:
         except ValueError:
             pass

+        # Search provider API keys (direct env vars, no FOUNDRY_MCP_ prefix)
+        # These use standard env var names that match provider documentation
+        if tavily_key := os.environ.get("TAVILY_API_KEY"):
+            self.research.tavily_api_key = tavily_key
+        if perplexity_key := os.environ.get("PERPLEXITY_API_KEY"):
+            self.research.perplexity_api_key = perplexity_key
+        if google_key := os.environ.get("GOOGLE_API_KEY"):
+            self.research.google_api_key = google_key
+        if google_cse := os.environ.get("GOOGLE_CSE_ID"):
+            self.research.google_cse_id = google_cse
+        if semantic_scholar_key := os.environ.get("SEMANTIC_SCHOLAR_API_KEY"):
+            self.research.semantic_scholar_api_key = semantic_scholar_key
+
+        # Disabled tools (comma-separated list)
+        if disabled := os.environ.get("FOUNDRY_MCP_DISABLED_TOOLS"):
+            self.disabled_tools = [t.strip() for t in disabled.split(",") if t.strip()]
+
     def validate_api_key(self, key: Optional[str]) -> bool:
         """
         Validate an API key.
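The search keys deliberately use the providers' own documented variable names (no `FOUNDRY_MCP_` prefix), while foundry-specific settings keep the prefix. A minimal sketch of the resulting behavior, assuming `ServerConfig.from_env()` applies the overrides shown above (the key value is a dummy):

```python
import os

from foundry_mcp.config import ServerConfig

os.environ["TAVILY_API_KEY"] = "tvly-dummy-key"              # provider-style name
os.environ["FOUNDRY_MCP_DISABLED_TOOLS"] = "research, test"  # prefixed, comma-separated

config = ServerConfig.from_env()
assert config.research.tavily_api_key == "tvly-dummy-key"
assert config.disabled_tools == ["research", "test"]
```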
@@ -689,6 +1271,28 @@ class ServerConfig:

         return key in self.api_keys

+    def get_bikelane_dir(self, specs_dir: Optional[Path] = None) -> Path:
+        """
+        Get the resolved bikelane directory path.
+
+        Priority:
+        1. Explicitly configured bikelane_dir (from TOML or env var)
+        2. Default: specs_dir/.bikelane (where specs_dir is resolved)
+
+        Args:
+            specs_dir: Optional specs directory to use for default path.
+                If not provided, uses self.specs_dir or "./specs"
+
+        Returns:
+            Path to bikelane directory
+        """
+        if self.bikelane_dir is not None:
+            return self.bikelane_dir.expanduser()
+
+        # Fall back to default: specs/.bikelane
+        base_specs = specs_dir or self.specs_dir or Path("./specs")
+        return base_specs / ".bikelane"
+
     def setup_logging(self) -> None:
         """Configure logging based on settings."""
         level = getattr(logging, self.log_level, logging.INFO)