foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
foundry_mcp/config.py
ADDED
|
@@ -0,0 +1,1454 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Server configuration for foundry-mcp.
|
|
3
|
+
|
|
4
|
+
Supports configuration via:
|
|
5
|
+
1. Environment variables (highest priority)
|
|
6
|
+
2. TOML config file (foundry-mcp.toml)
|
|
7
|
+
3. Default values (lowest priority)
|
|
8
|
+
|
|
9
|
+
Environment variables:
|
|
10
|
+
- FOUNDRY_MCP_WORKSPACE_ROOTS: Comma-separated list of workspace root paths
|
|
11
|
+
- FOUNDRY_MCP_SPECS_DIR: Path to specs directory
|
|
12
|
+
- FOUNDRY_MCP_JOURNALS_PATH: Path to journals directory
|
|
13
|
+
- FOUNDRY_MCP_BIKELANE_DIR: Path to bikelane intake queue directory (default: specs/.bikelane)
|
|
14
|
+
- FOUNDRY_MCP_LOG_LEVEL: Logging level (DEBUG, INFO, WARNING, ERROR)
|
|
15
|
+
- FOUNDRY_MCP_API_KEYS: Comma-separated list of valid API keys (optional)
|
|
16
|
+
- FOUNDRY_MCP_REQUIRE_AUTH: Whether to require API key authentication (true/false)
|
|
17
|
+
- FOUNDRY_MCP_CONFIG_FILE: Path to TOML config file
|
|
18
|
+
|
|
19
|
+
Search Provider API Keys (for deep research workflow):
|
|
20
|
+
- TAVILY_API_KEY: API key for Tavily web search (https://tavily.com/)
|
|
21
|
+
- PERPLEXITY_API_KEY: API key for Perplexity Search (https://docs.perplexity.ai/)
|
|
22
|
+
- GOOGLE_API_KEY: API key for Google Custom Search (https://console.cloud.google.com/)
|
|
23
|
+
- GOOGLE_CSE_ID: Google Custom Search Engine ID (https://cse.google.com/)
|
|
24
|
+
- SEMANTIC_SCHOLAR_API_KEY: API key for Semantic Scholar academic search (optional for basic tier)
|
|
25
|
+
|
|
26
|
+
API Key Security:
|
|
27
|
+
- Keys should be rotated regularly (recommended: every 90 days)
|
|
28
|
+
- To revoke a key: remove it from FOUNDRY_MCP_API_KEYS and restart server
|
|
29
|
+
- Keys are validated on every tool/resource request when auth is required
|
|
30
|
+
- Use tenant-scoped keys for multi-tenant deployments (prefix with tenant ID)
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
import os
|
|
34
|
+
import logging
|
|
35
|
+
import functools
|
|
36
|
+
import time
|
|
37
|
+
from dataclasses import dataclass, field
|
|
38
|
+
from importlib.metadata import version as get_package_version, PackageNotFoundError
|
|
39
|
+
from pathlib import Path
|
|
40
|
+
from typing import Optional, List, Dict, Any, Callable, TypeVar, Tuple
|
|
41
|
+
|
|
42
|
+
try:
|
|
43
|
+
import tomllib
|
|
44
|
+
except ImportError:
|
|
45
|
+
import tomli as tomllib # Python < 3.11 fallback
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
logger = logging.getLogger(__name__)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _get_version() -> str:
|
|
52
|
+
"""Get package version from metadata (single source of truth: pyproject.toml)."""
|
|
53
|
+
try:
|
|
54
|
+
return get_package_version("foundry-mcp")
|
|
55
|
+
except PackageNotFoundError:
|
|
56
|
+
return "0.5.0" # Fallback for dev without install
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
_PACKAGE_VERSION = _get_version()
|
|
60
|
+
|
|
61
|
+
T = TypeVar("T")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
@dataclass
class GitSettings:
    """Git workflow preferences for CLI + MCP surfaces.

    Plain settings container; no validation happens here — consumers
    interpret these flags. Attribute meanings below follow the field
    names; confirm exact semantics against the CLI/MCP call sites.
    """

    enabled: bool = False  # master switch for git workflow features
    auto_commit: bool = False  # create commits automatically
    auto_push: bool = False  # push automatically after committing
    auto_pr: bool = False  # open pull requests automatically
    commit_cadence: str = "manual"  # cadence token; "manual" = only on explicit request
    show_before_commit: bool = True  # surface pending changes for review before committing
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class ObservabilityConfig:
    """Settings for the observability stack (OTel + Prometheus).

    Attributes:
        enabled: Master switch for all observability features.
        otel_enabled: Turn on OpenTelemetry tracing and metrics.
        otel_endpoint: OTLP exporter endpoint.
        otel_service_name: Service name attached to emitted traces.
        otel_sample_rate: Trace sampling rate (0.0 to 1.0).
        prometheus_enabled: Turn on Prometheus metrics.
        prometheus_port: Port for the /metrics HTTP server (0 = no server).
        prometheus_host: Bind address for the metrics HTTP server.
        prometheus_namespace: Prefix applied to metric names.
    """

    enabled: bool = False
    otel_enabled: bool = False
    otel_endpoint: str = "localhost:4317"
    otel_service_name: str = "foundry-mcp"
    otel_sample_rate: float = 1.0
    prometheus_enabled: bool = False
    prometheus_port: int = 0
    prometheus_host: str = "0.0.0.0"
    prometheus_namespace: str = "foundry_mcp"

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "ObservabilityConfig":
        """Build an instance from the parsed [observability] TOML section.

        Args:
            data: Mapping produced by the TOML parser.

        Returns:
            ObservabilityConfig populated from *data*, falling back to the
            dataclass defaults for any missing key.
        """
        lookup = data.get
        return cls(
            enabled=_parse_bool(lookup("enabled", False)),
            otel_enabled=_parse_bool(lookup("otel_enabled", False)),
            otel_endpoint=str(lookup("otel_endpoint", "localhost:4317")),
            otel_service_name=str(lookup("otel_service_name", "foundry-mcp")),
            otel_sample_rate=float(lookup("otel_sample_rate", 1.0)),
            prometheus_enabled=_parse_bool(lookup("prometheus_enabled", False)),
            prometheus_port=int(lookup("prometheus_port", 0)),
            prometheus_host=str(lookup("prometheus_host", "0.0.0.0")),
            prometheus_namespace=str(lookup("prometheus_namespace", "foundry_mcp")),
        )
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
@dataclass
class HealthConfig:
    """Tunables for health checks and probes.

    Attributes:
        enabled: Whether health checks are enabled.
        liveness_timeout: Budget for liveness checks (seconds).
        readiness_timeout: Budget for readiness checks (seconds).
        health_timeout: Budget for full health checks (seconds).
        disk_space_threshold_mb: Free-disk minimum (MB) before unhealthy.
        disk_space_warning_mb: Free-disk minimum (MB) before degraded.
    """

    enabled: bool = True
    liveness_timeout: float = 1.0
    readiness_timeout: float = 5.0
    health_timeout: float = 10.0
    disk_space_threshold_mb: int = 100
    disk_space_warning_mb: int = 500

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "HealthConfig":
        """Build an instance from the parsed [health] TOML section.

        Args:
            data: Mapping produced by the TOML parser.

        Returns:
            HealthConfig with each field cast from *data*, using the
            dataclass defaults for missing keys.
        """
        is_enabled = _parse_bool(data.get("enabled", True))
        liveness = float(data.get("liveness_timeout", 1.0))
        readiness = float(data.get("readiness_timeout", 5.0))
        full_check = float(data.get("health_timeout", 10.0))
        unhealthy_floor = int(data.get("disk_space_threshold_mb", 100))
        degraded_floor = int(data.get("disk_space_warning_mb", 500))
        return cls(
            enabled=is_enabled,
            liveness_timeout=liveness,
            readiness_timeout=readiness,
            health_timeout=full_check,
            disk_space_threshold_mb=unhealthy_floor,
            disk_space_warning_mb=degraded_floor,
        )
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
@dataclass
class ErrorCollectionConfig:
    """Settings for the error data collection infrastructure.

    Attributes:
        enabled: Whether error collection is enabled.
        storage_path: Directory for error storage; empty selects the
            default (~/.foundry-mcp/errors).
        retention_days: Records older than this many days are deleted.
        max_errors: Cap on the number of retained error records.
        include_stack_traces: Whether stack traces are stored with records.
        redact_inputs: Whether sensitive input parameters are redacted.
    """

    enabled: bool = True
    storage_path: str = ""  # empty string selects the default location
    retention_days: int = 30
    max_errors: int = 10000
    include_stack_traces: bool = True
    redact_inputs: bool = True

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "ErrorCollectionConfig":
        """Build an instance from the parsed [error_collection] TOML section.

        Args:
            data: Mapping produced by the TOML parser.

        Returns:
            ErrorCollectionConfig populated from *data*, with dataclass
            defaults for missing keys.
        """
        lookup = data.get
        return cls(
            enabled=_parse_bool(lookup("enabled", True)),
            storage_path=str(lookup("storage_path", "")),
            retention_days=int(lookup("retention_days", 30)),
            max_errors=int(lookup("max_errors", 10000)),
            include_stack_traces=_parse_bool(lookup("include_stack_traces", True)),
            redact_inputs=_parse_bool(lookup("redact_inputs", True)),
        )

    def get_storage_path(self) -> Path:
        """Return the resolved error-storage directory.

        Returns:
            The configured path with ~ expanded, or the default
            ~/.foundry-mcp/errors when storage_path is empty.
        """
        configured = self.storage_path
        if not configured:
            return Path.home() / ".foundry-mcp" / "errors"
        return Path(configured).expanduser()
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
@dataclass
class MetricsPersistenceConfig:
    """Configuration for metrics persistence infrastructure.

    Persists time-series metrics to disk so they survive server restarts.
    Metrics are aggregated into time buckets before storage to reduce
    disk usage while maintaining useful historical data.

    Attributes:
        enabled: Whether metrics persistence is enabled
        storage_path: Directory path for metrics storage (default: ~/.foundry-mcp/metrics)
        retention_days: Delete records older than this many days
        max_records: Maximum number of metric data points to keep
        bucket_interval_seconds: Aggregation bucket interval (default: 60s = 1 minute)
        flush_interval_seconds: How often to flush buffer to disk (default: 30s)
        persist_metrics: List of metric names to persist (empty = persist all)
    """

    enabled: bool = False
    storage_path: str = ""  # Empty string means use default
    retention_days: int = 7
    max_records: int = 100000
    bucket_interval_seconds: int = 60
    flush_interval_seconds: int = 30
    persist_metrics: List[str] = field(default_factory=lambda: [
        "tool_invocations_total",
        "tool_duration_seconds",
        "tool_errors_total",
        "health_status",
    ])

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "MetricsPersistenceConfig":
        """Create config from TOML dict (typically [metrics_persistence] section).

        Accepts persist_metrics as either a TOML list or a comma-separated
        string. The parsed list is always a fresh copy with str elements, so
        the config never aliases (and cannot mutate) the caller's data, and
        membership tests in should_persist_metric compare like types.

        Args:
            data: Dict from TOML parsing

        Returns:
            MetricsPersistenceConfig instance
        """
        raw_metrics = data.get("persist_metrics", [
            "tool_invocations_total",
            "tool_duration_seconds",
            "tool_errors_total",
            "health_status",
        ])
        if isinstance(raw_metrics, str):
            # Comma-separated string form: split and drop empty segments.
            persist_metrics = [m.strip() for m in raw_metrics.split(",") if m.strip()]
        else:
            # Copy + coerce: avoid aliasing the TOML dict's list, and ensure
            # every entry is a str (consistent with the other field casts).
            persist_metrics = [str(m) for m in raw_metrics]

        return cls(
            enabled=_parse_bool(data.get("enabled", False)),
            storage_path=str(data.get("storage_path", "")),
            retention_days=int(data.get("retention_days", 7)),
            max_records=int(data.get("max_records", 100000)),
            bucket_interval_seconds=int(data.get("bucket_interval_seconds", 60)),
            flush_interval_seconds=int(data.get("flush_interval_seconds", 30)),
            persist_metrics=persist_metrics,
        )

    def get_storage_path(self) -> Path:
        """Get the resolved storage path.

        Returns:
            The configured path (with ~ expanded), or the default
            ~/.foundry-mcp/metrics when storage_path is empty.
        """
        if self.storage_path:
            return Path(self.storage_path).expanduser()
        return Path.home() / ".foundry-mcp" / "metrics"

    def should_persist_metric(self, metric_name: str) -> bool:
        """Check if a metric should be persisted.

        Args:
            metric_name: Name of the metric

        Returns:
            True if the metric should be persisted; an empty
            persist_metrics list means every metric is persisted.
        """
        # Empty list means persist all metrics
        if not self.persist_metrics:
            return True
        return metric_name in self.persist_metrics
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
@dataclass
class DashboardConfig:
    """Settings for the built-in web dashboard.

    The dashboard serves a web UI over errors, metrics, and AI provider
    status without requiring external tools like Grafana.

    Attributes:
        enabled: Whether the dashboard server is enabled.
        port: HTTP port for the dashboard (8501 is the Streamlit default).
        host: Bind address (127.0.0.1 keeps it localhost-only).
        auto_open_browser: Open a browser when the dashboard starts.
        refresh_interval_ms: UI auto-refresh interval in milliseconds.
    """

    enabled: bool = False
    port: int = 8501  # Streamlit default port
    host: str = "127.0.0.1"
    auto_open_browser: bool = False
    refresh_interval_ms: int = 5000

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "DashboardConfig":
        """Build an instance from the parsed [dashboard] TOML section.

        Args:
            data: Mapping produced by the TOML parser.

        Returns:
            DashboardConfig populated from *data*, with dataclass defaults
            for any missing key.
        """
        lookup = data.get
        return cls(
            enabled=_parse_bool(lookup("enabled", False)),
            port=int(lookup("port", 8501)),  # Streamlit default
            host=str(lookup("host", "127.0.0.1")),
            auto_open_browser=_parse_bool(lookup("auto_open_browser", False)),
            refresh_interval_ms=int(lookup("refresh_interval_ms", 5000)),
        )
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
@dataclass
class RunnerConfig:
    """Configuration for a test runner (pytest, go, npm, etc.).

    Attributes:
        command: Command to execute (e.g., ["go", "test"] or ["python", "-m", "pytest"])
        run_args: Additional arguments for running tests
        discover_args: Arguments for test discovery
        pattern: File pattern for test discovery (e.g., "*_test.go", "test_*.py")
        timeout: Default timeout in seconds
    """

    command: List[str] = field(default_factory=list)
    run_args: List[str] = field(default_factory=list)
    discover_args: List[str] = field(default_factory=list)
    pattern: str = "*"
    timeout: int = 300

    @staticmethod
    def _as_arg_list(value: Any) -> List[str]:
        """Normalize a TOML value into an argument list.

        Strings are tokenized with shell-style quoting rules so that a
        quoted argument (e.g. 'sh -c "echo hi"') survives as one token;
        a naive str.split() would break it apart. List values are copied
        with each element coerced to str, so the config never aliases
        the caller's TOML data.
        """
        if isinstance(value, str):
            import shlex  # local import: only needed for string-form commands

            return shlex.split(value)
        return [str(item) for item in value]

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "RunnerConfig":
        """Create config from TOML dict.

        Accepts command/run_args/discover_args as either a list of strings
        or a single shell-style string.

        Args:
            data: Dict from TOML parsing

        Returns:
            RunnerConfig instance
        """
        return cls(
            command=cls._as_arg_list(data.get("command", [])),
            run_args=cls._as_arg_list(data.get("run_args", [])),
            discover_args=cls._as_arg_list(data.get("discover_args", [])),
            pattern=str(data.get("pattern", "*")),
            timeout=int(data.get("timeout", 300)),
        )
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
@dataclass
class TestConfig:
    """Configuration for test runners.

    Multiple runners (pytest, go, npm, ...) can be declared in TOML with
    their own commands and arguments; one is selected at runtime via the
    'runner' parameter, falling back to default_runner.

    Attributes:
        default_runner: Runner used when the caller names none.
        runners: Mapping of runner name to its RunnerConfig.
    """

    default_runner: str = "pytest"
    runners: Dict[str, RunnerConfig] = field(default_factory=dict)

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "TestConfig":
        """Build an instance from the parsed [test] TOML section.

        Args:
            data: Mapping produced by the TOML parser.

        Returns:
            TestConfig with every entry under "runners" parsed into a
            RunnerConfig.
        """
        parsed_runners = {
            name: RunnerConfig.from_toml_dict(runner_cfg)
            for name, runner_cfg in data.get("runners", {}).items()
        }
        return cls(
            default_runner=str(data.get("default_runner", "pytest")),
            runners=parsed_runners,
        )

    def get_runner(self, name: Optional[str] = None) -> Optional[RunnerConfig]:
        """Look up a runner configuration.

        Args:
            name: Runner name, or None to use default_runner.

        Returns:
            The matching RunnerConfig, or None when not configured.
        """
        return self.runners.get(name or self.default_runner)
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
@dataclass
|
|
441
|
+
class ResearchConfig:
|
|
442
|
+
"""Configuration for research workflows (CHAT, CONSENSUS, THINKDEEP, IDEATE, DEEP_RESEARCH).
|
|
443
|
+
|
|
444
|
+
Attributes:
|
|
445
|
+
enabled: Master switch for research tools
|
|
446
|
+
storage_path: Directory for research state persistence (default: ~/.foundry-mcp/research)
|
|
447
|
+
storage_backend: Storage backend type (currently only 'file' supported)
|
|
448
|
+
ttl_hours: Time-to-live for stored states in hours
|
|
449
|
+
max_messages_per_thread: Maximum messages retained in a conversation thread
|
|
450
|
+
default_provider: Default LLM provider for single-model workflows
|
|
451
|
+
consensus_providers: List of provider IDs for CONSENSUS workflow
|
|
452
|
+
thinkdeep_max_depth: Maximum investigation depth for THINKDEEP workflow
|
|
453
|
+
ideate_perspectives: List of perspectives for IDEATE brainstorming
|
|
454
|
+
default_timeout: Default timeout in seconds for provider calls (thinkdeep uses 2x)
|
|
455
|
+
deep_research_max_iterations: Maximum refinement iterations for DEEP_RESEARCH
|
|
456
|
+
deep_research_max_sub_queries: Maximum sub-queries for query decomposition
|
|
457
|
+
deep_research_max_sources: Maximum sources per sub-query
|
|
458
|
+
deep_research_follow_links: Whether to follow and extract content from links
|
|
459
|
+
deep_research_timeout: Default timeout per operation in seconds
|
|
460
|
+
deep_research_max_concurrent: Maximum concurrent operations
|
|
461
|
+
deep_research_providers: Ordered list of search providers for deep research
|
|
462
|
+
deep_research_audit_artifacts: Whether to write per-run audit artifacts
|
|
463
|
+
search_rate_limit: Global rate limit for search APIs (requests per minute)
|
|
464
|
+
max_concurrent_searches: Maximum concurrent search requests (for asyncio.Semaphore)
|
|
465
|
+
per_provider_rate_limits: Per-provider rate limits in requests per minute
|
|
466
|
+
tavily_api_key: API key for Tavily search provider (optional, reads from TAVILY_API_KEY env var)
|
|
467
|
+
perplexity_api_key: API key for Perplexity Search (optional, reads from PERPLEXITY_API_KEY env var)
|
|
468
|
+
google_api_key: API key for Google Custom Search (optional, reads from GOOGLE_API_KEY env var)
|
|
469
|
+
google_cse_id: Google Custom Search Engine ID (optional, reads from GOOGLE_CSE_ID env var)
|
|
470
|
+
semantic_scholar_api_key: API key for Semantic Scholar (optional, reads from SEMANTIC_SCHOLAR_API_KEY env var)
|
|
471
|
+
"""
|
|
472
|
+
|
|
473
|
+
enabled: bool = True
|
|
474
|
+
storage_path: str = "" # Empty = use default (~/.foundry-mcp/research)
|
|
475
|
+
storage_backend: str = "file"
|
|
476
|
+
ttl_hours: int = 24
|
|
477
|
+
max_messages_per_thread: int = 100
|
|
478
|
+
default_provider: str = "gemini"
|
|
479
|
+
consensus_providers: List[str] = field(
|
|
480
|
+
default_factory=lambda: ["gemini", "claude"]
|
|
481
|
+
)
|
|
482
|
+
thinkdeep_max_depth: int = 5
|
|
483
|
+
ideate_perspectives: List[str] = field(
|
|
484
|
+
default_factory=lambda: ["technical", "creative", "practical", "visionary"]
|
|
485
|
+
)
|
|
486
|
+
default_timeout: float = 360.0 # 360 seconds default for AI CLI providers
|
|
487
|
+
# Deep research configuration
|
|
488
|
+
deep_research_max_iterations: int = 3
|
|
489
|
+
deep_research_max_sub_queries: int = 5
|
|
490
|
+
deep_research_max_sources: int = 5
|
|
491
|
+
deep_research_follow_links: bool = True
|
|
492
|
+
deep_research_timeout: float = 600.0 # Whole workflow timeout
|
|
493
|
+
deep_research_max_concurrent: int = 3
|
|
494
|
+
# Per-phase timeout overrides (seconds) - uses deep_research_timeout if not set
|
|
495
|
+
deep_research_planning_timeout: float = 360.0
|
|
496
|
+
deep_research_analysis_timeout: float = 360.0
|
|
497
|
+
deep_research_synthesis_timeout: float = 600.0 # Synthesis may take longer
|
|
498
|
+
deep_research_refinement_timeout: float = 360.0
|
|
499
|
+
# Per-phase provider overrides - uses default_provider if not set
|
|
500
|
+
deep_research_planning_provider: Optional[str] = None
|
|
501
|
+
deep_research_analysis_provider: Optional[str] = None
|
|
502
|
+
deep_research_synthesis_provider: Optional[str] = None
|
|
503
|
+
deep_research_refinement_provider: Optional[str] = None
|
|
504
|
+
deep_research_providers: List[str] = field(
|
|
505
|
+
default_factory=lambda: ["tavily", "google", "semantic_scholar"]
|
|
506
|
+
)
|
|
507
|
+
deep_research_audit_artifacts: bool = True
|
|
508
|
+
# Research mode: "general" | "academic" | "technical"
|
|
509
|
+
deep_research_mode: str = "general"
|
|
510
|
+
# Search rate limiting configuration
|
|
511
|
+
search_rate_limit: int = 60 # requests per minute (global default)
|
|
512
|
+
max_concurrent_searches: int = 3 # for asyncio.Semaphore in gathering phase
|
|
513
|
+
per_provider_rate_limits: Dict[str, int] = field(
|
|
514
|
+
default_factory=lambda: {
|
|
515
|
+
"tavily": 60, # Tavily free tier: ~1 req/sec
|
|
516
|
+
"perplexity": 60, # Perplexity: ~1 req/sec (pricing: $5/1k requests)
|
|
517
|
+
"google": 100, # Google CSE: 100 queries/day free, ~100/min paid
|
|
518
|
+
"semantic_scholar": 100, # Semantic Scholar: 100 req/5min unauthenticated
|
|
519
|
+
}
|
|
520
|
+
)
|
|
521
|
+
# Search provider API keys (all optional, read from env vars if not set)
|
|
522
|
+
tavily_api_key: Optional[str] = None
|
|
523
|
+
perplexity_api_key: Optional[str] = None
|
|
524
|
+
google_api_key: Optional[str] = None
|
|
525
|
+
google_cse_id: Optional[str] = None
|
|
526
|
+
semantic_scholar_api_key: Optional[str] = None
|
|
527
|
+
|
|
528
|
+
@classmethod
def from_toml_dict(cls, data: Dict[str, Any]) -> "ResearchConfig":
    """Create config from TOML dict (typically [research] section).

    List-valued options (``consensus_providers``, ``ideate_perspectives``,
    ``deep_research_providers``) accept either a TOML list or a
    comma-separated string; empty segments are dropped.

    Note: the per-phase timeout defaults below mirror the dataclass field
    defaults (planning/analysis/refinement 360s, synthesis 600s) so that
    omitting a key in TOML behaves the same as constructing the dataclass
    directly.

    Args:
        data: Dict from TOML parsing

    Returns:
        ResearchConfig instance
    """

    def _as_list(value: Any) -> List[str]:
        # Accept "a, b, c" shorthand for ["a", "b", "c"]; drop empty segments.
        if isinstance(value, str):
            return [p.strip() for p in value.split(",") if p.strip()]
        return value

    consensus_providers = _as_list(data.get("consensus_providers", ["gemini", "claude"]))
    ideate_perspectives = _as_list(
        data.get(
            "ideate_perspectives", ["technical", "creative", "practical", "visionary"]
        )
    )
    deep_research_providers = _as_list(
        data.get("deep_research_providers", ["tavily", "google", "semantic_scholar"])
    )

    # Parse per_provider_rate_limits - handle dict from TOML
    per_provider_rate_limits = data.get("per_provider_rate_limits", {
        "tavily": 60,
        "perplexity": 60,
        "google": 100,
        "semantic_scholar": 100,
    })
    if isinstance(per_provider_rate_limits, dict):
        # TOML may yield floats for numeric values; normalize to int.
        per_provider_rate_limits = {
            k: int(v) for k, v in per_provider_rate_limits.items()
        }

    return cls(
        enabled=_parse_bool(data.get("enabled", True)),
        storage_path=str(data.get("storage_path", "")),
        storage_backend=str(data.get("storage_backend", "file")),
        ttl_hours=int(data.get("ttl_hours", 24)),
        max_messages_per_thread=int(data.get("max_messages_per_thread", 100)),
        default_provider=str(data.get("default_provider", "gemini")),
        consensus_providers=consensus_providers,
        thinkdeep_max_depth=int(data.get("thinkdeep_max_depth", 5)),
        ideate_perspectives=ideate_perspectives,
        default_timeout=float(data.get("default_timeout", 360.0)),
        # Deep research configuration
        deep_research_max_iterations=int(data.get("deep_research_max_iterations", 3)),
        deep_research_max_sub_queries=int(data.get("deep_research_max_sub_queries", 5)),
        deep_research_max_sources=int(data.get("deep_research_max_sources", 5)),
        deep_research_follow_links=_parse_bool(data.get("deep_research_follow_links", True)),
        deep_research_timeout=float(data.get("deep_research_timeout", 120.0)),
        deep_research_max_concurrent=int(data.get("deep_research_max_concurrent", 3)),
        # Per-phase timeout overrides (defaults match the field defaults)
        deep_research_planning_timeout=float(data.get("deep_research_planning_timeout", 360.0)),
        deep_research_analysis_timeout=float(data.get("deep_research_analysis_timeout", 360.0)),
        deep_research_synthesis_timeout=float(data.get("deep_research_synthesis_timeout", 600.0)),
        deep_research_refinement_timeout=float(data.get("deep_research_refinement_timeout", 360.0)),
        # Per-phase provider overrides
        deep_research_planning_provider=data.get("deep_research_planning_provider"),
        deep_research_analysis_provider=data.get("deep_research_analysis_provider"),
        deep_research_synthesis_provider=data.get("deep_research_synthesis_provider"),
        deep_research_refinement_provider=data.get("deep_research_refinement_provider"),
        deep_research_providers=deep_research_providers,
        deep_research_audit_artifacts=_parse_bool(
            data.get("deep_research_audit_artifacts", True)
        ),
        # Research mode
        deep_research_mode=str(data.get("deep_research_mode", "general")),
        # Search rate limiting configuration
        search_rate_limit=int(data.get("search_rate_limit", 60)),
        max_concurrent_searches=int(data.get("max_concurrent_searches", 3)),
        per_provider_rate_limits=per_provider_rate_limits,
        # Search provider API keys (None means not set in TOML, will check env vars)
        tavily_api_key=data.get("tavily_api_key"),
        perplexity_api_key=data.get("perplexity_api_key"),
        google_api_key=data.get("google_api_key"),
        google_cse_id=data.get("google_cse_id"),
        semantic_scholar_api_key=data.get("semantic_scholar_api_key"),
    )
|
|
617
|
+
|
|
618
|
+
def get_storage_path(self) -> Path:
    """Return the resolved research storage directory.

    Uses the configured ``storage_path`` (with ``~`` expanded) when set,
    otherwise defaults to ``~/.foundry-mcp/research``. The directory is
    not created here.
    """
    configured = self.storage_path
    if configured:
        return Path(configured).expanduser()
    return Path.home() / ".foundry-mcp" / "research"
|
|
627
|
+
|
|
628
|
+
def get_provider_rate_limit(self, provider: str) -> int:
    """Return the requests-per-minute limit for *provider*.

    Looks the provider up in ``per_provider_rate_limits``; providers
    without an explicit entry use the global ``search_rate_limit``.

    Args:
        provider: Provider name (e.g., "tavily", "google", "semantic_scholar")

    Returns:
        Rate limit in requests per minute
    """
    limits = self.per_provider_rate_limits
    if provider in limits:
        return limits[provider]
    return self.search_rate_limit
|
|
641
|
+
|
|
642
|
+
def get_phase_timeout(self, phase: str) -> float:
    """Return the timeout (seconds) for a deep research phase.

    Phase names are matched case-insensitively. "planning", "analysis",
    "synthesis" and "refinement" have dedicated overrides; "gathering"
    and any unrecognized phase use the global ``deep_research_timeout``.

    Args:
        phase: Phase name ("planning", "analysis", "synthesis", "refinement", "gathering")

    Returns:
        Timeout in seconds for the phase
    """
    name = phase.lower()
    if name == "planning":
        return self.deep_research_planning_timeout
    if name == "analysis":
        return self.deep_research_analysis_timeout
    if name == "synthesis":
        return self.deep_research_synthesis_timeout
    if name == "refinement":
        return self.deep_research_refinement_timeout
    # Gathering (and anything unknown) falls back to the global default.
    return self.deep_research_timeout
|
|
662
|
+
|
|
663
|
+
def get_phase_provider(self, phase: str) -> str:
    """Return the LLM provider ID for a deep research phase.

    Convenience wrapper around :meth:`resolve_phase_provider` that
    discards the model component. Phase-specific overrides win; otherwise
    ``default_provider`` is used. Both simple names ("gemini") and
    ProviderSpec strings ("[cli]gemini:pro") are accepted.

    Args:
        phase: Phase name ("planning", "analysis", "synthesis", "refinement")

    Returns:
        Provider ID for the phase (e.g., "gemini", "opencode")
    """
    provider_id, _model = self.resolve_phase_provider(phase)
    return provider_id
|
|
678
|
+
|
|
679
|
+
def resolve_phase_provider(self, phase: str) -> Tuple[str, Optional[str]]:
    """Resolve the (provider_id, model) pair for a deep research phase.

    The phase-specific override is used when set; an unset override (or an
    unknown phase) falls back to ``default_provider``. The chosen spec is
    then parsed with :func:`_parse_provider_spec`, which understands both
    simple names ("gemini") and bracket specs ("[cli]gemini:pro").

    Args:
        phase: Phase name ("planning", "analysis", "synthesis", "refinement")

    Returns:
        Tuple of (provider_id, model) where model may be None
    """
    overrides = {
        "planning": self.deep_research_planning_provider,
        "analysis": self.deep_research_analysis_provider,
        "synthesis": self.deep_research_synthesis_provider,
        "refinement": self.deep_research_refinement_provider,
    }
    selected = overrides.get(phase.lower()) or self.default_provider
    return _parse_provider_spec(selected)
|
|
699
|
+
|
|
700
|
+
def get_search_provider_api_key(
    self,
    provider: str,
    required: bool = True,
) -> Optional[str]:
    """Get API key for a search provider with fallback to environment variables.

    The value configured on this object wins; when absent (or empty), the
    provider's conventional environment variable is consulted. A missing
    key either raises (``required=True``) or returns ``None``.

    Args:
        provider: Provider name ("tavily", "google", "semantic_scholar")
        required: If True, raises ValueError when key is missing (default: True)

    Returns:
        API key string, or None if not required and not found

    Raises:
        ValueError: If the provider is unknown, or if required=True and
            no API key is found

    Example:
        # Get Tavily API key (will raise if missing)
        api_key = config.research.get_search_provider_api_key("tavily")

        # Get Semantic Scholar API key (optional, returns None if missing)
        api_key = config.research.get_search_provider_api_key(
            "semantic_scholar", required=False
        )
    """
    # (config attribute, env var, signup URL) for each supported provider.
    registry = {
        "tavily": (
            "tavily_api_key",
            "TAVILY_API_KEY",
            "https://tavily.com/",
        ),
        "perplexity": (
            "perplexity_api_key",
            "PERPLEXITY_API_KEY",
            "https://docs.perplexity.ai/",
        ),
        "google": (
            "google_api_key",
            "GOOGLE_API_KEY",
            "https://console.cloud.google.com/apis/credentials",
        ),
        "google_cse": (
            "google_cse_id",
            "GOOGLE_CSE_ID",
            "https://cse.google.com/",
        ),
        "semantic_scholar": (
            "semantic_scholar_api_key",
            "SEMANTIC_SCHOLAR_API_KEY",
            "https://www.semanticscholar.org/product/api",
        ),
    }

    key = provider.lower()
    if key not in registry:
        raise ValueError(
            f"Unknown search provider: '{provider}'. "
            f"Valid providers: {', '.join(registry.keys())}"
        )

    config_key, env_var, setup_url = registry[key]

    # Config value first, then the environment variable.
    api_key = getattr(self, config_key, None) or os.environ.get(env_var)
    if api_key:
        return api_key

    if not required:
        return None
    raise ValueError(
        f"{provider.title()} API key not configured. "
        f"Set via {env_var} environment variable or "
        f"'research.{config_key}' in foundry-mcp.toml. "
        f"Get an API key at: {setup_url}"
    )
|
|
788
|
+
|
|
789
|
+
def get_google_credentials(self, required: bool = True) -> tuple[Optional[str], Optional[str]]:
    """Fetch the Google API key and CSE ID for Google Custom Search.

    Thin convenience wrapper that resolves both credentials through
    :meth:`get_search_provider_api_key`.

    Args:
        required: If True, raises ValueError when either credential is missing

    Returns:
        Tuple of (api_key, cse_id)

    Raises:
        ValueError: If required=True and either credential is missing
    """
    return (
        self.get_search_provider_api_key("google", required=required),
        self.get_search_provider_api_key("google_cse", required=required),
    )
|
|
807
|
+
|
|
808
|
+
def get_default_provider_spec(self) -> "ProviderSpec":
    """Return ``default_provider`` parsed as a ProviderSpec."""
    from foundry_mcp.core.llm_config import ProviderSpec

    spec = ProviderSpec.parse_flexible(self.default_provider)
    return spec
|
|
812
|
+
|
|
813
|
+
def get_consensus_provider_specs(self) -> List["ProviderSpec"]:
    """Return each ``consensus_providers`` entry parsed as a ProviderSpec."""
    from foundry_mcp.core.llm_config import ProviderSpec

    parse = ProviderSpec.parse_flexible
    return [parse(entry) for entry in self.consensus_providers]
|
|
817
|
+
|
|
818
|
+
|
|
819
|
+
_VALID_COMMIT_CADENCE = {"manual", "task", "phase"}
|
|
820
|
+
|
|
821
|
+
|
|
822
|
+
def _normalize_commit_cadence(value: str) -> str:
|
|
823
|
+
normalized = value.strip().lower()
|
|
824
|
+
if normalized not in _VALID_COMMIT_CADENCE:
|
|
825
|
+
logger.warning(
|
|
826
|
+
"Invalid commit cadence '%s'. Falling back to 'manual'. Valid options: %s",
|
|
827
|
+
value,
|
|
828
|
+
", ".join(sorted(_VALID_COMMIT_CADENCE)),
|
|
829
|
+
)
|
|
830
|
+
return "manual"
|
|
831
|
+
return normalized
|
|
832
|
+
|
|
833
|
+
|
|
834
|
+
def _parse_provider_spec(spec: str) -> Tuple[str, Optional[str]]:
|
|
835
|
+
"""Parse a provider specification into (provider_id, model).
|
|
836
|
+
|
|
837
|
+
Supports both simple names and ProviderSpec bracket notation:
|
|
838
|
+
- "gemini" -> ("gemini", None)
|
|
839
|
+
- "[cli]gemini:pro" -> ("gemini", "pro")
|
|
840
|
+
- "[cli]opencode:openai/gpt-5.2" -> ("opencode", "openai/gpt-5.2")
|
|
841
|
+
- "[api]openai/gpt-4.1" -> ("openai", "gpt-4.1")
|
|
842
|
+
|
|
843
|
+
Args:
|
|
844
|
+
spec: Provider specification string
|
|
845
|
+
|
|
846
|
+
Returns:
|
|
847
|
+
Tuple of (provider_id, model) where model may be None
|
|
848
|
+
"""
|
|
849
|
+
spec = spec.strip()
|
|
850
|
+
|
|
851
|
+
# Simple name (no brackets) - backward compatible
|
|
852
|
+
if not spec.startswith("["):
|
|
853
|
+
return (spec, None)
|
|
854
|
+
|
|
855
|
+
# Try to parse with ProviderSpec
|
|
856
|
+
try:
|
|
857
|
+
from foundry_mcp.core.llm_config import ProviderSpec
|
|
858
|
+
|
|
859
|
+
parsed = ProviderSpec.parse(spec)
|
|
860
|
+
# Build model string with backend routing if present
|
|
861
|
+
model = None
|
|
862
|
+
if parsed.backend and parsed.model:
|
|
863
|
+
model = f"{parsed.backend}/{parsed.model}"
|
|
864
|
+
elif parsed.model:
|
|
865
|
+
model = parsed.model
|
|
866
|
+
return (parsed.provider, model)
|
|
867
|
+
except (ValueError, ImportError) as e:
|
|
868
|
+
logger.warning("Failed to parse provider spec '%s': %s", spec, e)
|
|
869
|
+
# Fall back to treating as simple name (strip brackets)
|
|
870
|
+
return (spec.split("]")[-1].split(":")[0], None)
|
|
871
|
+
|
|
872
|
+
|
|
873
|
+
def _parse_bool(value: Any) -> bool:
|
|
874
|
+
if isinstance(value, bool):
|
|
875
|
+
return value
|
|
876
|
+
return str(value).strip().lower() in {"true", "1", "yes", "on"}
|
|
877
|
+
|
|
878
|
+
|
|
879
|
+
@dataclass
class ServerConfig:
    """Server configuration with support for env vars and TOML overrides.

    Values are resolved in priority order: environment variables, then a
    TOML config file, then the field defaults below (see ``from_env``).
    """

    # Workspace configuration
    workspace_roots: List[Path] = field(default_factory=list)
    specs_dir: Optional[Path] = None
    journals_path: Optional[Path] = None
    bikelane_dir: Optional[Path] = None  # Intake queue storage (default: specs/.bikelane)

    # Logging configuration
    log_level: str = "INFO"
    structured_logging: bool = True  # JSON-formatted log records when True

    # Authentication configuration
    api_keys: List[str] = field(default_factory=list)
    require_auth: bool = False  # when False, validate_api_key accepts everything

    # Server configuration
    server_name: str = "foundry-mcp"
    server_version: str = field(default_factory=lambda: _PACKAGE_VERSION)

    # Git workflow configuration
    git: GitSettings = field(default_factory=GitSettings)

    # Observability configuration
    observability: ObservabilityConfig = field(default_factory=ObservabilityConfig)

    # Health check configuration
    health: HealthConfig = field(default_factory=HealthConfig)

    # Error collection configuration
    error_collection: ErrorCollectionConfig = field(default_factory=ErrorCollectionConfig)

    # Metrics persistence configuration
    metrics_persistence: MetricsPersistenceConfig = field(default_factory=MetricsPersistenceConfig)

    # Dashboard configuration
    dashboard: DashboardConfig = field(default_factory=DashboardConfig)

    # Test runner configuration
    test: TestConfig = field(default_factory=TestConfig)

    # Research workflows configuration
    research: ResearchConfig = field(default_factory=ResearchConfig)

    # Tool registration control
    disabled_tools: List[str] = field(default_factory=list)
|
|
927
|
+
|
|
928
|
+
@classmethod
def from_env(cls, config_file: Optional[str] = None) -> "ServerConfig":
    """
    Create configuration from environment variables and optional TOML file.

    Priority (highest to lowest):
    1. Environment variables
    2. TOML config file
    3. Default values
    """
    config = cls()

    # Explicit path (argument or FOUNDRY_MCP_CONFIG_FILE) wins; otherwise
    # probe the conventional file names in the working directory.
    explicit = config_file or os.environ.get("FOUNDRY_MCP_CONFIG_FILE")
    if explicit:
        config._load_toml(Path(explicit))
    else:
        for candidate in ("foundry-mcp.toml", ".foundry-mcp.toml"):
            candidate_path = Path(candidate)
            if candidate_path.exists():
                config._load_toml(candidate_path)
                break

    # Environment variables override anything the TOML file set.
    config._load_env()

    return config
|
|
955
|
+
|
|
956
|
+
def _load_toml(self, path: Path) -> None:
    """Load configuration from TOML file.

    Known sections ([workspace], [logging], [auth], [server], [tools],
    [git], [observability], [health], [error_collection],
    [metrics_persistence], [dashboard], [test], [research]) are applied
    onto this instance; unknown sections are ignored. A missing file or
    any load/parse error is logged and otherwise ignored (best-effort
    loading; defaults remain in effect).
    """
    if not path.exists():
        logger.warning(f"Config file not found: {path}")
        return

    try:
        # tomllib requires binary mode.
        with open(path, "rb") as f:
            data = tomllib.load(f)

        # Workspace settings
        if "workspace" in data:
            ws = data["workspace"]
            if "roots" in ws:
                self.workspace_roots = [Path(p) for p in ws["roots"]]
            if "specs_dir" in ws:
                self.specs_dir = Path(ws["specs_dir"])
            if "journals_path" in ws:
                self.journals_path = Path(ws["journals_path"])
            if "bikelane_dir" in ws:
                self.bikelane_dir = Path(ws["bikelane_dir"])

        # Logging settings
        if "logging" in data:
            log = data["logging"]
            if "level" in log:
                self.log_level = log["level"].upper()
            if "structured" in log:
                self.structured_logging = log["structured"]

        # Auth settings
        if "auth" in data:
            auth = data["auth"]
            if "api_keys" in auth:
                self.api_keys = auth["api_keys"]
            if "require_auth" in auth:
                self.require_auth = auth["require_auth"]

        # Server settings
        if "server" in data:
            srv = data["server"]
            if "name" in srv:
                self.server_name = srv["name"]
            if "version" in srv:
                self.server_version = srv["version"]
            # Legacy: disabled_tools under [server] (deprecated)
            if "disabled_tools" in srv:
                self.disabled_tools = srv["disabled_tools"]

        # Tools configuration (preferred location for disabled_tools);
        # NOTE: read after [server], so [tools] wins when both are present.
        if "tools" in data:
            tools_cfg = data["tools"]
            if "disabled_tools" in tools_cfg:
                self.disabled_tools = tools_cfg["disabled_tools"]

        # Git workflow settings
        if "git" in data:
            git_cfg = data["git"]
            if "enabled" in git_cfg:
                self.git.enabled = _parse_bool(git_cfg["enabled"])
            if "auto_commit" in git_cfg:
                self.git.auto_commit = _parse_bool(git_cfg["auto_commit"])
            if "auto_push" in git_cfg:
                self.git.auto_push = _parse_bool(git_cfg["auto_push"])
            if "auto_pr" in git_cfg:
                self.git.auto_pr = _parse_bool(git_cfg["auto_pr"])
            if "show_before_commit" in git_cfg:
                self.git.show_before_commit = _parse_bool(
                    git_cfg["show_before_commit"]
                )
            if "commit_cadence" in git_cfg:
                self.git.commit_cadence = _normalize_commit_cadence(
                    str(git_cfg["commit_cadence"])
                )

        # Observability settings
        if "observability" in data:
            self.observability = ObservabilityConfig.from_toml_dict(
                data["observability"]
            )

        # Health check settings
        if "health" in data:
            self.health = HealthConfig.from_toml_dict(data["health"])

        # Error collection settings
        if "error_collection" in data:
            self.error_collection = ErrorCollectionConfig.from_toml_dict(
                data["error_collection"]
            )

        # Metrics persistence settings
        if "metrics_persistence" in data:
            self.metrics_persistence = MetricsPersistenceConfig.from_toml_dict(
                data["metrics_persistence"]
            )

        # Dashboard settings
        if "dashboard" in data:
            self.dashboard = DashboardConfig.from_toml_dict(data["dashboard"])

        # Test runner settings
        if "test" in data:
            self.test = TestConfig.from_toml_dict(data["test"])

        # Research workflows settings
        if "research" in data:
            self.research = ResearchConfig.from_toml_dict(data["research"])

    except Exception as e:
        # Deliberately broad: config loading must never crash the server.
        logger.error(f"Error loading config file {path}: {e}")
|
|
1067
|
+
|
|
1068
|
+
def _load_env(self) -> None:
    """Load configuration overrides from environment variables.

    Applied after TOML loading, so these values take precedence. An unset
    or empty environment variable leaves the current value untouched, and
    numeric values that fail to parse are silently ignored (matching the
    previous per-variable try/except behavior).
    """

    def _env(name: str) -> Optional[str]:
        # Empty strings count as "not set", like the original walrus checks.
        return os.environ.get(name) or None

    def _assign(name: str, obj: Any, attr: str, cast: Any = str) -> None:
        # Set obj.attr from env var `name` after casting; skip on parse error.
        raw = _env(name)
        if raw is None:
            return
        try:
            value = cast(raw)
        except ValueError:
            return
        setattr(obj, attr, value)

    # Workspace configuration
    if roots := _env("FOUNDRY_MCP_WORKSPACE_ROOTS"):
        self.workspace_roots = [Path(p.strip()) for p in roots.split(",")]
    if specs := _env("FOUNDRY_MCP_SPECS_DIR"):
        self.specs_dir = Path(specs)
    if journals := _env("FOUNDRY_MCP_JOURNALS_PATH"):
        self.journals_path = Path(journals)
    # Bikelane directory (intake queue storage)
    if bikelane := _env("FOUNDRY_MCP_BIKELANE_DIR"):
        self.bikelane_dir = Path(bikelane)

    # Logging / auth
    if level := _env("FOUNDRY_MCP_LOG_LEVEL"):
        self.log_level = level.upper()
    if keys := _env("FOUNDRY_MCP_API_KEYS"):
        self.api_keys = [k.strip() for k in keys.split(",") if k.strip()]
    if require := _env("FOUNDRY_MCP_REQUIRE_AUTH"):
        self.require_auth = require.lower() in ("true", "1", "yes")

    # Git settings
    _assign("FOUNDRY_MCP_GIT_ENABLED", self.git, "enabled", _parse_bool)
    _assign("FOUNDRY_MCP_GIT_AUTO_COMMIT", self.git, "auto_commit", _parse_bool)
    _assign("FOUNDRY_MCP_GIT_AUTO_PUSH", self.git, "auto_push", _parse_bool)
    _assign("FOUNDRY_MCP_GIT_AUTO_PR", self.git, "auto_pr", _parse_bool)
    _assign("FOUNDRY_MCP_GIT_SHOW_PREVIEW", self.git, "show_before_commit", _parse_bool)
    _assign(
        "FOUNDRY_MCP_GIT_COMMIT_CADENCE",
        self.git,
        "commit_cadence",
        _normalize_commit_cadence,
    )

    # Observability settings
    obs = self.observability
    _assign("FOUNDRY_MCP_OBSERVABILITY_ENABLED", obs, "enabled", _parse_bool)
    _assign("FOUNDRY_MCP_OTEL_ENABLED", obs, "otel_enabled", _parse_bool)
    _assign("FOUNDRY_MCP_OTEL_ENDPOINT", obs, "otel_endpoint")
    _assign("FOUNDRY_MCP_OTEL_SERVICE_NAME", obs, "otel_service_name")
    _assign("FOUNDRY_MCP_OTEL_SAMPLE_RATE", obs, "otel_sample_rate", float)
    _assign("FOUNDRY_MCP_PROMETHEUS_ENABLED", obs, "prometheus_enabled", _parse_bool)
    _assign("FOUNDRY_MCP_PROMETHEUS_PORT", obs, "prometheus_port", int)
    _assign("FOUNDRY_MCP_PROMETHEUS_HOST", obs, "prometheus_host")
    _assign("FOUNDRY_MCP_PROMETHEUS_NAMESPACE", obs, "prometheus_namespace")

    # Health check settings
    health = self.health
    _assign("FOUNDRY_MCP_HEALTH_ENABLED", health, "enabled", _parse_bool)
    _assign("FOUNDRY_MCP_HEALTH_LIVENESS_TIMEOUT", health, "liveness_timeout", float)
    _assign("FOUNDRY_MCP_HEALTH_READINESS_TIMEOUT", health, "readiness_timeout", float)
    _assign("FOUNDRY_MCP_HEALTH_TIMEOUT", health, "health_timeout", float)
    _assign("FOUNDRY_MCP_DISK_SPACE_THRESHOLD_MB", health, "disk_space_threshold_mb", int)
    _assign("FOUNDRY_MCP_DISK_SPACE_WARNING_MB", health, "disk_space_warning_mb", int)

    # Error collection settings
    errs = self.error_collection
    _assign("FOUNDRY_MCP_ERROR_COLLECTION_ENABLED", errs, "enabled", _parse_bool)
    _assign("FOUNDRY_MCP_ERROR_STORAGE_PATH", errs, "storage_path")
    _assign("FOUNDRY_MCP_ERROR_RETENTION_DAYS", errs, "retention_days", int)
    _assign("FOUNDRY_MCP_ERROR_MAX_ERRORS", errs, "max_errors", int)
    _assign("FOUNDRY_MCP_ERROR_INCLUDE_STACK_TRACES", errs, "include_stack_traces", _parse_bool)
    _assign("FOUNDRY_MCP_ERROR_REDACT_INPUTS", errs, "redact_inputs", _parse_bool)

    # Metrics persistence settings
    metrics = self.metrics_persistence
    _assign("FOUNDRY_MCP_METRICS_PERSISTENCE_ENABLED", metrics, "enabled", _parse_bool)
    _assign("FOUNDRY_MCP_METRICS_STORAGE_PATH", metrics, "storage_path")
    _assign("FOUNDRY_MCP_METRICS_RETENTION_DAYS", metrics, "retention_days", int)
    _assign("FOUNDRY_MCP_METRICS_MAX_RECORDS", metrics, "max_records", int)
    _assign("FOUNDRY_MCP_METRICS_BUCKET_INTERVAL", metrics, "bucket_interval_seconds", int)
    _assign("FOUNDRY_MCP_METRICS_FLUSH_INTERVAL", metrics, "flush_interval_seconds", int)
    if persist_list := _env("FOUNDRY_MCP_METRICS_PERSIST_METRICS"):
        self.metrics_persistence.persist_metrics = [
            m.strip() for m in persist_list.split(",") if m.strip()
        ]

    # Dashboard settings
    dash = self.dashboard
    _assign("FOUNDRY_MCP_DASHBOARD_ENABLED", dash, "enabled", _parse_bool)
    _assign("FOUNDRY_MCP_DASHBOARD_PORT", dash, "port", int)
    _assign("FOUNDRY_MCP_DASHBOARD_HOST", dash, "host")
    _assign("FOUNDRY_MCP_DASHBOARD_AUTO_OPEN", dash, "auto_open_browser", _parse_bool)
    _assign("FOUNDRY_MCP_DASHBOARD_REFRESH_INTERVAL", dash, "refresh_interval_ms", int)

    # Search provider API keys (direct env vars, no FOUNDRY_MCP_ prefix)
    # These use standard env var names that match provider documentation
    research = self.research
    _assign("TAVILY_API_KEY", research, "tavily_api_key")
    _assign("PERPLEXITY_API_KEY", research, "perplexity_api_key")
    _assign("GOOGLE_API_KEY", research, "google_api_key")
    _assign("GOOGLE_CSE_ID", research, "google_cse_id")
    _assign("SEMANTIC_SCHOLAR_API_KEY", research, "semantic_scholar_api_key")

    # Disabled tools (comma-separated list)
    if disabled := _env("FOUNDRY_MCP_DISABLED_TOOLS"):
        self.disabled_tools = [t.strip() for t in disabled.split(",") if t.strip()]
|
|
1255
|
+
|
|
1256
|
+
def validate_api_key(self, key: Optional[str]) -> bool:
    """Check whether *key* grants access.

    Authentication is skipped entirely when ``require_auth`` is off;
    otherwise the key must be non-empty and present in ``api_keys``.

    Args:
        key: API key to validate

    Returns:
        True if valid (or auth not required), False otherwise
    """
    if not self.require_auth:
        return True
    # Empty/None keys short-circuit to False; otherwise membership decides.
    return bool(key) and key in self.api_keys
|
|
1273
|
+
|
|
1274
|
+
def get_bikelane_dir(self, specs_dir: Optional[Path] = None) -> Path:
    """Resolve the bikelane directory path.

    An explicitly configured ``bikelane_dir`` (from TOML or env var) wins;
    otherwise the default is ``<specs>/.bikelane`` where the specs base is
    the first truthy of: the *specs_dir* argument, ``self.specs_dir``, or
    ``./specs``.

    Args:
        specs_dir: Optional specs directory to use for the default path.

    Returns:
        Path to bikelane directory
    """
    configured = self.bikelane_dir
    if configured is not None:
        return configured.expanduser()

    # Fall back to the first available specs base, then ./specs.
    for candidate in (specs_dir, self.specs_dir):
        if candidate:
            return candidate / ".bikelane"
    return Path("./specs") / ".bikelane"
|
|
1295
|
+
|
|
1296
|
+
def setup_logging(self) -> None:
    """Configure logging based on settings."""
    # Unknown level names silently fall back to INFO.
    level = getattr(logging, self.log_level, logging.INFO)

    if self.structured_logging:
        # JSON-style structured logging
        fmt = (
            '{"timestamp":"%(asctime)s","level":"%(levelname)s",'
            '"logger":"%(name)s","message":"%(message)s"}'
        )
    else:
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(fmt))

    pkg_logger = logging.getLogger("foundry_mcp")
    pkg_logger.setLevel(level)
    pkg_logger.addHandler(stream_handler)
|
|
1317
|
+
|
|
1318
|
+
|
|
1319
|
+
# Global configuration instance.
# Lazily created on first get_config() call; replaced wholesale via set_config().
_config: Optional[ServerConfig] = None
|
|
1321
|
+
|
|
1322
|
+
|
|
1323
|
+
def get_config() -> ServerConfig:
    """Return the process-wide ServerConfig, building it from the
    environment on first use."""
    global _config
    cfg = _config
    if cfg is None:
        cfg = ServerConfig.from_env()
        _config = cfg
    return cfg
|
|
1329
|
+
|
|
1330
|
+
|
|
1331
|
+
def set_config(config: ServerConfig) -> None:
    """Install *config* as the process-wide configuration instance."""
    global _config
    _config = config
|
|
1335
|
+
|
|
1336
|
+
|
|
1337
|
+
# Metrics and observability decorators
|
|
1338
|
+
|
|
1339
|
+
|
|
1340
|
+
def log_call(
    logger_name: Optional[str] = None,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """
    Decorator to log function calls with structured data.

    Emits a DEBUG record before and after each call (arg count and kwarg
    names go in ``extra``) and an ERROR record if the call raises; the
    exception is always re-raised.

    Args:
        logger_name: Optional logger name (defaults to function module)
    """

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        log = logging.getLogger(logger_name or func.__module__)

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> T:
            # Lazy %-style args: the message is only formatted if a handler
            # actually emits the record at this level (f-strings format
            # eagerly even when DEBUG is disabled).
            log.debug(
                "Calling %s",
                func.__name__,
                extra={
                    "function": func.__name__,
                    "args_count": len(args),
                    "kwargs_keys": list(kwargs.keys()),
                },
            )
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                log.error(
                    "Error in %s: %s",
                    func.__name__,
                    e,
                    extra={
                        "function": func.__name__,
                        "error": str(e),
                        "error_type": type(e).__name__,
                    },
                )
                raise
            log.debug(
                "Completed %s",
                func.__name__,
                extra={
                    "function": func.__name__,
                    "success": True,
                },
            )
            return result

        return wrapper

    return decorator
|
|
1387
|
+
|
|
1388
|
+
|
|
1389
|
+
def timed(
    metric_name: Optional[str] = None,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """
    Decorator to measure and log function execution time.

    Logs one INFO record per call with the elapsed wall time in
    milliseconds and a success flag; failing calls also carry the error
    text, and the exception is re-raised.

    Args:
        metric_name: Optional metric name (defaults to function name)
    """

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        name = metric_name or func.__name__
        log = logging.getLogger(func.__module__)

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> T:
            start = time.perf_counter()
            try:
                result = func(*args, **kwargs)
            except Exception as e:
                elapsed = time.perf_counter() - start
                # Lazy %-style arg instead of an eagerly-formatted f-string.
                log.info(
                    "Timer: %s",
                    name,
                    extra={
                        "metric": name,
                        "duration_ms": round(elapsed * 1000, 2),
                        "success": False,
                        "error": str(e),
                    },
                )
                raise
            elapsed = time.perf_counter() - start
            log.info(
                "Timer: %s",
                name,
                extra={
                    "metric": name,
                    "duration_ms": round(elapsed * 1000, 2),
                    "success": True,
                },
            )
            return result

        return wrapper

    return decorator
|
|
1434
|
+
|
|
1435
|
+
|
|
1436
|
+
def require_auth(func: Callable[..., T]) -> Callable[..., T]:
    """
    Decorator to require API key authentication for a function.

    The wrapped function must accept an 'api_key' keyword argument.
    Raises ValueError if authentication fails.
    """

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> T:
        # Delegate the decision (including the require_auth bypass) to the
        # global configuration's validator.
        if not get_config().validate_api_key(kwargs.get("api_key")):
            raise ValueError("Invalid or missing API key")
        return func(*args, **kwargs)

    return wrapper
|