foundry_mcp-0.3.3-py3-none-any.whl
This diff represents the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -0
- foundry_mcp/cli/__init__.py +80 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +633 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +652 -0
- foundry_mcp/cli/commands/session.py +479 -0
- foundry_mcp/cli/commands/specs.py +856 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +259 -0
- foundry_mcp/cli/flags.py +266 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +850 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1636 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/feature_flags.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/journal.py +694 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1350 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +123 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +317 -0
- foundry_mcp/core/prometheus.py +577 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +546 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
- foundry_mcp/core/prompts/plan_review.py +623 -0
- foundry_mcp/core/providers/__init__.py +225 -0
- foundry_mcp/core/providers/base.py +476 -0
- foundry_mcp/core/providers/claude.py +460 -0
- foundry_mcp/core/providers/codex.py +619 -0
- foundry_mcp/core/providers/cursor_agent.py +642 -0
- foundry_mcp/core/providers/detectors.py +488 -0
- foundry_mcp/core/providers/gemini.py +405 -0
- foundry_mcp/core/providers/opencode.py +616 -0
- foundry_mcp/core/providers/opencode_wrapper.js +302 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +729 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +934 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +1650 -0
- foundry_mcp/core/task.py +1289 -0
- foundry_mcp/core/testing.py +450 -0
- foundry_mcp/core/validation.py +2081 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +234 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +289 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +174 -0
- foundry_mcp/dashboard/views/overview.py +160 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/sdd-spec-schema.json +386 -0
- foundry_mcp/server.py +164 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +71 -0
- foundry_mcp/tools/unified/authoring.py +1487 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +198 -0
- foundry_mcp/tools/unified/environment.py +939 -0
- foundry_mcp/tools/unified/error.py +462 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +632 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +745 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +629 -0
- foundry_mcp/tools/unified/review.py +685 -0
- foundry_mcp/tools/unified/review_helpers.py +299 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +580 -0
- foundry_mcp/tools/unified/spec.py +808 -0
- foundry_mcp/tools/unified/task.py +2202 -0
- foundry_mcp/tools/unified/test.py +370 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.3.3.dist-info/METADATA +337 -0
- foundry_mcp-0.3.3.dist-info/RECORD +135 -0
- foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
- foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
foundry_mcp/config.py
ADDED
@@ -0,0 +1,850 @@
"""
Server configuration for foundry-mcp.

Supports configuration via:
1. Environment variables (highest priority)
2. TOML config file (foundry-mcp.toml)
3. Default values (lowest priority)

Environment variables:
- FOUNDRY_MCP_WORKSPACE_ROOTS: Comma-separated list of workspace root paths
- FOUNDRY_MCP_SPECS_DIR: Path to specs directory
- FOUNDRY_MCP_JOURNALS_PATH: Path to journals directory
- FOUNDRY_MCP_LOG_LEVEL: Logging level (DEBUG, INFO, WARNING, ERROR)
- FOUNDRY_MCP_API_KEYS: Comma-separated list of valid API keys (optional)
- FOUNDRY_MCP_REQUIRE_AUTH: Whether to require API key authentication (true/false)
- FOUNDRY_MCP_CONFIG_FILE: Path to TOML config file

API Key Security:
- Keys should be rotated regularly (recommended: every 90 days)
- To revoke a key: remove it from FOUNDRY_MCP_API_KEYS and restart server
- Keys are validated on every tool/resource request when auth is required
- Use tenant-scoped keys for multi-tenant deployments (prefix with tenant ID)
"""

import os
import logging
import functools
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, List, Dict, Any, Callable, TypeVar

try:
    import tomllib
except ImportError:
    import tomli as tomllib  # Python < 3.11 fallback


logger = logging.getLogger(__name__)

T = TypeVar("T")


@dataclass
class GitSettings:
    """Git workflow preferences for CLI + MCP surfaces."""

    enabled: bool = False
    auto_branch: bool = False
    auto_commit: bool = False
    auto_push: bool = False
    auto_pr: bool = False
    commit_cadence: str = "manual"
    show_before_commit: bool = True


@dataclass
class ObservabilityConfig:
    """Configuration for observability stack (OTel + Prometheus).

    Attributes:
        enabled: Master switch for all observability features
        otel_enabled: Enable OpenTelemetry tracing and metrics
        otel_endpoint: OTLP exporter endpoint
        otel_service_name: Service name for traces
        otel_sample_rate: Trace sampling rate (0.0 to 1.0)
        prometheus_enabled: Enable Prometheus metrics
        prometheus_port: HTTP server port for /metrics (0 = no server)
        prometheus_host: HTTP server host
        prometheus_namespace: Metric namespace prefix
    """

    enabled: bool = False
    otel_enabled: bool = False
    otel_endpoint: str = "localhost:4317"
    otel_service_name: str = "foundry-mcp"
    otel_sample_rate: float = 1.0
    prometheus_enabled: bool = False
    prometheus_port: int = 0
    prometheus_host: str = "0.0.0.0"
    prometheus_namespace: str = "foundry_mcp"

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "ObservabilityConfig":
        """Create config from TOML dict (typically [observability] section).

        Args:
            data: Dict from TOML parsing

        Returns:
            ObservabilityConfig instance
        """
        return cls(
            enabled=_parse_bool(data.get("enabled", False)),
            otel_enabled=_parse_bool(data.get("otel_enabled", False)),
            otel_endpoint=str(data.get("otel_endpoint", "localhost:4317")),
            otel_service_name=str(data.get("otel_service_name", "foundry-mcp")),
            otel_sample_rate=float(data.get("otel_sample_rate", 1.0)),
            prometheus_enabled=_parse_bool(data.get("prometheus_enabled", False)),
            prometheus_port=int(data.get("prometheus_port", 0)),
            prometheus_host=str(data.get("prometheus_host", "0.0.0.0")),
            prometheus_namespace=str(data.get("prometheus_namespace", "foundry_mcp")),
        )

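# Illustrative usage sketch (example only, not part of the packaged file).
# from_toml_dict coerces every value, so string-typed TOML or env-derived
# values behave the same as native types; keys it does not recognize are
# ignored, and missing keys keep the dataclass defaults:
#
#     obs = ObservabilityConfig.from_toml_dict(
#         {"enabled": "yes", "otel_enabled": True, "prometheus_port": "9090"}
#     )
#     obs.enabled            # True  ("yes" accepted by _parse_bool)
#     obs.prometheus_port    # 9090  (int() applied to the string)
#     obs.otel_endpoint      # "localhost:4317" (default retained)
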
@dataclass
class HealthConfig:
    """Configuration for health checks and probes.

    Attributes:
        enabled: Whether health checks are enabled
        liveness_timeout: Timeout for liveness checks (seconds)
        readiness_timeout: Timeout for readiness checks (seconds)
        health_timeout: Timeout for full health checks (seconds)
        disk_space_threshold_mb: Minimum disk space (MB) before unhealthy
        disk_space_warning_mb: Minimum disk space (MB) before degraded
    """

    enabled: bool = True
    liveness_timeout: float = 1.0
    readiness_timeout: float = 5.0
    health_timeout: float = 10.0
    disk_space_threshold_mb: int = 100
    disk_space_warning_mb: int = 500

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "HealthConfig":
        """Create config from TOML dict (typically [health] section).

        Args:
            data: Dict from TOML parsing

        Returns:
            HealthConfig instance
        """
        return cls(
            enabled=_parse_bool(data.get("enabled", True)),
            liveness_timeout=float(data.get("liveness_timeout", 1.0)),
            readiness_timeout=float(data.get("readiness_timeout", 5.0)),
            health_timeout=float(data.get("health_timeout", 10.0)),
            disk_space_threshold_mb=int(data.get("disk_space_threshold_mb", 100)),
            disk_space_warning_mb=int(data.get("disk_space_warning_mb", 500)),
        )


@dataclass
class ErrorCollectionConfig:
    """Configuration for error data collection infrastructure.

    Attributes:
        enabled: Whether error collection is enabled
        storage_path: Directory path for error storage (default: .cache/foundry-mcp/errors)
        retention_days: Delete records older than this many days
        max_errors: Maximum number of error records to keep
        include_stack_traces: Whether to include stack traces in error records
        redact_inputs: Whether to redact sensitive data from input parameters
    """

    enabled: bool = True
    storage_path: str = ""  # Empty string means use default
    retention_days: int = 30
    max_errors: int = 10000
    include_stack_traces: bool = True
    redact_inputs: bool = True

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "ErrorCollectionConfig":
        """Create config from TOML dict (typically [error_collection] section).

        Args:
            data: Dict from TOML parsing

        Returns:
            ErrorCollectionConfig instance
        """
        return cls(
            enabled=_parse_bool(data.get("enabled", True)),
            storage_path=str(data.get("storage_path", "")),
            retention_days=int(data.get("retention_days", 30)),
            max_errors=int(data.get("max_errors", 10000)),
            include_stack_traces=_parse_bool(data.get("include_stack_traces", True)),
            redact_inputs=_parse_bool(data.get("redact_inputs", True)),
        )

    def get_storage_path(self) -> Path:
        """Get the resolved storage path.

        Returns:
            Path to error storage directory
        """
        if self.storage_path:
            return Path(self.storage_path).expanduser()
        return Path.home() / ".cache" / "foundry-mcp" / "errors"


@dataclass
class MetricsPersistenceConfig:
    """Configuration for metrics persistence infrastructure.

    Persists time-series metrics to disk so they survive server restarts.
    Metrics are aggregated into time buckets before storage to reduce
    disk usage while maintaining useful historical data.

    Attributes:
        enabled: Whether metrics persistence is enabled
        storage_path: Directory path for metrics storage (default: .cache/foundry-mcp/metrics)
        retention_days: Delete records older than this many days
        max_records: Maximum number of metric data points to keep
        bucket_interval_seconds: Aggregation bucket interval (default: 60s = 1 minute)
        flush_interval_seconds: How often to flush buffer to disk (default: 30s)
        persist_metrics: List of metric names to persist (empty = persist all)
    """

    enabled: bool = False
    storage_path: str = ""  # Empty string means use default
    retention_days: int = 7
    max_records: int = 100000
    bucket_interval_seconds: int = 60
    flush_interval_seconds: int = 30
    persist_metrics: List[str] = field(default_factory=lambda: [
        "tool_invocations_total",
        "tool_duration_seconds",
        "tool_errors_total",
        "health_status",
    ])

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "MetricsPersistenceConfig":
        """Create config from TOML dict (typically [metrics_persistence] section).

        Args:
            data: Dict from TOML parsing

        Returns:
            MetricsPersistenceConfig instance
        """
        persist_metrics = data.get("persist_metrics", [
            "tool_invocations_total",
            "tool_duration_seconds",
            "tool_errors_total",
            "health_status",
        ])
        # Handle both list and comma-separated string
        if isinstance(persist_metrics, str):
            persist_metrics = [m.strip() for m in persist_metrics.split(",") if m.strip()]

        return cls(
            enabled=_parse_bool(data.get("enabled", False)),
            storage_path=str(data.get("storage_path", "")),
            retention_days=int(data.get("retention_days", 7)),
            max_records=int(data.get("max_records", 100000)),
            bucket_interval_seconds=int(data.get("bucket_interval_seconds", 60)),
            flush_interval_seconds=int(data.get("flush_interval_seconds", 30)),
            persist_metrics=persist_metrics,
        )

    def get_storage_path(self) -> Path:
        """Get the resolved storage path.

        Returns:
            Path to metrics storage directory
        """
        if self.storage_path:
            return Path(self.storage_path).expanduser()
        return Path.home() / ".cache" / "foundry-mcp" / "metrics"

    def should_persist_metric(self, metric_name: str) -> bool:
        """Check if a metric should be persisted.

        Args:
            metric_name: Name of the metric

        Returns:
            True if the metric should be persisted
        """
        # Empty list means persist all metrics
        if not self.persist_metrics:
            return True
        return metric_name in self.persist_metrics

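# Illustrative usage sketch (example only, not part of the packaged file).
# persist_metrics may be given as a TOML array or a comma-separated string,
# and an empty list means "persist every metric":
#
#     mp = MetricsPersistenceConfig.from_toml_dict(
#         {"enabled": True, "persist_metrics": "tool_errors_total, health_status"}
#     )
#     mp.should_persist_metric("health_status")          # True
#     mp.should_persist_metric("tool_duration_seconds")  # False
#     MetricsPersistenceConfig(persist_metrics=[]).should_persist_metric("x")  # True
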
@dataclass
class DashboardConfig:
    """Configuration for built-in web dashboard.

    The dashboard provides a web UI for viewing errors, metrics, and
    AI provider status without requiring external tools like Grafana.

    Attributes:
        enabled: Whether the dashboard server is enabled
        port: HTTP port for dashboard (default: 8080)
        host: Host to bind to (default: 127.0.0.1 for localhost only)
        auto_open_browser: Open browser when dashboard starts
        refresh_interval_ms: Auto-refresh interval in milliseconds
    """

    enabled: bool = False
    port: int = 8501  # Streamlit default port
    host: str = "127.0.0.1"
    auto_open_browser: bool = False
    refresh_interval_ms: int = 5000

    @classmethod
    def from_toml_dict(cls, data: Dict[str, Any]) -> "DashboardConfig":
        """Create config from TOML dict (typically [dashboard] section).

        Args:
            data: Dict from TOML parsing

        Returns:
            DashboardConfig instance
        """
        return cls(
            enabled=_parse_bool(data.get("enabled", False)),
            port=int(data.get("port", 8501)),  # Streamlit default
            host=str(data.get("host", "127.0.0.1")),
            auto_open_browser=_parse_bool(data.get("auto_open_browser", False)),
            refresh_interval_ms=int(data.get("refresh_interval_ms", 5000)),
        )


_VALID_COMMIT_CADENCE = {"manual", "task", "phase"}


def _normalize_commit_cadence(value: str) -> str:
    normalized = value.strip().lower()
    if normalized not in _VALID_COMMIT_CADENCE:
        logger.warning(
            "Invalid commit cadence '%s'. Falling back to 'manual'. Valid options: %s",
            value,
            ", ".join(sorted(_VALID_COMMIT_CADENCE)),
        )
        return "manual"
    return normalized


def _parse_bool(value: Any) -> bool:
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in {"true", "1", "yes", "on"}

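# Illustrative sketch (example only, not part of the packaged file). The same
# helpers back both TOML values and environment-variable strings:
#
#     _parse_bool("TRUE"), _parse_bool("1"), _parse_bool("on")   # all True
#     _parse_bool("false"), _parse_bool("")                      # both False
#     _normalize_commit_cadence("Phase")    # "phase"
#     _normalize_commit_cadence("hourly")   # "manual" (invalid value, warning logged)
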
@dataclass
class ServerConfig:
    """Server configuration with support for env vars and TOML overrides."""

    # Workspace configuration
    workspace_roots: List[Path] = field(default_factory=list)
    specs_dir: Optional[Path] = None
    journals_path: Optional[Path] = None

    # Logging configuration
    log_level: str = "INFO"
    structured_logging: bool = True

    # Authentication configuration
    api_keys: List[str] = field(default_factory=list)
    require_auth: bool = False

    # Server configuration
    server_name: str = "foundry-mcp"
    server_version: str = "0.1.0"

    # Git workflow configuration
    git: GitSettings = field(default_factory=GitSettings)

    # Observability configuration
    observability: ObservabilityConfig = field(default_factory=ObservabilityConfig)

    # Health check configuration
    health: HealthConfig = field(default_factory=HealthConfig)

    # Error collection configuration
    error_collection: ErrorCollectionConfig = field(default_factory=ErrorCollectionConfig)

    # Metrics persistence configuration
    metrics_persistence: MetricsPersistenceConfig = field(default_factory=MetricsPersistenceConfig)

    # Dashboard configuration
    dashboard: DashboardConfig = field(default_factory=DashboardConfig)

    @classmethod
    def from_env(cls, config_file: Optional[str] = None) -> "ServerConfig":
        """
        Create configuration from environment variables and optional TOML file.

        Priority (highest to lowest):
        1. Environment variables
        2. TOML config file
        3. Default values
        """
        config = cls()

        # Load TOML config if available
        toml_path = config_file or os.environ.get("FOUNDRY_MCP_CONFIG_FILE")
        if toml_path:
            config._load_toml(Path(toml_path))
        else:
            # Try default locations
            for default_path in ["foundry-mcp.toml", ".foundry-mcp.toml"]:
                if Path(default_path).exists():
                    config._load_toml(Path(default_path))
                    break

        # Override with environment variables
        config._load_env()

        return config

    def _load_toml(self, path: Path) -> None:
        """Load configuration from TOML file."""
        if not path.exists():
            logger.warning(f"Config file not found: {path}")
            return

        try:
            with open(path, "rb") as f:
                data = tomllib.load(f)

            # Workspace settings
            if "workspace" in data:
                ws = data["workspace"]
                if "roots" in ws:
                    self.workspace_roots = [Path(p) for p in ws["roots"]]
                if "specs_dir" in ws:
                    self.specs_dir = Path(ws["specs_dir"])
                if "journals_path" in ws:
                    self.journals_path = Path(ws["journals_path"])

            # Logging settings
            if "logging" in data:
                log = data["logging"]
                if "level" in log:
                    self.log_level = log["level"].upper()
                if "structured" in log:
                    self.structured_logging = log["structured"]

            # Auth settings
            if "auth" in data:
                auth = data["auth"]
                if "api_keys" in auth:
                    self.api_keys = auth["api_keys"]
                if "require_auth" in auth:
                    self.require_auth = auth["require_auth"]

            # Server settings
            if "server" in data:
                srv = data["server"]
                if "name" in srv:
                    self.server_name = srv["name"]
                if "version" in srv:
                    self.server_version = srv["version"]

            # Git workflow settings
            if "git" in data:
                git_cfg = data["git"]
                if "enabled" in git_cfg:
                    self.git.enabled = _parse_bool(git_cfg["enabled"])
                if "auto_branch" in git_cfg:
                    self.git.auto_branch = _parse_bool(git_cfg["auto_branch"])
                if "auto_commit" in git_cfg:
                    self.git.auto_commit = _parse_bool(git_cfg["auto_commit"])
                if "auto_push" in git_cfg:
                    self.git.auto_push = _parse_bool(git_cfg["auto_push"])
                if "auto_pr" in git_cfg:
                    self.git.auto_pr = _parse_bool(git_cfg["auto_pr"])
                if "show_before_commit" in git_cfg:
                    self.git.show_before_commit = _parse_bool(
                        git_cfg["show_before_commit"]
                    )
                if "commit_cadence" in git_cfg:
                    self.git.commit_cadence = _normalize_commit_cadence(
                        str(git_cfg["commit_cadence"])
                    )

            # Observability settings
            if "observability" in data:
                self.observability = ObservabilityConfig.from_toml_dict(
                    data["observability"]
                )

            # Health check settings
            if "health" in data:
                self.health = HealthConfig.from_toml_dict(data["health"])

            # Error collection settings
            if "error_collection" in data:
                self.error_collection = ErrorCollectionConfig.from_toml_dict(
                    data["error_collection"]
                )

            # Metrics persistence settings
            if "metrics_persistence" in data:
                self.metrics_persistence = MetricsPersistenceConfig.from_toml_dict(
                    data["metrics_persistence"]
                )

            # Dashboard settings
            if "dashboard" in data:
                self.dashboard = DashboardConfig.from_toml_dict(data["dashboard"])

        except Exception as e:
            logger.error(f"Error loading config file {path}: {e}")

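    # Illustrative foundry-mcp.toml sketch (example values only, not part of the
    # packaged file), limited to sections and keys that _load_toml reads:
    #
    #     [workspace]
    #     roots = ["/srv/projects/app"]
    #     specs_dir = "/srv/projects/app/specs"
    #
    #     [logging]
    #     level = "DEBUG"
    #     structured = true
    #
    #     [git]
    #     enabled = true
    #     commit_cadence = "task"
    #
    #     [observability]
    #     prometheus_enabled = true
    #     prometheus_port = 9464
    #
    #     [dashboard]
    #     enabled = true
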
    def _load_env(self) -> None:
        """Load configuration from environment variables."""
        # Workspace roots
        if roots := os.environ.get("FOUNDRY_MCP_WORKSPACE_ROOTS"):
            self.workspace_roots = [Path(p.strip()) for p in roots.split(",")]

        # Specs directory
        if specs := os.environ.get("FOUNDRY_MCP_SPECS_DIR"):
            self.specs_dir = Path(specs)

        # Journals path
        if journals := os.environ.get("FOUNDRY_MCP_JOURNALS_PATH"):
            self.journals_path = Path(journals)

        # Log level
        if level := os.environ.get("FOUNDRY_MCP_LOG_LEVEL"):
            self.log_level = level.upper()

        # API keys
        if keys := os.environ.get("FOUNDRY_MCP_API_KEYS"):
            self.api_keys = [k.strip() for k in keys.split(",") if k.strip()]

        # Require auth
        if require := os.environ.get("FOUNDRY_MCP_REQUIRE_AUTH"):
            self.require_auth = require.lower() in ("true", "1", "yes")

        # Git settings
        if git_enabled := os.environ.get("FOUNDRY_MCP_GIT_ENABLED"):
            self.git.enabled = _parse_bool(git_enabled)
        if git_auto_branch := os.environ.get("FOUNDRY_MCP_GIT_AUTO_BRANCH"):
            self.git.auto_branch = _parse_bool(git_auto_branch)
        if git_auto_commit := os.environ.get("FOUNDRY_MCP_GIT_AUTO_COMMIT"):
            self.git.auto_commit = _parse_bool(git_auto_commit)
        if git_auto_push := os.environ.get("FOUNDRY_MCP_GIT_AUTO_PUSH"):
            self.git.auto_push = _parse_bool(git_auto_push)
        if git_auto_pr := os.environ.get("FOUNDRY_MCP_GIT_AUTO_PR"):
            self.git.auto_pr = _parse_bool(git_auto_pr)
        if git_show_preview := os.environ.get("FOUNDRY_MCP_GIT_SHOW_PREVIEW"):
            self.git.show_before_commit = _parse_bool(git_show_preview)
        if git_cadence := os.environ.get("FOUNDRY_MCP_GIT_COMMIT_CADENCE"):
            self.git.commit_cadence = _normalize_commit_cadence(git_cadence)

        # Observability settings
        if obs_enabled := os.environ.get("FOUNDRY_MCP_OBSERVABILITY_ENABLED"):
            self.observability.enabled = _parse_bool(obs_enabled)
        if otel_enabled := os.environ.get("FOUNDRY_MCP_OTEL_ENABLED"):
            self.observability.otel_enabled = _parse_bool(otel_enabled)
        if otel_endpoint := os.environ.get("FOUNDRY_MCP_OTEL_ENDPOINT"):
            self.observability.otel_endpoint = otel_endpoint
        if otel_service := os.environ.get("FOUNDRY_MCP_OTEL_SERVICE_NAME"):
            self.observability.otel_service_name = otel_service
        if otel_sample := os.environ.get("FOUNDRY_MCP_OTEL_SAMPLE_RATE"):
            try:
                self.observability.otel_sample_rate = float(otel_sample)
            except ValueError:
                pass
        if prom_enabled := os.environ.get("FOUNDRY_MCP_PROMETHEUS_ENABLED"):
            self.observability.prometheus_enabled = _parse_bool(prom_enabled)
        if prom_port := os.environ.get("FOUNDRY_MCP_PROMETHEUS_PORT"):
            try:
                self.observability.prometheus_port = int(prom_port)
            except ValueError:
                pass
        if prom_host := os.environ.get("FOUNDRY_MCP_PROMETHEUS_HOST"):
            self.observability.prometheus_host = prom_host
        if prom_ns := os.environ.get("FOUNDRY_MCP_PROMETHEUS_NAMESPACE"):
            self.observability.prometheus_namespace = prom_ns

        # Health check settings
        if health_enabled := os.environ.get("FOUNDRY_MCP_HEALTH_ENABLED"):
            self.health.enabled = _parse_bool(health_enabled)
        if health_liveness_timeout := os.environ.get(
            "FOUNDRY_MCP_HEALTH_LIVENESS_TIMEOUT"
        ):
            try:
                self.health.liveness_timeout = float(health_liveness_timeout)
            except ValueError:
                pass
        if health_readiness_timeout := os.environ.get(
            "FOUNDRY_MCP_HEALTH_READINESS_TIMEOUT"
        ):
            try:
                self.health.readiness_timeout = float(health_readiness_timeout)
            except ValueError:
                pass
        if health_timeout := os.environ.get("FOUNDRY_MCP_HEALTH_TIMEOUT"):
            try:
                self.health.health_timeout = float(health_timeout)
            except ValueError:
                pass
        if disk_threshold := os.environ.get("FOUNDRY_MCP_DISK_SPACE_THRESHOLD_MB"):
            try:
                self.health.disk_space_threshold_mb = int(disk_threshold)
            except ValueError:
                pass
        if disk_warning := os.environ.get("FOUNDRY_MCP_DISK_SPACE_WARNING_MB"):
            try:
                self.health.disk_space_warning_mb = int(disk_warning)
            except ValueError:
                pass

        # Error collection settings
        if err_enabled := os.environ.get("FOUNDRY_MCP_ERROR_COLLECTION_ENABLED"):
            self.error_collection.enabled = _parse_bool(err_enabled)
        if err_storage := os.environ.get("FOUNDRY_MCP_ERROR_STORAGE_PATH"):
            self.error_collection.storage_path = err_storage
        if err_retention := os.environ.get("FOUNDRY_MCP_ERROR_RETENTION_DAYS"):
            try:
                self.error_collection.retention_days = int(err_retention)
            except ValueError:
                pass
        if err_max := os.environ.get("FOUNDRY_MCP_ERROR_MAX_ERRORS"):
            try:
                self.error_collection.max_errors = int(err_max)
            except ValueError:
                pass
        if err_stack := os.environ.get("FOUNDRY_MCP_ERROR_INCLUDE_STACK_TRACES"):
            self.error_collection.include_stack_traces = _parse_bool(err_stack)
        if err_redact := os.environ.get("FOUNDRY_MCP_ERROR_REDACT_INPUTS"):
            self.error_collection.redact_inputs = _parse_bool(err_redact)

        # Metrics persistence settings
        if metrics_enabled := os.environ.get("FOUNDRY_MCP_METRICS_PERSISTENCE_ENABLED"):
            self.metrics_persistence.enabled = _parse_bool(metrics_enabled)
        if metrics_storage := os.environ.get("FOUNDRY_MCP_METRICS_STORAGE_PATH"):
            self.metrics_persistence.storage_path = metrics_storage
        if metrics_retention := os.environ.get("FOUNDRY_MCP_METRICS_RETENTION_DAYS"):
            try:
                self.metrics_persistence.retention_days = int(metrics_retention)
            except ValueError:
                pass
        if metrics_max := os.environ.get("FOUNDRY_MCP_METRICS_MAX_RECORDS"):
            try:
                self.metrics_persistence.max_records = int(metrics_max)
            except ValueError:
                pass
        if metrics_bucket := os.environ.get("FOUNDRY_MCP_METRICS_BUCKET_INTERVAL"):
            try:
                self.metrics_persistence.bucket_interval_seconds = int(metrics_bucket)
            except ValueError:
                pass
        if metrics_flush := os.environ.get("FOUNDRY_MCP_METRICS_FLUSH_INTERVAL"):
            try:
                self.metrics_persistence.flush_interval_seconds = int(metrics_flush)
            except ValueError:
                pass
        if persist_list := os.environ.get("FOUNDRY_MCP_METRICS_PERSIST_METRICS"):
            self.metrics_persistence.persist_metrics = [
                m.strip() for m in persist_list.split(",") if m.strip()
            ]

        # Dashboard settings
        if dash_enabled := os.environ.get("FOUNDRY_MCP_DASHBOARD_ENABLED"):
            self.dashboard.enabled = _parse_bool(dash_enabled)
        if dash_port := os.environ.get("FOUNDRY_MCP_DASHBOARD_PORT"):
            try:
                self.dashboard.port = int(dash_port)
            except ValueError:
                pass
        if dash_host := os.environ.get("FOUNDRY_MCP_DASHBOARD_HOST"):
            self.dashboard.host = dash_host
        if dash_auto_open := os.environ.get("FOUNDRY_MCP_DASHBOARD_AUTO_OPEN"):
            self.dashboard.auto_open_browser = _parse_bool(dash_auto_open)
        if dash_refresh := os.environ.get("FOUNDRY_MCP_DASHBOARD_REFRESH_INTERVAL"):
            try:
                self.dashboard.refresh_interval_ms = int(dash_refresh)
            except ValueError:
                pass

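    # Illustrative precedence sketch (example only, not part of the packaged
    # file). _load_env runs after _load_toml inside from_env, so environment
    # variables override TOML values, which in turn override dataclass defaults:
    #
    #     os.environ["FOUNDRY_MCP_LOG_LEVEL"] = "debug"
    #     cfg = ServerConfig.from_env("foundry-mcp.toml")  # TOML may set level = "INFO"
    #     cfg.log_level   # "DEBUG" (the env var wins and is upper-cased)
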
    def validate_api_key(self, key: Optional[str]) -> bool:
        """
        Validate an API key.

        Args:
            key: API key to validate

        Returns:
            True if valid (or auth not required), False otherwise
        """
        if not self.require_auth:
            return True

        if not key:
            return False

        return key in self.api_keys

    def setup_logging(self) -> None:
        """Configure logging based on settings."""
        level = getattr(logging, self.log_level, logging.INFO)

        if self.structured_logging:
            # JSON-style structured logging
            formatter = logging.Formatter(
                '{"timestamp":"%(asctime)s","level":"%(levelname)s",'
                '"logger":"%(name)s","message":"%(message)s"}'
            )
        else:
            formatter = logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            )

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

        root_logger = logging.getLogger("foundry_mcp")
        root_logger.setLevel(level)
        root_logger.addHandler(handler)


# Global configuration instance
_config: Optional[ServerConfig] = None


def get_config() -> ServerConfig:
    """Get the global configuration instance."""
    global _config
    if _config is None:
        _config = ServerConfig.from_env()
    return _config


def set_config(config: ServerConfig) -> None:
    """Set the global configuration instance."""
    global _config
    _config = config


# Metrics and observability decorators


def log_call(
    logger_name: Optional[str] = None,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """
    Decorator to log function calls with structured data.

    Args:
        logger_name: Optional logger name (defaults to function module)
    """

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        log = logging.getLogger(logger_name or func.__module__)

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> T:
            log.debug(
                f"Calling {func.__name__}",
                extra={
                    "function": func.__name__,
                    "args_count": len(args),
                    "kwargs_keys": list(kwargs.keys()),
                },
            )
            try:
                result = func(*args, **kwargs)
                log.debug(
                    f"Completed {func.__name__}",
                    extra={
                        "function": func.__name__,
                        "success": True,
                    },
                )
                return result
            except Exception as e:
                log.error(
                    f"Error in {func.__name__}: {e}",
                    extra={
                        "function": func.__name__,
                        "error": str(e),
                        "error_type": type(e).__name__,
                    },
                )
                raise

        return wrapper

    return decorator


def timed(
    metric_name: Optional[str] = None,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """
    Decorator to measure and log function execution time.

    Args:
        metric_name: Optional metric name (defaults to function name)
    """

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        name = metric_name or func.__name__
        log = logging.getLogger(func.__module__)

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> T:
            start = time.perf_counter()
            try:
                result = func(*args, **kwargs)
                elapsed = time.perf_counter() - start
                log.info(
                    f"Timer: {name}",
                    extra={
                        "metric": name,
                        "duration_ms": round(elapsed * 1000, 2),
                        "success": True,
                    },
                )
                return result
            except Exception as e:
                elapsed = time.perf_counter() - start
                log.info(
                    f"Timer: {name}",
                    extra={
                        "metric": name,
                        "duration_ms": round(elapsed * 1000, 2),
                        "success": False,
                        "error": str(e),
                    },
                )
                raise

        return wrapper

    return decorator


def require_auth(func: Callable[..., T]) -> Callable[..., T]:
    """
    Decorator to require API key authentication for a function.

    The function must accept an 'api_key' keyword argument.
    Raises ValueError if authentication fails.
    """

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> T:
        config = get_config()
        api_key = kwargs.get("api_key")

        if not config.validate_api_key(api_key):
            raise ValueError("Invalid or missing API key")

        return func(*args, **kwargs)

    return wrapper
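# Illustrative usage sketch (example only, not part of the packaged file), for
# a hypothetical load_spec function. The decorators compose with the global
# config: require_auth checks the api_key keyword via ServerConfig.validate_api_key,
# and timed logs a structured duration record under the given metric name:
#
#     @timed(metric_name="spec_load_seconds")
#     @require_auth
#     def load_spec(spec_id: str, api_key: Optional[str] = None) -> str:
#         return f"loaded {spec_id}"
#
#     set_config(ServerConfig(require_auth=True, api_keys=["secret-key"]))
#     load_spec("sdd-001", api_key="secret-key")   # succeeds, duration logged
#     load_spec("sdd-001")                         # raises ValueError (missing key)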