foundry-mcp 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135) hide show
  1. foundry_mcp/__init__.py +7 -0
  2. foundry_mcp/cli/__init__.py +80 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +633 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +652 -0
  15. foundry_mcp/cli/commands/session.py +479 -0
  16. foundry_mcp/cli/commands/specs.py +856 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +259 -0
  22. foundry_mcp/cli/flags.py +266 -0
  23. foundry_mcp/cli/logging.py +212 -0
  24. foundry_mcp/cli/main.py +44 -0
  25. foundry_mcp/cli/output.py +122 -0
  26. foundry_mcp/cli/registry.py +110 -0
  27. foundry_mcp/cli/resilience.py +178 -0
  28. foundry_mcp/cli/transcript.py +217 -0
  29. foundry_mcp/config.py +850 -0
  30. foundry_mcp/core/__init__.py +144 -0
  31. foundry_mcp/core/ai_consultation.py +1636 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/feature_flags.py +592 -0
  40. foundry_mcp/core/health.py +749 -0
  41. foundry_mcp/core/journal.py +694 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1350 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +123 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +317 -0
  57. foundry_mcp/core/prometheus.py +577 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +546 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
  61. foundry_mcp/core/prompts/plan_review.py +623 -0
  62. foundry_mcp/core/providers/__init__.py +225 -0
  63. foundry_mcp/core/providers/base.py +476 -0
  64. foundry_mcp/core/providers/claude.py +460 -0
  65. foundry_mcp/core/providers/codex.py +619 -0
  66. foundry_mcp/core/providers/cursor_agent.py +642 -0
  67. foundry_mcp/core/providers/detectors.py +488 -0
  68. foundry_mcp/core/providers/gemini.py +405 -0
  69. foundry_mcp/core/providers/opencode.py +616 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +302 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +729 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/resilience.py +600 -0
  78. foundry_mcp/core/responses.py +934 -0
  79. foundry_mcp/core/review.py +366 -0
  80. foundry_mcp/core/security.py +438 -0
  81. foundry_mcp/core/spec.py +1650 -0
  82. foundry_mcp/core/task.py +1289 -0
  83. foundry_mcp/core/testing.py +450 -0
  84. foundry_mcp/core/validation.py +2081 -0
  85. foundry_mcp/dashboard/__init__.py +32 -0
  86. foundry_mcp/dashboard/app.py +119 -0
  87. foundry_mcp/dashboard/components/__init__.py +17 -0
  88. foundry_mcp/dashboard/components/cards.py +88 -0
  89. foundry_mcp/dashboard/components/charts.py +234 -0
  90. foundry_mcp/dashboard/components/filters.py +136 -0
  91. foundry_mcp/dashboard/components/tables.py +195 -0
  92. foundry_mcp/dashboard/data/__init__.py +11 -0
  93. foundry_mcp/dashboard/data/stores.py +433 -0
  94. foundry_mcp/dashboard/launcher.py +289 -0
  95. foundry_mcp/dashboard/views/__init__.py +12 -0
  96. foundry_mcp/dashboard/views/errors.py +217 -0
  97. foundry_mcp/dashboard/views/metrics.py +174 -0
  98. foundry_mcp/dashboard/views/overview.py +160 -0
  99. foundry_mcp/dashboard/views/providers.py +83 -0
  100. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  101. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  102. foundry_mcp/prompts/__init__.py +9 -0
  103. foundry_mcp/prompts/workflows.py +525 -0
  104. foundry_mcp/resources/__init__.py +9 -0
  105. foundry_mcp/resources/specs.py +591 -0
  106. foundry_mcp/schemas/__init__.py +38 -0
  107. foundry_mcp/schemas/sdd-spec-schema.json +386 -0
  108. foundry_mcp/server.py +164 -0
  109. foundry_mcp/tools/__init__.py +10 -0
  110. foundry_mcp/tools/unified/__init__.py +71 -0
  111. foundry_mcp/tools/unified/authoring.py +1487 -0
  112. foundry_mcp/tools/unified/context_helpers.py +98 -0
  113. foundry_mcp/tools/unified/documentation_helpers.py +198 -0
  114. foundry_mcp/tools/unified/environment.py +939 -0
  115. foundry_mcp/tools/unified/error.py +462 -0
  116. foundry_mcp/tools/unified/health.py +225 -0
  117. foundry_mcp/tools/unified/journal.py +841 -0
  118. foundry_mcp/tools/unified/lifecycle.py +632 -0
  119. foundry_mcp/tools/unified/metrics.py +777 -0
  120. foundry_mcp/tools/unified/plan.py +745 -0
  121. foundry_mcp/tools/unified/pr.py +294 -0
  122. foundry_mcp/tools/unified/provider.py +629 -0
  123. foundry_mcp/tools/unified/review.py +685 -0
  124. foundry_mcp/tools/unified/review_helpers.py +299 -0
  125. foundry_mcp/tools/unified/router.py +102 -0
  126. foundry_mcp/tools/unified/server.py +580 -0
  127. foundry_mcp/tools/unified/spec.py +808 -0
  128. foundry_mcp/tools/unified/task.py +2202 -0
  129. foundry_mcp/tools/unified/test.py +370 -0
  130. foundry_mcp/tools/unified/verification.py +520 -0
  131. foundry_mcp-0.3.3.dist-info/METADATA +337 -0
  132. foundry_mcp-0.3.3.dist-info/RECORD +135 -0
  133. foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
  134. foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
  135. foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1350 @@
1
+ """
2
+ LLM configuration parsing for foundry-mcp.
3
+
4
+ Parses the [llm] section from foundry-mcp.toml to configure LLM provider settings.
5
+
6
+ TOML Configuration Example:
7
+ [llm]
8
+ provider = "openai" # Required: "openai", "anthropic", or "local"
9
+ api_key = "sk-..." # Optional: defaults to env var based on provider
10
+ model = "gpt-4.1" # Optional: provider-specific default
11
+ timeout = 30 # Optional: request timeout in seconds (default: 30)
12
+
13
+ Environment Variables (fallback if not in TOML):
14
+ - FOUNDRY_MCP_LLM_PROVIDER: LLM provider type ("openai", "anthropic", "local")
15
+ - FOUNDRY_MCP_LLM_API_KEY: API key (takes precedence over provider-specific keys)
16
+ - FOUNDRY_MCP_LLM_MODEL: Model identifier
17
+ - FOUNDRY_MCP_LLM_TIMEOUT: Request timeout in seconds
18
+ - FOUNDRY_MCP_LLM_BASE_URL: Custom API base URL
19
+ - FOUNDRY_MCP_LLM_MAX_TOKENS: Default max tokens
20
+ - FOUNDRY_MCP_LLM_TEMPERATURE: Default temperature
21
+ - FOUNDRY_MCP_LLM_ORGANIZATION: Organization ID (OpenAI)
22
+
23
+ Provider-specific API key fallbacks:
24
+ - OPENAI_API_KEY: OpenAI API key (if FOUNDRY_MCP_LLM_API_KEY not set)
25
+ - ANTHROPIC_API_KEY: Anthropic API key (if FOUNDRY_MCP_LLM_API_KEY not set)
26
+ """
27
+
28
+ import logging
29
+ import os
30
+ import re
31
+ from dataclasses import dataclass, field
32
+ from enum import Enum
33
+ from pathlib import Path
34
+ from typing import Optional, Dict, Any, List, Literal
35
+
36
+ try:
37
+ import tomllib
38
+ except ImportError:
39
+ import tomli as tomllib # Python < 3.11 fallback
40
+
41
+
42
+ logger = logging.getLogger(__name__)
43
+
44
+
45
+ # =============================================================================
46
+ # Provider Specification (Unified Priority Notation)
47
+ # =============================================================================
48
+
49
+
50
@dataclass
class ProviderSpec:
    """Parsed provider specification in bracket-prefix notation.

    The notation unifies API and CLI provider configuration:

        [api]openai/gpt-4.1              API provider with model
        [api]anthropic/claude-sonnet-4   API provider with model
        [cli]gemini:pro                  CLI provider with model
        [cli]claude:opus                 CLI provider with model
        [cli]opencode:openai/gpt-5.2     CLI provider routing to a backend
        [cli]codex                       CLI provider with default model

    Grammar:
        spec     := "[api]" api_spec | "[cli]" cli_spec
        api_spec := provider "/" model
        cli_spec := transport (":" backend "/" model | ":" model | "")

    Attributes:
        type: Provider type - "api" for direct API calls, "cli" for CLI tools
        provider: Provider/transport identifier (lowercased during parse)
        backend: Optional backend for CLI routing (openai, anthropic, gemini)
        model: Optional model identifier (gpt-4.1, pro, opus, etc.)
        raw: Original (stripped) spec string, kept for error messages
    """

    type: Literal["api", "cli"]
    provider: str
    backend: Optional[str] = None
    model: Optional[str] = None
    raw: str = ""

    # Known providers for validation
    KNOWN_API_PROVIDERS = {"openai", "anthropic", "local"}
    KNOWN_CLI_PROVIDERS = {"gemini", "codex", "cursor-agent", "opencode", "claude"}
    KNOWN_BACKENDS = {"openai", "anthropic", "gemini", "local"}

    # Pre-compiled patterns, one per notation variant; tried most-specific first.
    _API_PATTERN = re.compile(r"^\[api\]([^/]+)/(.+)$")
    _CLI_FULL_PATTERN = re.compile(r"^\[cli\]([^:]+):([^/]+)/(.+)$")  # transport:backend/model
    _CLI_MODEL_PATTERN = re.compile(r"^\[cli\]([^:]+):([^/]+)$")  # transport:model
    _CLI_SIMPLE_PATTERN = re.compile(r"^\[cli\]([^:]+)$")  # transport only

    @classmethod
    def parse(cls, spec: str) -> "ProviderSpec":
        """Parse a provider specification string.

        Args:
            spec: Provider spec in bracket notation (e.g., "[api]openai/gpt-4.1")

        Returns:
            ProviderSpec instance with parsed components

        Raises:
            ValueError: If the spec format is invalid or empty
        """
        spec = spec.strip()
        if not spec:
            raise ValueError("Provider spec cannot be empty")

        # [api]provider/model
        m = cls._API_PATTERN.match(spec)
        if m is not None:
            return cls(
                type="api",
                provider=m.group(1).lower(),
                model=m.group(2),
                raw=spec,
            )

        # [cli]transport:backend/model
        m = cls._CLI_FULL_PATTERN.match(spec)
        if m is not None:
            return cls(
                type="cli",
                provider=m.group(1).lower(),
                backend=m.group(2).lower(),
                model=m.group(3),
                raw=spec,
            )

        # [cli]transport:model
        m = cls._CLI_MODEL_PATTERN.match(spec)
        if m is not None:
            return cls(
                type="cli",
                provider=m.group(1).lower(),
                model=m.group(2),
                raw=spec,
            )

        # [cli]transport
        m = cls._CLI_SIMPLE_PATTERN.match(spec)
        if m is not None:
            return cls(type="cli", provider=m.group(1).lower(), raw=spec)

        raise ValueError(
            f"Invalid provider spec '{spec}'. Expected format: "
            "[api]provider/model or [cli]transport[:backend/model|:model]"
        )

    def validate(self) -> List[str]:
        """Validate the provider specification.

        Returns:
            List of validation error messages (empty if valid)
        """
        errors: List[str] = []

        if self.type == "api":
            if self.provider not in self.KNOWN_API_PROVIDERS:
                errors.append(
                    f"Unknown API provider '{self.provider}'. "
                    f"Known: {sorted(self.KNOWN_API_PROVIDERS)}"
                )
            if not self.model:
                errors.append("API provider spec requires a model")
            return errors

        # CLI spec: check the transport and (if present) the routed backend.
        if self.provider not in self.KNOWN_CLI_PROVIDERS:
            errors.append(
                f"Unknown CLI provider '{self.provider}'. "
                f"Known: {sorted(self.KNOWN_CLI_PROVIDERS)}"
            )
        if self.backend and self.backend not in self.KNOWN_BACKENDS:
            errors.append(
                f"Unknown backend '{self.backend}'. "
                f"Known: {sorted(self.KNOWN_BACKENDS)}"
            )
        return errors

    def __str__(self) -> str:
        """Return the canonical bracket-notation string for this spec."""
        if self.type == "api":
            return f"[api]{self.provider}/{self.model}"
        if self.backend:
            return f"[cli]{self.provider}:{self.backend}/{self.model}"
        if self.model:
            return f"[cli]{self.provider}:{self.model}"
        return f"[cli]{self.provider}"
206
+
207
+
208
class LLMProviderType(str, Enum):
    """Supported LLM provider types.

    Subclasses ``str`` so members compare equal to their plain string
    values (e.g. ``LLMProviderType.OPENAI == "openai"``), which keeps
    TOML/env-var round-trips simple.
    """

    OPENAI = "openai"  # OpenAI API (module default)
    ANTHROPIC = "anthropic"  # Anthropic API
    LOCAL = "local"  # Local/self-hosted endpoint; typically no API key required
214
+
215
+
216
# Default models per provider; used as the fallback by LLMConfig.get_model()
# when no explicit model is configured.
DEFAULT_MODELS: Dict[LLMProviderType, str] = {
    LLMProviderType.OPENAI: "gpt-4.1",
    LLMProviderType.ANTHROPIC: "claude-sonnet-4-5",
    LLMProviderType.LOCAL: "llama4",
}

# Environment variable names for provider-specific API keys, consulted by
# LLMConfig.get_api_key() after the unified FOUNDRY_MCP_LLM_API_KEY.
API_KEY_ENV_VARS: Dict[LLMProviderType, str] = {
    LLMProviderType.OPENAI: "OPENAI_API_KEY",
    LLMProviderType.ANTHROPIC: "ANTHROPIC_API_KEY",
    LLMProviderType.LOCAL: "",  # Local providers typically don't need keys
}
229
+
230
+
231
@dataclass
class LLMConfig:
    """LLM configuration parsed from foundry-mcp.toml.

    Attributes:
        provider: The LLM provider type ("openai", "anthropic", "local")
        api_key: API key for the provider (optional, falls back to env var)
        model: Model identifier (optional, uses provider default)
        timeout: Request timeout in seconds (default: 30)
        base_url: Custom API base URL (optional, for proxies or local servers)
        organization: Organization ID (OpenAI only)
        max_tokens: Default max tokens for responses (default: 1024)
        temperature: Default temperature for generation (default: 0.7)
    """

    provider: LLMProviderType = LLMProviderType.OPENAI
    api_key: Optional[str] = None
    model: Optional[str] = None
    timeout: int = 30
    base_url: Optional[str] = None
    organization: Optional[str] = None
    max_tokens: int = 1024
    temperature: float = 0.7

    def get_api_key(self) -> Optional[str]:
        """Get API key, falling back to environment variables if not set.

        Priority:
            1. Explicit api_key set in config
            2. FOUNDRY_MCP_LLM_API_KEY environment variable
            3. Provider-specific env var (OPENAI_API_KEY, ANTHROPIC_API_KEY)

        Returns:
            API key string or None if not available
        """
        if self.api_key:
            return self.api_key

        # Unified env var takes precedence over provider-specific keys.
        if unified_key := os.environ.get("FOUNDRY_MCP_LLM_API_KEY"):
            return unified_key

        # Provider-specific fallback; LOCAL maps to "" (no key expected).
        env_var = API_KEY_ENV_VARS.get(self.provider, "")
        if env_var:
            return os.environ.get(env_var)

        return None

    def get_model(self) -> str:
        """Get model, falling back to the provider default if not set.

        Returns:
            Model identifier string
        """
        if self.model:
            return self.model

        return DEFAULT_MODELS.get(self.provider, "gpt-4.1")

    def validate(self) -> None:
        """Validate the configuration.

        Raises:
            ValueError: If timeout/max_tokens/temperature are out of range,
                or no API key is available for a non-local provider.
        """
        if self.timeout <= 0:
            raise ValueError(f"timeout must be positive, got {self.timeout}")

        if self.max_tokens <= 0:
            raise ValueError(f"max_tokens must be positive, got {self.max_tokens}")

        if not 0 <= self.temperature <= 2:
            raise ValueError(f"temperature must be between 0 and 2, got {self.temperature}")

        # Local providers typically run without authentication.
        if self.provider != LLMProviderType.LOCAL:
            if not self.get_api_key():
                env_var = API_KEY_ENV_VARS.get(self.provider, "")
                raise ValueError(
                    f"API key required for {self.provider.value} provider. "
                    f"Set 'api_key' in config or {env_var} environment variable."
                )

    @classmethod
    def from_toml(cls, path: Path) -> "LLMConfig":
        """Load LLM configuration from a TOML file.

        Args:
            path: Path to the TOML configuration file

        Returns:
            LLMConfig instance with parsed settings

        Raises:
            FileNotFoundError: If the config file doesn't exist
            ValueError: If the configuration is invalid
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "rb") as f:
            data = tomllib.load(f)

        # A missing [llm] section simply yields all defaults.
        return cls.from_dict(data.get("llm", {}))

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "LLMConfig":
        """Create LLMConfig from a dictionary (typically the [llm] section).

        Args:
            data: Dictionary with LLM configuration values

        Returns:
            LLMConfig instance

        Raises:
            ValueError: If provider type is invalid
        """
        config = cls()

        if "provider" in data:
            # str() guards against non-string TOML values (e.g. an int);
            # the enum lookup still rejects anything that isn't a known name.
            provider_str = str(data["provider"]).lower()
            try:
                config.provider = LLMProviderType(provider_str)
            except ValueError:
                valid = [p.value for p in LLMProviderType]
                # from None: the enum's own ValueError adds no information
                # beyond this message, so suppress the chained context.
                raise ValueError(
                    f"Invalid provider '{provider_str}'. Must be one of: {valid}"
                ) from None

        if "api_key" in data:
            config.api_key = data["api_key"]

        if "model" in data:
            config.model = data["model"]

        if "timeout" in data:
            config.timeout = int(data["timeout"])

        if "base_url" in data:
            config.base_url = data["base_url"]

        if "organization" in data:
            config.organization = data["organization"]

        if "max_tokens" in data:
            config.max_tokens = int(data["max_tokens"])

        if "temperature" in data:
            config.temperature = float(data["temperature"])

        return config

    @classmethod
    def from_env(cls) -> "LLMConfig":
        """Create LLMConfig from environment variables only.

        Environment variables:
            - FOUNDRY_MCP_LLM_PROVIDER: Provider type ("openai", "anthropic", "local")
            - FOUNDRY_MCP_LLM_API_KEY: API key (unified, takes precedence)
            - FOUNDRY_MCP_LLM_MODEL: Model identifier
            - FOUNDRY_MCP_LLM_TIMEOUT: Request timeout in seconds
            - FOUNDRY_MCP_LLM_BASE_URL: Custom API base URL
            - FOUNDRY_MCP_LLM_MAX_TOKENS: Default max tokens
            - FOUNDRY_MCP_LLM_TEMPERATURE: Default temperature
            - FOUNDRY_MCP_LLM_ORGANIZATION: Organization ID (OpenAI only)

        Invalid values are logged and ignored, keeping the field's default.

        Returns:
            LLMConfig instance with environment-based settings
        """
        config = cls()

        if provider := os.environ.get("FOUNDRY_MCP_LLM_PROVIDER"):
            try:
                config.provider = LLMProviderType(provider.lower())
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_PROVIDER: {provider}, using default")

        # Explicit unified env var only; provider-specific keys are resolved
        # lazily in get_api_key().
        if api_key := os.environ.get("FOUNDRY_MCP_LLM_API_KEY"):
            config.api_key = api_key

        if model := os.environ.get("FOUNDRY_MCP_LLM_MODEL"):
            config.model = model

        if timeout := os.environ.get("FOUNDRY_MCP_LLM_TIMEOUT"):
            try:
                config.timeout = int(timeout)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_TIMEOUT: {timeout}, using default")

        if base_url := os.environ.get("FOUNDRY_MCP_LLM_BASE_URL"):
            config.base_url = base_url

        if max_tokens := os.environ.get("FOUNDRY_MCP_LLM_MAX_TOKENS"):
            try:
                config.max_tokens = int(max_tokens)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_MAX_TOKENS: {max_tokens}, using default")

        if temperature := os.environ.get("FOUNDRY_MCP_LLM_TEMPERATURE"):
            try:
                config.temperature = float(temperature)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_TEMPERATURE: {temperature}, using default")

        if organization := os.environ.get("FOUNDRY_MCP_LLM_ORGANIZATION"):
            config.organization = organization

        return config
454
+
455
+
456
def load_llm_config(
    config_file: Optional[Path] = None,
    use_env_fallback: bool = True,
) -> LLMConfig:
    """Load LLM configuration from TOML file with environment fallback.

    Priority (highest to lowest):
        1. TOML config file (if provided or found at default locations)
        2. Environment variables
        3. Default values

    A field explicitly set in the TOML [llm] section is never overridden by
    an environment variable, even when the TOML value equals the default.
    (The merge is keyed on which keys were actually present in the file,
    not on value-equals-default heuristics.)

    Args:
        config_file: Optional path to TOML config file
        use_env_fallback: Whether to use environment variables as fallback

    Returns:
        LLMConfig instance with merged settings
    """
    # Raw [llm] section; tracking the keys lets us honor TOML precedence
    # exactly instead of guessing from default-valued fields.
    llm_section: Dict[str, Any] = {}

    if config_file and config_file.exists():
        try:
            with open(config_file, "rb") as f:
                llm_section = tomllib.load(f).get("llm", {})
            logger.debug(f"Loaded LLM config from {config_file}")
        except Exception as e:
            # Best-effort: fall back to defaults/env rather than crash.
            logger.warning(f"Failed to load LLM config from {config_file}: {e}")
            llm_section = {}
    else:
        # No usable explicit file: probe the default locations in order.
        default_paths = [
            Path("foundry-mcp.toml"),
            Path(".foundry-mcp.toml"),
            Path.home() / ".config" / "foundry-mcp" / "config.toml",
        ]
        for path in default_paths:
            if path.exists():
                try:
                    with open(path, "rb") as f:
                        llm_section = tomllib.load(f).get("llm", {})
                    logger.debug(f"Loaded LLM config from {path}")
                    break
                except Exception as e:
                    logger.debug(f"Failed to load from {path}: {e}")
                    llm_section = {}

    config = LLMConfig.from_dict(llm_section)

    if use_env_fallback:
        env_config = LLMConfig.from_env()

        # Env only fills fields the TOML section did not set explicitly.
        if "provider" not in llm_section and os.environ.get("FOUNDRY_MCP_LLM_PROVIDER"):
            config.provider = env_config.provider

        if not config.api_key and env_config.api_key:
            config.api_key = env_config.api_key

        if not config.model and env_config.model:
            config.model = env_config.model

        if "timeout" not in llm_section and os.environ.get("FOUNDRY_MCP_LLM_TIMEOUT"):
            config.timeout = env_config.timeout

        if not config.base_url and env_config.base_url:
            config.base_url = env_config.base_url

        if "max_tokens" not in llm_section and os.environ.get("FOUNDRY_MCP_LLM_MAX_TOKENS"):
            config.max_tokens = env_config.max_tokens

        if "temperature" not in llm_section and os.environ.get("FOUNDRY_MCP_LLM_TEMPERATURE"):
            config.temperature = env_config.temperature

        if not config.organization and env_config.organization:
            config.organization = env_config.organization

    return config
534
+
535
+
536
+ # Global configuration instance
537
+ _llm_config: Optional[LLMConfig] = None
538
+
539
+
540
def get_llm_config() -> LLMConfig:
    """Return the process-wide LLM configuration, loading it lazily.

    The first call populates the module-level cache via load_llm_config();
    subsequent calls return the cached instance until set_llm_config() or
    reset_llm_config() changes it.

    Returns:
        LLMConfig instance (loaded from file/env on first call)
    """
    global _llm_config
    cached = _llm_config
    if cached is None:
        cached = load_llm_config()
        _llm_config = cached
    return cached
550
+
551
+
552
def set_llm_config(config: LLMConfig) -> None:
    """Set the global LLM configuration instance.

    Replaces any previously loaded configuration; subsequent calls to
    get_llm_config() return *config* without reloading from file/env.

    Args:
        config: LLMConfig instance to use globally
    """
    global _llm_config
    _llm_config = config
560
+
561
+
562
def reset_llm_config() -> None:
    """Reset the global LLM configuration to None.

    The next get_llm_config() call will reload from file/env.
    Useful for testing or reloading configuration.
    """
    global _llm_config
    _llm_config = None
569
+
570
+
571
+ # =============================================================================
572
+ # Workflow Configuration
573
+ # =============================================================================
574
+
575
+
576
class WorkflowMode(str, Enum):
    """Workflow execution modes.

    Subclasses ``str`` so members compare equal to their plain string values.

    SINGLE: Execute one task at a time with user approval between tasks
    AUTONOMOUS: Execute all tasks in phase automatically until completion or blocker
    BATCH: Execute a specified number of tasks, then pause for review
    """

    SINGLE = "single"  # Default mode (see WorkflowConfig.mode)
    AUTONOMOUS = "autonomous"
    BATCH = "batch"  # Batch size controlled by WorkflowConfig.batch_size
587
+
588
+
589
@dataclass
class WorkflowConfig:
    """Workflow configuration parsed from foundry-mcp.toml [workflow] section.

    TOML Configuration Example:
        [workflow]
        mode = "single"          # Execution mode: "single", "autonomous", or "batch"
        auto_validate = true     # Automatically run validation after task completion
        journal_enabled = true   # Enable journaling of task completions
        batch_size = 5           # Number of tasks to execute in batch mode
        context_threshold = 85   # Context usage threshold (%) to trigger pause

    Environment Variables:
        - FOUNDRY_MCP_WORKFLOW_MODE: Workflow execution mode
        - FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE: Enable auto-validation (true/false)
        - FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED: Enable journaling (true/false)
        - FOUNDRY_MCP_WORKFLOW_BATCH_SIZE: Batch size for batch mode
        - FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD: Context threshold percentage

    Attributes:
        mode: Workflow execution mode
        auto_validate: Whether to run validation after task completion
        journal_enabled: Whether to journal task completions
        batch_size: Number of tasks to execute in batch mode (>= 1)
        context_threshold: Context usage threshold to trigger pause (50-100)
    """

    mode: WorkflowMode = WorkflowMode.SINGLE
    auto_validate: bool = True
    journal_enabled: bool = True
    batch_size: int = 5
    context_threshold: int = 85

    def validate(self) -> None:
        """Validate the workflow configuration.

        Raises:
            ValueError: If batch_size < 1 or context_threshold is outside 50-100.
        """
        if self.batch_size < 1:
            raise ValueError(f"batch_size must be at least 1, got {self.batch_size}")

        if not 50 <= self.context_threshold <= 100:
            raise ValueError(
                f"context_threshold must be between 50 and 100, got {self.context_threshold}"
            )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkflowConfig":
        """Create WorkflowConfig from a dictionary (typically the [workflow] section).

        Args:
            data: Dictionary with workflow configuration values

        Returns:
            WorkflowConfig instance

        Raises:
            ValueError: If mode is invalid
        """
        config = cls()

        if "mode" in data:
            # str() guards against non-string TOML values; the enum lookup
            # still rejects anything that isn't a known mode name.
            mode_str = str(data["mode"]).lower()
            try:
                config.mode = WorkflowMode(mode_str)
            except ValueError:
                valid = [m.value for m in WorkflowMode]
                # from None: the enum's own ValueError adds no information
                # beyond this message, so suppress the chained context.
                raise ValueError(
                    f"Invalid workflow mode '{mode_str}'. Must be one of: {valid}"
                ) from None

        if "auto_validate" in data:
            config.auto_validate = bool(data["auto_validate"])

        if "journal_enabled" in data:
            config.journal_enabled = bool(data["journal_enabled"])

        if "batch_size" in data:
            config.batch_size = int(data["batch_size"])

        if "context_threshold" in data:
            config.context_threshold = int(data["context_threshold"])

        return config

    @classmethod
    def from_toml(cls, path: Path) -> "WorkflowConfig":
        """Load workflow configuration from a TOML file.

        Args:
            path: Path to the TOML configuration file

        Returns:
            WorkflowConfig instance with parsed settings

        Raises:
            FileNotFoundError: If the config file doesn't exist
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "rb") as f:
            data = tomllib.load(f)

        # A missing [workflow] section simply yields all defaults.
        return cls.from_dict(data.get("workflow", {}))

    @classmethod
    def from_env(cls) -> "WorkflowConfig":
        """Create WorkflowConfig from environment variables only.

        Environment variables:
            - FOUNDRY_MCP_WORKFLOW_MODE: Workflow execution mode
            - FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE: Enable auto-validation
            - FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED: Enable journaling
            - FOUNDRY_MCP_WORKFLOW_BATCH_SIZE: Batch size
            - FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD: Context threshold

        Invalid values are logged and ignored, keeping the field's default.

        Returns:
            WorkflowConfig instance with environment-based settings
        """
        config = cls()

        if mode := os.environ.get("FOUNDRY_MCP_WORKFLOW_MODE"):
            try:
                config.mode = WorkflowMode(mode.lower())
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_WORKFLOW_MODE: {mode}, using default")

        # Booleans: anything other than true/1/yes (case-insensitive) is False.
        if auto_validate := os.environ.get("FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE"):
            config.auto_validate = auto_validate.lower() in ("true", "1", "yes")

        if journal := os.environ.get("FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED"):
            config.journal_enabled = journal.lower() in ("true", "1", "yes")

        if batch_size := os.environ.get("FOUNDRY_MCP_WORKFLOW_BATCH_SIZE"):
            try:
                config.batch_size = int(batch_size)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_WORKFLOW_BATCH_SIZE: {batch_size}, using default")

        if threshold := os.environ.get("FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD"):
            try:
                config.context_threshold = int(threshold)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD: {threshold}, using default")

        return config
745
+
746
+
747
def load_workflow_config(
    config_file: Optional[Path] = None,
    use_env_fallback: bool = True,
) -> WorkflowConfig:
    """Load workflow configuration from TOML file with environment fallback.

    Priority (highest to lowest):
        1. TOML config file (if provided or found at default locations)
        2. Environment variables
        3. Default values

    Args:
        config_file: Optional path to TOML config file
        use_env_fallback: Whether to use environment variables as fallback

    Returns:
        WorkflowConfig instance with merged settings
    """
    config = WorkflowConfig()

    # Try to load from TOML; a bad explicit file warns, bad default-location
    # candidates only log at debug level and the search continues.
    toml_loaded = False
    if config_file and config_file.exists():
        try:
            config = WorkflowConfig.from_toml(config_file)
            toml_loaded = True
            logger.debug(f"Loaded workflow config from {config_file}")
        except Exception as e:
            logger.warning(f"Failed to load workflow config from {config_file}: {e}")
    else:
        # Try default locations (first readable one wins).
        default_paths = [
            Path("foundry-mcp.toml"),
            Path(".foundry-mcp.toml"),
            Path.home() / ".config" / "foundry-mcp" / "config.toml",
        ]
        for path in default_paths:
            if path.exists():
                try:
                    config = WorkflowConfig.from_toml(path)
                    toml_loaded = True
                    logger.debug(f"Loaded workflow config from {path}")
                    break
                except Exception as e:
                    logger.debug(f"Failed to load from {path}: {e}")

    # Apply environment variable overrides
    if use_env_fallback:
        env_config = WorkflowConfig.from_env()
        # Compare against dataclass defaults instead of repeating the literal
        # default values here, so the "env applies only when TOML left the
        # default" heuristic cannot drift out of sync with WorkflowConfig's
        # field defaults.
        defaults = WorkflowConfig()

        # Mode override.
        # NOTE(review): env overrides mode when no TOML was loaded OR the mode
        # is SINGLE — presumably SINGLE is the dataclass default; confirm
        # against WorkflowConfig's field definition before changing this.
        if not toml_loaded or config.mode == WorkflowMode.SINGLE:
            if os.environ.get("FOUNDRY_MCP_WORKFLOW_MODE"):
                config.mode = env_config.mode

        # Boolean overrides (env can override TOML when explicitly set)
        if os.environ.get("FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE"):
            config.auto_validate = env_config.auto_validate

        if os.environ.get("FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED"):
            config.journal_enabled = env_config.journal_enabled

        # Integer overrides: env applies only when TOML left the default and
        # the env value actually differs from it.
        if (
            config.batch_size == defaults.batch_size
            and env_config.batch_size != defaults.batch_size
        ):
            config.batch_size = env_config.batch_size

        if (
            config.context_threshold == defaults.context_threshold
            and env_config.context_threshold != defaults.context_threshold
        ):
            config.context_threshold = env_config.context_threshold

    return config
817
+
818
+
819
# Global workflow configuration instance, lazily created on first
# get_workflow_config() call and cleared by reset_workflow_config().
_workflow_config: Optional[WorkflowConfig] = None
821
+
822
+
823
def get_workflow_config() -> WorkflowConfig:
    """Return the process-wide WorkflowConfig, loading it lazily.

    The first call populates the module-level singleton via
    load_workflow_config(); later calls return the cached instance.

    Returns:
        The shared WorkflowConfig instance.
    """
    global _workflow_config
    cached = _workflow_config
    if cached is None:
        cached = load_workflow_config()
        _workflow_config = cached
    return cached
833
+
834
+
835
def set_workflow_config(config: WorkflowConfig) -> None:
    """Set the global workflow configuration instance.

    Replaces any previously loaded or cached configuration; subsequent
    get_workflow_config() calls return this instance.

    Args:
        config: WorkflowConfig instance to use globally
    """
    global _workflow_config
    _workflow_config = config
843
+
844
+
845
def reset_workflow_config() -> None:
    """Reset the global workflow configuration to None.

    Useful for testing or reloading configuration; the next
    get_workflow_config() call re-loads from file/environment.
    """
    global _workflow_config
    _workflow_config = None
852
+
853
+
854
+ # =============================================================================
855
+ # Consultation Configuration
856
+ # =============================================================================
857
+
858
+
859
@dataclass
class WorkflowConsultationConfig:
    """Consultation overrides scoped to a single workflow.

    Lets an individual workflow demand a minimum number of models for
    consensus and/or replace the global consultation timeout.

    TOML Configuration Example:
        [consultation.workflows.fidelity_review]
        min_models = 2
        timeout_override = 600.0

        [consultation.workflows.plan_review]
        min_models = 3

    Attributes:
        min_models: Minimum number of models required for consensus
            (default: 1). Values above 1 make the consultation
            orchestrator gather responses from several providers before
            synthesizing.
        timeout_override: Optional per-workflow timeout in seconds; when
            set it replaces ConsultationConfig's default_timeout for this
            workflow only.
    """

    min_models: int = 1
    timeout_override: Optional[float] = None

    def validate(self) -> None:
        """Check this workflow configuration for invalid values.

        Raises:
            ValueError: If min_models < 1 or timeout_override is set but
                not positive.
        """
        if self.min_models < 1:
            raise ValueError(f"min_models must be at least 1, got {self.min_models}")

        if self.timeout_override is not None and self.timeout_override <= 0:
            raise ValueError(
                f"timeout_override must be positive if set, got {self.timeout_override}"
            )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkflowConsultationConfig":
        """Build an instance from a plain dict (e.g. a parsed TOML table).

        Args:
            data: Mapping that may contain "min_models" and/or
                "timeout_override" entries.

        Returns:
            A new WorkflowConsultationConfig reflecting *data*; missing
            keys keep their defaults.
        """
        result = cls()

        if "min_models" in data:
            result.min_models = int(data["min_models"])

        # A present-but-None timeout_override is treated the same as absent.
        raw_timeout = data.get("timeout_override")
        if raw_timeout is not None:
            result.timeout_override = float(raw_timeout)

        return result
921
+
922
+
923
@dataclass
class ConsultationConfig:
    """AI consultation settings, read from the [consultation] section of foundry-mcp.toml.

    TOML Configuration Example:
        [consultation]
        # Provider priority list - first available wins
        # Format: "[api]provider/model" or "[cli]transport[:backend/model|:model]"
        priority = [
            "[cli]gemini:pro",
            "[cli]claude:opus",
            "[cli]opencode:openai/gpt-5.2",
            "[api]openai/gpt-4.1",
        ]

        # Per-provider overrides (optional)
        [consultation.overrides]
        "[cli]opencode:openai/gpt-5.2" = { timeout = 600 }
        "[api]openai/gpt-4.1" = { temperature = 0.3 }

        # Operational settings
        default_timeout = 300      # seconds (default: 300)
        max_retries = 2            # retry attempts on failure (default: 2)
        retry_delay = 5.0          # seconds between retries (default: 5.0)
        fallback_enabled = true    # try next provider on failure (default: true)
        cache_ttl = 3600           # cache TTL in seconds (default: 3600)

        # Per-workflow configuration (optional)
        [consultation.workflows.fidelity_review]
        min_models = 2
        timeout_override = 600.0

    Environment Variables:
        - FOUNDRY_MCP_CONSULTATION_PRIORITY: Comma-separated priority list
        - FOUNDRY_MCP_CONSULTATION_TIMEOUT: Default timeout
        - FOUNDRY_MCP_CONSULTATION_MAX_RETRIES: Max retry attempts
        - FOUNDRY_MCP_CONSULTATION_RETRY_DELAY: Delay between retries
        - FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED: Enable provider fallback
        - FOUNDRY_MCP_CONSULTATION_CACHE_TTL: Cache TTL

    Attributes:
        priority: Provider specs in priority order (first available wins).
        overrides: Per-provider setting overrides keyed by spec string.
        default_timeout: Default consultation timeout in seconds.
        max_retries: Maximum retry attempts on transient failures.
        retry_delay: Delay between retry attempts in seconds.
        fallback_enabled: Whether to try the next provider on failure.
        cache_ttl: Time-to-live for cached consultation results in seconds.
        workflows: Per-workflow overrides keyed by workflow name.
    """

    priority: List[str] = field(default_factory=list)
    overrides: Dict[str, Dict[str, Any]] = field(default_factory=dict)
    default_timeout: float = 300.0
    max_retries: int = 2
    retry_delay: float = 5.0
    fallback_enabled: bool = True
    cache_ttl: int = 3600
    workflows: Dict[str, WorkflowConsultationConfig] = field(default_factory=dict)

    def get_provider_specs(self) -> List[ProviderSpec]:
        """Parse the priority list into ProviderSpec objects.

        Returns:
            Parsed ProviderSpec instances, in priority order.

        Raises:
            ValueError: If any entry in the priority list is malformed.
        """
        return [ProviderSpec.parse(entry) for entry in self.priority]

    def get_override(self, spec: str) -> Dict[str, Any]:
        """Return override settings for *spec*.

        Args:
            spec: Provider spec string (e.g., "[api]openai/gpt-4.1")

        Returns:
            The configured override dict, or an empty dict when none exists.
        """
        try:
            return self.overrides[spec]
        except KeyError:
            return {}

    def get_workflow_config(self, workflow_name: str) -> WorkflowConsultationConfig:
        """Return the per-workflow configuration for *workflow_name*.

        Args:
            workflow_name: Workflow identifier such as "fidelity_review".

        Returns:
            The configured WorkflowConsultationConfig, or a fresh default
            instance (min_models=1, no timeout override) when the workflow
            has no entry.
        """
        if workflow_name in self.workflows:
            return self.workflows[workflow_name]
        return WorkflowConsultationConfig()

    def validate(self) -> None:
        """Validate operational settings, provider specs, and workflow entries.

        Raises:
            ValueError: If any setting is out of range, any priority entry
                fails to parse/validate, or any workflow config is invalid.
        """
        if self.default_timeout <= 0:
            raise ValueError(f"default_timeout must be positive, got {self.default_timeout}")

        if self.max_retries < 0:
            raise ValueError(f"max_retries must be non-negative, got {self.max_retries}")

        if self.retry_delay < 0:
            raise ValueError(f"retry_delay must be non-negative, got {self.retry_delay}")

        if self.cache_ttl <= 0:
            raise ValueError(f"cache_ttl must be positive, got {self.cache_ttl}")

        # Collect every priority-list problem before raising so the user
        # sees all bad specs at once.
        spec_problems = []
        for entry in self.priority:
            try:
                entry_errors = ProviderSpec.parse(entry).validate()
                spec_problems.extend(f"{entry}: {e}" for e in entry_errors)
            except ValueError as e:
                spec_problems.append(f"{entry}: {e}")

        if spec_problems:
            raise ValueError("Invalid provider specs in priority list:\n" + "\n".join(spec_problems))

        # Same aggregate-then-raise treatment for workflow entries.
        workflow_problems = []
        for name, workflow_config in self.workflows.items():
            try:
                workflow_config.validate()
            except ValueError as e:
                workflow_problems.append(f"workflows.{name}: {e}")

        if workflow_problems:
            raise ValueError("Invalid workflow configurations:\n" + "\n".join(workflow_problems))

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ConsultationConfig":
        """Create ConsultationConfig from a dictionary (typically the [consultation] section).

        Malformed sections (wrong container type) are skipped with a
        warning rather than raising.

        Args:
            data: Dictionary with consultation configuration values

        Returns:
            ConsultationConfig instance
        """
        config = cls()

        if "priority" in data:
            raw_priority = data["priority"]
            if isinstance(raw_priority, list):
                config.priority = [str(item) for item in raw_priority]
            else:
                logger.warning(f"Invalid priority format (expected list): {type(raw_priority)}")

        if "overrides" in data:
            raw_overrides = data["overrides"]
            if isinstance(raw_overrides, dict):
                config.overrides = {str(k): dict(v) for k, v in raw_overrides.items()}
            else:
                logger.warning(f"Invalid overrides format (expected dict): {type(raw_overrides)}")

        # Scalar settings share a single convert-and-assign path.
        for key, convert in (
            ("default_timeout", float),
            ("max_retries", int),
            ("retry_delay", float),
            ("fallback_enabled", bool),
            ("cache_ttl", int),
        ):
            if key in data:
                setattr(config, key, convert(data[key]))

        if "workflows" in data:
            raw_workflows = data["workflows"]
            if isinstance(raw_workflows, dict):
                for wf_name, wf_data in raw_workflows.items():
                    if isinstance(wf_data, dict):
                        config.workflows[str(wf_name)] = (
                            WorkflowConsultationConfig.from_dict(wf_data)
                        )
                    else:
                        logger.warning(
                            f"Invalid workflow config format for '{wf_name}' "
                            f"(expected dict): {type(wf_data)}"
                        )
            else:
                logger.warning(f"Invalid workflows format (expected dict): {type(raw_workflows)}")

        return config

    @classmethod
    def from_toml(cls, path: Path) -> "ConsultationConfig":
        """Load consultation configuration from a TOML file.

        Args:
            path: Path to the TOML configuration file

        Returns:
            ConsultationConfig built from the file's [consultation] table
            (defaults when the table is absent).

        Raises:
            FileNotFoundError: If the config file doesn't exist
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "rb") as fh:
            parsed = tomllib.load(fh)

        return cls.from_dict(parsed.get("consultation", {}))

    @classmethod
    def from_env(cls) -> "ConsultationConfig":
        """Build a ConsultationConfig purely from environment variables.

        Environment variables:
            - FOUNDRY_MCP_CONSULTATION_PRIORITY: Comma-separated priority list
            - FOUNDRY_MCP_CONSULTATION_TIMEOUT: Default timeout in seconds
            - FOUNDRY_MCP_CONSULTATION_MAX_RETRIES: Max retry attempts
            - FOUNDRY_MCP_CONSULTATION_RETRY_DELAY: Delay between retries
            - FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED: Enable fallback (true/false)
            - FOUNDRY_MCP_CONSULTATION_CACHE_TTL: Cache TTL in seconds

        Unset or invalid variables leave defaults in place (invalid values
        log a warning instead of raising).

        Returns:
            ConsultationConfig instance with environment-based settings
        """
        config = cls()

        # Priority list: comma-separated, blank segments dropped.
        raw_priority = os.environ.get("FOUNDRY_MCP_CONSULTATION_PRIORITY")
        if raw_priority:
            config.priority = [part.strip() for part in raw_priority.split(",") if part.strip()]

        # Numeric settings share one parse-and-warn path.
        for env_name, attr, convert in (
            ("FOUNDRY_MCP_CONSULTATION_TIMEOUT", "default_timeout", float),
            ("FOUNDRY_MCP_CONSULTATION_MAX_RETRIES", "max_retries", int),
            ("FOUNDRY_MCP_CONSULTATION_RETRY_DELAY", "retry_delay", float),
            ("FOUNDRY_MCP_CONSULTATION_CACHE_TTL", "cache_ttl", int),
        ):
            raw = os.environ.get(env_name)
            if raw:
                try:
                    setattr(config, attr, convert(raw))
                except ValueError:
                    logger.warning(f"Invalid {env_name}: {raw}, using default")

        # Boolean flag: anything outside the truthy set means False.
        raw_fallback = os.environ.get("FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED")
        if raw_fallback:
            config.fallback_enabled = raw_fallback.lower() in ("true", "1", "yes")

        return config
1210
+
1211
+
1212
def load_consultation_config(
    config_file: Optional[Path] = None,
    use_env_fallback: bool = True,
) -> ConsultationConfig:
    """Load consultation configuration from TOML file with environment fallback.

    Priority (highest to lowest):
        1. TOML config file (if provided or found at default locations)
        2. Environment variables
        3. Default values

    Args:
        config_file: Optional path to TOML config file
        use_env_fallback: Whether to use environment variables as fallback

    Returns:
        ConsultationConfig instance with merged settings
    """
    config = ConsultationConfig()

    # Try to load from TOML; a bad explicit file warns, bad default-location
    # candidates only log at debug level and the search continues.
    if config_file and config_file.exists():
        try:
            config = ConsultationConfig.from_toml(config_file)
            logger.debug(f"Loaded consultation config from {config_file}")
        except Exception as e:
            logger.warning(f"Failed to load consultation config from {config_file}: {e}")
    else:
        # Try default locations (first readable one wins).
        default_paths = [
            Path("foundry-mcp.toml"),
            Path(".foundry-mcp.toml"),
            Path.home() / ".config" / "foundry-mcp" / "config.toml",
        ]
        for path in default_paths:
            if path.exists():
                try:
                    config = ConsultationConfig.from_toml(path)
                    logger.debug(f"Loaded consultation config from {path}")
                    break
                except Exception as e:
                    logger.debug(f"Failed to load from {path}: {e}")

    # Apply environment variable overrides
    if use_env_fallback:
        env_config = ConsultationConfig.from_env()
        # Compare against dataclass defaults rather than repeating the
        # literal values (300.0, 2, 5.0, 3600) so the "env applies only when
        # TOML left the default" heuristic cannot drift out of sync with
        # ConsultationConfig's field defaults.
        defaults = ConsultationConfig()

        # Priority: an explicitly set env var overrides TOML outright.
        # (Collapses the original two branches, which both assigned
        # env_config.priority and together fired exactly when the env var
        # was truthy.)
        if os.environ.get("FOUNDRY_MCP_CONSULTATION_PRIORITY"):
            config.priority = env_config.priority

        # Numeric settings: env applies only when TOML left the default and
        # the env value actually differs from it.
        if (
            config.default_timeout == defaults.default_timeout
            and env_config.default_timeout != defaults.default_timeout
        ):
            config.default_timeout = env_config.default_timeout

        if (
            config.max_retries == defaults.max_retries
            and env_config.max_retries != defaults.max_retries
        ):
            config.max_retries = env_config.max_retries

        if (
            config.retry_delay == defaults.retry_delay
            and env_config.retry_delay != defaults.retry_delay
        ):
            config.retry_delay = env_config.retry_delay

        # Fallback flag: env presence overrides TOML.
        if os.environ.get("FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED"):
            config.fallback_enabled = env_config.fallback_enabled

        if (
            config.cache_ttl == defaults.cache_ttl
            and env_config.cache_ttl != defaults.cache_ttl
        ):
            config.cache_ttl = env_config.cache_ttl

    return config
1287
+
1288
+
1289
# Global consultation configuration instance, lazily created on first
# get_consultation_config() call and cleared by reset_consultation_config().
_consultation_config: Optional[ConsultationConfig] = None
1291
+
1292
+
1293
def get_consultation_config() -> ConsultationConfig:
    """Return the process-wide ConsultationConfig, loading it lazily.

    The first call populates the module-level singleton via
    load_consultation_config(); later calls return the cached instance.

    Returns:
        The shared ConsultationConfig instance.
    """
    global _consultation_config
    cached = _consultation_config
    if cached is None:
        cached = load_consultation_config()
        _consultation_config = cached
    return cached
1303
+
1304
+
1305
def set_consultation_config(config: ConsultationConfig) -> None:
    """Set the global consultation configuration instance.

    Replaces any previously loaded or cached configuration; subsequent
    get_consultation_config() calls return this instance.

    Args:
        config: ConsultationConfig instance to use globally
    """
    global _consultation_config
    _consultation_config = config
1313
+
1314
+
1315
def reset_consultation_config() -> None:
    """Reset the global consultation configuration to None.

    Useful for testing or reloading configuration; the next
    get_consultation_config() call re-loads from file/environment.
    """
    global _consultation_config
    _consultation_config = None
1322
+
1323
+
1324
# Public API of this module: the unified provider-spec notation plus the
# three configuration families (LLM, workflow, consultation), each with its
# load/get/set/reset lifecycle helpers.
__all__ = [
    # Provider Spec (unified priority notation)
    "ProviderSpec",
    # LLM Config
    "LLMProviderType",
    "LLMConfig",
    "load_llm_config",
    "get_llm_config",
    "set_llm_config",
    "reset_llm_config",
    "DEFAULT_MODELS",
    "API_KEY_ENV_VARS",
    # Workflow Config
    "WorkflowMode",
    "WorkflowConfig",
    "load_workflow_config",
    "get_workflow_config",
    "set_workflow_config",
    "reset_workflow_config",
    # Consultation Config
    "WorkflowConsultationConfig",
    "ConsultationConfig",
    "load_consultation_config",
    "get_consultation_config",
    "set_consultation_config",
    "reset_consultation_config",
]