foundry-mcp 0.8.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (153)
  1. foundry_mcp/__init__.py +13 -0
  2. foundry_mcp/cli/__init__.py +67 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +640 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +667 -0
  15. foundry_mcp/cli/commands/session.py +472 -0
  16. foundry_mcp/cli/commands/specs.py +686 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +298 -0
  22. foundry_mcp/cli/logging.py +212 -0
  23. foundry_mcp/cli/main.py +44 -0
  24. foundry_mcp/cli/output.py +122 -0
  25. foundry_mcp/cli/registry.py +110 -0
  26. foundry_mcp/cli/resilience.py +178 -0
  27. foundry_mcp/cli/transcript.py +217 -0
  28. foundry_mcp/config.py +1454 -0
  29. foundry_mcp/core/__init__.py +144 -0
  30. foundry_mcp/core/ai_consultation.py +1773 -0
  31. foundry_mcp/core/batch_operations.py +1202 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/health.py +749 -0
  40. foundry_mcp/core/intake.py +933 -0
  41. foundry_mcp/core/journal.py +700 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1376 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +146 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +387 -0
  57. foundry_mcp/core/prometheus.py +564 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +691 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
  61. foundry_mcp/core/prompts/plan_review.py +627 -0
  62. foundry_mcp/core/providers/__init__.py +237 -0
  63. foundry_mcp/core/providers/base.py +515 -0
  64. foundry_mcp/core/providers/claude.py +472 -0
  65. foundry_mcp/core/providers/codex.py +637 -0
  66. foundry_mcp/core/providers/cursor_agent.py +630 -0
  67. foundry_mcp/core/providers/detectors.py +515 -0
  68. foundry_mcp/core/providers/gemini.py +426 -0
  69. foundry_mcp/core/providers/opencode.py +718 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +308 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +857 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/research/__init__.py +68 -0
  78. foundry_mcp/core/research/memory.py +528 -0
  79. foundry_mcp/core/research/models.py +1234 -0
  80. foundry_mcp/core/research/providers/__init__.py +40 -0
  81. foundry_mcp/core/research/providers/base.py +242 -0
  82. foundry_mcp/core/research/providers/google.py +507 -0
  83. foundry_mcp/core/research/providers/perplexity.py +442 -0
  84. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  85. foundry_mcp/core/research/providers/tavily.py +383 -0
  86. foundry_mcp/core/research/workflows/__init__.py +25 -0
  87. foundry_mcp/core/research/workflows/base.py +298 -0
  88. foundry_mcp/core/research/workflows/chat.py +271 -0
  89. foundry_mcp/core/research/workflows/consensus.py +539 -0
  90. foundry_mcp/core/research/workflows/deep_research.py +4142 -0
  91. foundry_mcp/core/research/workflows/ideate.py +682 -0
  92. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  93. foundry_mcp/core/resilience.py +600 -0
  94. foundry_mcp/core/responses.py +1624 -0
  95. foundry_mcp/core/review.py +366 -0
  96. foundry_mcp/core/security.py +438 -0
  97. foundry_mcp/core/spec.py +4119 -0
  98. foundry_mcp/core/task.py +2463 -0
  99. foundry_mcp/core/testing.py +839 -0
  100. foundry_mcp/core/validation.py +2357 -0
  101. foundry_mcp/dashboard/__init__.py +32 -0
  102. foundry_mcp/dashboard/app.py +119 -0
  103. foundry_mcp/dashboard/components/__init__.py +17 -0
  104. foundry_mcp/dashboard/components/cards.py +88 -0
  105. foundry_mcp/dashboard/components/charts.py +177 -0
  106. foundry_mcp/dashboard/components/filters.py +136 -0
  107. foundry_mcp/dashboard/components/tables.py +195 -0
  108. foundry_mcp/dashboard/data/__init__.py +11 -0
  109. foundry_mcp/dashboard/data/stores.py +433 -0
  110. foundry_mcp/dashboard/launcher.py +300 -0
  111. foundry_mcp/dashboard/views/__init__.py +12 -0
  112. foundry_mcp/dashboard/views/errors.py +217 -0
  113. foundry_mcp/dashboard/views/metrics.py +164 -0
  114. foundry_mcp/dashboard/views/overview.py +96 -0
  115. foundry_mcp/dashboard/views/providers.py +83 -0
  116. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  117. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  118. foundry_mcp/prompts/__init__.py +9 -0
  119. foundry_mcp/prompts/workflows.py +525 -0
  120. foundry_mcp/resources/__init__.py +9 -0
  121. foundry_mcp/resources/specs.py +591 -0
  122. foundry_mcp/schemas/__init__.py +38 -0
  123. foundry_mcp/schemas/intake-schema.json +89 -0
  124. foundry_mcp/schemas/sdd-spec-schema.json +414 -0
  125. foundry_mcp/server.py +150 -0
  126. foundry_mcp/tools/__init__.py +10 -0
  127. foundry_mcp/tools/unified/__init__.py +92 -0
  128. foundry_mcp/tools/unified/authoring.py +3620 -0
  129. foundry_mcp/tools/unified/context_helpers.py +98 -0
  130. foundry_mcp/tools/unified/documentation_helpers.py +268 -0
  131. foundry_mcp/tools/unified/environment.py +1341 -0
  132. foundry_mcp/tools/unified/error.py +479 -0
  133. foundry_mcp/tools/unified/health.py +225 -0
  134. foundry_mcp/tools/unified/journal.py +841 -0
  135. foundry_mcp/tools/unified/lifecycle.py +640 -0
  136. foundry_mcp/tools/unified/metrics.py +777 -0
  137. foundry_mcp/tools/unified/plan.py +876 -0
  138. foundry_mcp/tools/unified/pr.py +294 -0
  139. foundry_mcp/tools/unified/provider.py +589 -0
  140. foundry_mcp/tools/unified/research.py +1283 -0
  141. foundry_mcp/tools/unified/review.py +1042 -0
  142. foundry_mcp/tools/unified/review_helpers.py +314 -0
  143. foundry_mcp/tools/unified/router.py +102 -0
  144. foundry_mcp/tools/unified/server.py +565 -0
  145. foundry_mcp/tools/unified/spec.py +1283 -0
  146. foundry_mcp/tools/unified/task.py +3846 -0
  147. foundry_mcp/tools/unified/test.py +431 -0
  148. foundry_mcp/tools/unified/verification.py +520 -0
  149. foundry_mcp-0.8.22.dist-info/METADATA +344 -0
  150. foundry_mcp-0.8.22.dist-info/RECORD +153 -0
  151. foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
  152. foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
  153. foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
foundry_mcp/core/llm_config.py
@@ -0,0 +1,1376 @@
"""
LLM configuration parsing for foundry-mcp.

Parses the [llm] section from foundry-mcp.toml to configure LLM provider settings.

TOML Configuration Example:

    [llm]
    provider = "openai"  # Required: "openai", "anthropic", or "local"
    api_key = "sk-..."   # Optional: defaults to env var based on provider
    model = "gpt-4.1"    # Optional: provider-specific default
    timeout = 30         # Optional: request timeout in seconds (default: 30)

Environment Variables (fallback if not in TOML):
    - FOUNDRY_MCP_LLM_PROVIDER: LLM provider type ("openai", "anthropic", "local")
    - FOUNDRY_MCP_LLM_API_KEY: API key (takes precedence over provider-specific keys)
    - FOUNDRY_MCP_LLM_MODEL: Model identifier
    - FOUNDRY_MCP_LLM_TIMEOUT: Request timeout in seconds
    - FOUNDRY_MCP_LLM_BASE_URL: Custom API base URL
    - FOUNDRY_MCP_LLM_MAX_TOKENS: Default max tokens
    - FOUNDRY_MCP_LLM_TEMPERATURE: Default temperature
    - FOUNDRY_MCP_LLM_ORGANIZATION: Organization ID (OpenAI)

Provider-specific API key fallbacks:
    - OPENAI_API_KEY: OpenAI API key (if FOUNDRY_MCP_LLM_API_KEY not set)
    - ANTHROPIC_API_KEY: Anthropic API key (if FOUNDRY_MCP_LLM_API_KEY not set)
"""

import logging
import os
import re
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Optional, Dict, Any, List, Literal

try:
    import tomllib
except ImportError:
    import tomli as tomllib  # Python < 3.11 fallback


logger = logging.getLogger(__name__)


# =============================================================================
# Provider Specification (Unified Priority Notation)
# =============================================================================


@dataclass
class ProviderSpec:
    """Parsed provider specification from hybrid notation.

    Supports bracket-prefix notation for unified API/CLI provider configuration:
    - [api]openai/gpt-4.1            -> API provider with model
    - [api]anthropic/claude-sonnet-4 -> API provider with model
    - [cli]gemini:pro                -> CLI provider with model
    - [cli]claude:opus               -> CLI provider with model
    - [cli]opencode:openai/gpt-5.2   -> CLI provider routing to backend
    - [cli]codex                     -> CLI provider with default model

    Grammar:
        spec     := "[api]" api_spec | "[cli]" cli_spec
        api_spec := provider "/" model
        cli_spec := transport (":" backend "/" model | ":" model | "")

    Attributes:
        type: Provider type - "api" for direct API calls, "cli" for CLI tools
        provider: Provider/transport identifier (openai, gemini, opencode, etc.)
        backend: Optional backend for CLI routing (openai, anthropic, gemini)
        model: Optional model identifier (gpt-4.1, pro, opus, etc.)
        raw: Original specification string for error messages
    """

    type: Literal["api", "cli"]
    provider: str
    backend: Optional[str] = None
    model: Optional[str] = None
    raw: str = ""

    # Known providers for validation
    KNOWN_API_PROVIDERS = {"openai", "anthropic", "local"}
    KNOWN_CLI_PROVIDERS = {"gemini", "codex", "cursor-agent", "opencode", "claude"}
    KNOWN_BACKENDS = {"openai", "anthropic", "gemini", "local"}

    # Regex patterns for parsing
    _API_PATTERN = re.compile(r"^\[api\]([^/]+)/(.+)$")
    _CLI_FULL_PATTERN = re.compile(r"^\[cli\]([^:]+):([^/]+)/(.+)$")  # transport:backend/model
    _CLI_MODEL_PATTERN = re.compile(r"^\[cli\]([^:]+):([^/]+)$")  # transport:model
    _CLI_SIMPLE_PATTERN = re.compile(r"^\[cli\]([^:]+)$")  # transport only

    @classmethod
    def parse(cls, spec: str) -> "ProviderSpec":
        """Parse a provider specification string.

        Args:
            spec: Provider spec in bracket notation (e.g., "[api]openai/gpt-4.1")

        Returns:
            ProviderSpec instance with parsed components

        Raises:
            ValueError: If the spec format is invalid

        Examples:
            >>> ProviderSpec.parse("[api]openai/gpt-4.1")
            ProviderSpec(type='api', provider='openai', model='gpt-4.1')

            >>> ProviderSpec.parse("[cli]gemini:pro")
            ProviderSpec(type='cli', provider='gemini', model='pro')

            >>> ProviderSpec.parse("[cli]opencode:openai/gpt-5.2")
            ProviderSpec(type='cli', provider='opencode', backend='openai', model='gpt-5.2')
        """
        spec = spec.strip()

        if not spec:
            raise ValueError("Provider spec cannot be empty")

        # Try API pattern: [api]provider/model
        if match := cls._API_PATTERN.match(spec):
            provider, model = match.groups()
            return cls(
                type="api",
                provider=provider.lower(),
                model=model,
                raw=spec,
            )

        # Try CLI full pattern: [cli]transport:backend/model
        if match := cls._CLI_FULL_PATTERN.match(spec):
            transport, backend, model = match.groups()
            return cls(
                type="cli",
                provider=transport.lower(),
                backend=backend.lower(),
                model=model,
                raw=spec,
            )

        # Try CLI model pattern: [cli]transport:model
        if match := cls._CLI_MODEL_PATTERN.match(spec):
            transport, model = match.groups()
            return cls(
                type="cli",
                provider=transport.lower(),
                model=model,
                raw=spec,
            )

        # Try CLI simple pattern: [cli]transport
        if match := cls._CLI_SIMPLE_PATTERN.match(spec):
            transport = match.group(1)
            return cls(
                type="cli",
                provider=transport.lower(),
                raw=spec,
            )

        # Invalid format
        raise ValueError(
            f"Invalid provider spec '{spec}'. Expected format: "
            "[api]provider/model or [cli]transport[:backend/model|:model]"
        )

    @classmethod
    def parse_flexible(cls, spec: str) -> "ProviderSpec":
        """Parse with fallback for simple provider IDs."""
        spec = spec.strip()
        if spec.startswith("["):
            return cls.parse(spec)
        return cls(type="cli", provider=spec.lower(), raw=spec)

    def validate(self) -> List[str]:
        """Validate the provider specification.

        Returns:
            List of validation error messages (empty if valid)
        """
        errors = []

        if self.type == "api":
            if self.provider not in self.KNOWN_API_PROVIDERS:
                errors.append(
                    f"Unknown API provider '{self.provider}'. "
                    f"Known: {sorted(self.KNOWN_API_PROVIDERS)}"
                )
            if not self.model:
                errors.append("API provider spec requires a model")
        else:  # cli
            if self.provider not in self.KNOWN_CLI_PROVIDERS:
                errors.append(
                    f"Unknown CLI provider '{self.provider}'. "
                    f"Known: {sorted(self.KNOWN_CLI_PROVIDERS)}"
                )
            if self.backend and self.backend not in self.KNOWN_BACKENDS:
                errors.append(
                    f"Unknown backend '{self.backend}'. "
                    f"Known: {sorted(self.KNOWN_BACKENDS)}"
                )

        return errors

    def __str__(self) -> str:
        """Return canonical string representation."""
        if self.type == "api":
            return f"[api]{self.provider}/{self.model}"
        elif self.backend:
            return f"[cli]{self.provider}:{self.backend}/{self.model}"
        elif self.model:
            return f"[cli]{self.provider}:{self.model}"
        else:
            return f"[cli]{self.provider}"

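# Usage sketch (illustrative, not part of the shipped module): parse_flexible()
# accepts bare CLI IDs that parse() would reject, and __str__ returns the
# canonical bracket form. This mirrors the doctests in parse() above.
#
#     >>> spec = ProviderSpec.parse_flexible("codex")
#     >>> (spec.type, spec.provider, spec.model)
#     ('cli', 'codex', None)
#     >>> str(spec)
#     '[cli]codex'
#     >>> ProviderSpec.parse("[cli]opencode:openai/gpt-5.2").backend
#     'openai'

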
class LLMProviderType(str, Enum):
    """Supported LLM provider types."""

    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    LOCAL = "local"


# Default models per provider
DEFAULT_MODELS: Dict[LLMProviderType, str] = {
    LLMProviderType.OPENAI: "gpt-4.1",
    LLMProviderType.ANTHROPIC: "claude-sonnet-4-5",
    LLMProviderType.LOCAL: "llama4",
}

# Environment variable names for API keys
API_KEY_ENV_VARS: Dict[LLMProviderType, str] = {
    LLMProviderType.OPENAI: "OPENAI_API_KEY",
    LLMProviderType.ANTHROPIC: "ANTHROPIC_API_KEY",
    LLMProviderType.LOCAL: "",  # Local providers typically don't need keys
}


@dataclass
class LLMConfig:
    """LLM configuration parsed from foundry-mcp.toml.

    Attributes:
        provider: The LLM provider type ("openai", "anthropic", "local")
        api_key: API key for the provider (optional, falls back to env var)
        model: Model identifier (optional, uses provider default)
        timeout: Request timeout in seconds (default: 30)
        base_url: Custom API base URL (optional, for proxies or local servers)
        organization: Organization ID (OpenAI only)
        max_tokens: Default max tokens for responses
        temperature: Default temperature for generation
    """

    provider: LLMProviderType = LLMProviderType.OPENAI
    api_key: Optional[str] = None
    model: Optional[str] = None
    timeout: int = 30
    base_url: Optional[str] = None
    organization: Optional[str] = None
    max_tokens: int = 1024
    temperature: float = 0.7

    def get_api_key(self) -> Optional[str]:
        """Get API key, falling back to environment variables if not set.

        Priority:
            1. Explicit api_key set in config
            2. FOUNDRY_MCP_LLM_API_KEY environment variable
            3. Provider-specific env var (OPENAI_API_KEY, ANTHROPIC_API_KEY)

        Returns:
            API key string or None if not available
        """
        if self.api_key:
            return self.api_key

        # Check unified env var first
        if unified_key := os.environ.get("FOUNDRY_MCP_LLM_API_KEY"):
            return unified_key

        # Fall back to provider-specific env var
        env_var = API_KEY_ENV_VARS.get(self.provider, "")
        if env_var:
            return os.environ.get(env_var)

        return None

    def get_model(self) -> str:
        """Get model, falling back to provider default if not set.

        Returns:
            Model identifier string
        """
        if self.model:
            return self.model

        return DEFAULT_MODELS.get(self.provider, "gpt-4.1")

    def validate(self) -> None:
        """Validate the configuration.

        Raises:
            ValueError: If configuration is invalid
        """
        # Validate timeout
        if self.timeout <= 0:
            raise ValueError(f"timeout must be positive, got {self.timeout}")

        # Validate max_tokens
        if self.max_tokens <= 0:
            raise ValueError(f"max_tokens must be positive, got {self.max_tokens}")

        # Validate temperature
        if not 0 <= self.temperature <= 2:
            raise ValueError(f"temperature must be between 0 and 2, got {self.temperature}")

        # Check API key for non-local providers
        if self.provider != LLMProviderType.LOCAL:
            if not self.get_api_key():
                env_var = API_KEY_ENV_VARS.get(self.provider, "")
                raise ValueError(
                    f"API key required for {self.provider.value} provider. "
                    f"Set 'api_key' in config or {env_var} environment variable."
                )

    @classmethod
    def from_toml(cls, path: Path) -> "LLMConfig":
        """Load LLM configuration from a TOML file.

        Args:
            path: Path to the TOML configuration file

        Returns:
            LLMConfig instance with parsed settings

        Raises:
            FileNotFoundError: If the config file doesn't exist
            ValueError: If the configuration is invalid
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "rb") as f:
            data = tomllib.load(f)

        return cls.from_dict(data.get("llm", {}))

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "LLMConfig":
        """Create LLMConfig from a dictionary (typically the [llm] section).

        Args:
            data: Dictionary with LLM configuration values

        Returns:
            LLMConfig instance

        Raises:
            ValueError: If provider type is invalid
        """
        config = cls()

        # Parse provider
        if "provider" in data:
            provider_str = data["provider"].lower()
            try:
                config.provider = LLMProviderType(provider_str)
            except ValueError:
                valid = [p.value for p in LLMProviderType]
                raise ValueError(
                    f"Invalid provider '{provider_str}'. Must be one of: {valid}"
                )

        # Parse other fields
        if "api_key" in data:
            config.api_key = data["api_key"]

        if "model" in data:
            config.model = data["model"]

        if "timeout" in data:
            config.timeout = int(data["timeout"])

        if "base_url" in data:
            config.base_url = data["base_url"]

        if "organization" in data:
            config.organization = data["organization"]

        if "max_tokens" in data:
            config.max_tokens = int(data["max_tokens"])

        if "temperature" in data:
            config.temperature = float(data["temperature"])

        return config

    @classmethod
    def from_env(cls) -> "LLMConfig":
        """Create LLMConfig from environment variables only.

        Environment variables:
            - FOUNDRY_MCP_LLM_PROVIDER: Provider type ("openai", "anthropic", "local")
            - FOUNDRY_MCP_LLM_API_KEY: API key (unified, takes precedence)
            - FOUNDRY_MCP_LLM_MODEL: Model identifier
            - FOUNDRY_MCP_LLM_TIMEOUT: Request timeout in seconds
            - FOUNDRY_MCP_LLM_BASE_URL: Custom API base URL
            - FOUNDRY_MCP_LLM_MAX_TOKENS: Default max tokens
            - FOUNDRY_MCP_LLM_TEMPERATURE: Default temperature
            - FOUNDRY_MCP_LLM_ORGANIZATION: Organization ID (OpenAI only)

        Returns:
            LLMConfig instance with environment-based settings
        """
        config = cls()

        # Provider
        if provider := os.environ.get("FOUNDRY_MCP_LLM_PROVIDER"):
            try:
                config.provider = LLMProviderType(provider.lower())
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_PROVIDER: {provider}, using default")

        # API Key (explicit env var, not the provider-specific fallback)
        if api_key := os.environ.get("FOUNDRY_MCP_LLM_API_KEY"):
            config.api_key = api_key

        # Model
        if model := os.environ.get("FOUNDRY_MCP_LLM_MODEL"):
            config.model = model

        # Timeout
        if timeout := os.environ.get("FOUNDRY_MCP_LLM_TIMEOUT"):
            try:
                config.timeout = int(timeout)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_TIMEOUT: {timeout}, using default")

        # Base URL
        if base_url := os.environ.get("FOUNDRY_MCP_LLM_BASE_URL"):
            config.base_url = base_url

        # Max tokens
        if max_tokens := os.environ.get("FOUNDRY_MCP_LLM_MAX_TOKENS"):
            try:
                config.max_tokens = int(max_tokens)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_MAX_TOKENS: {max_tokens}, using default")

        # Temperature
        if temperature := os.environ.get("FOUNDRY_MCP_LLM_TEMPERATURE"):
            try:
                config.temperature = float(temperature)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_LLM_TEMPERATURE: {temperature}, using default")

        # Organization
        if organization := os.environ.get("FOUNDRY_MCP_LLM_ORGANIZATION"):
            config.organization = organization

        return config

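# Usage sketch (illustrative): key and model resolution for a config with no
# explicit api_key or model. With OPENAI_API_KEY exported and the unified
# FOUNDRY_MCP_LLM_API_KEY unset, get_api_key() falls through to the
# provider-specific variable; the 'sk-...' value below is a placeholder.
#
#     >>> cfg = LLMConfig.from_dict({"provider": "openai", "temperature": 0.2})
#     >>> cfg.get_model()          # no model configured -> provider default
#     'gpt-4.1'
#     >>> cfg.get_api_key()        # resolved from OPENAI_API_KEY here
#     'sk-...'

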
def load_llm_config(
    config_file: Optional[Path] = None,
    use_env_fallback: bool = True,
) -> LLMConfig:
    """Load LLM configuration from TOML file with environment fallback.

    Priority (highest to lowest):
        1. TOML config file (if provided or found at default locations)
        2. Environment variables
        3. Default values

    Args:
        config_file: Optional path to TOML config file
        use_env_fallback: Whether to use environment variables as fallback

    Returns:
        LLMConfig instance with merged settings
    """
    config = LLMConfig()

    # Try to load from TOML
    toml_loaded = False
    if config_file and config_file.exists():
        try:
            config = LLMConfig.from_toml(config_file)
            toml_loaded = True
            logger.debug(f"Loaded LLM config from {config_file}")
        except Exception as e:
            logger.warning(f"Failed to load LLM config from {config_file}: {e}")
    else:
        # Try default locations
        default_paths = [
            Path("foundry-mcp.toml"),
            Path(".foundry-mcp.toml"),
            Path.home() / ".config" / "foundry-mcp" / "config.toml",
        ]
        for path in default_paths:
            if path.exists():
                try:
                    config = LLMConfig.from_toml(path)
                    toml_loaded = True
                    logger.debug(f"Loaded LLM config from {path}")
                    break
                except Exception as e:
                    logger.debug(f"Failed to load from {path}: {e}")

    # Apply environment variable overrides
    if use_env_fallback:
        env_config = LLMConfig.from_env()

        # Only override if TOML didn't set the value
        if not toml_loaded or config.provider == LLMProviderType.OPENAI:
            if os.environ.get("FOUNDRY_MCP_LLM_PROVIDER"):
                config.provider = env_config.provider

        # API key: env overrides if set (explicit FOUNDRY_MCP_LLM_API_KEY)
        if not config.api_key and env_config.api_key:
            config.api_key = env_config.api_key

        if not config.model and env_config.model:
            config.model = env_config.model

        if config.timeout == 30 and env_config.timeout != 30:
            config.timeout = env_config.timeout

        if not config.base_url and env_config.base_url:
            config.base_url = env_config.base_url

        if config.max_tokens == 1024 and env_config.max_tokens != 1024:
            config.max_tokens = env_config.max_tokens

        if config.temperature == 0.7 and env_config.temperature != 0.7:
            config.temperature = env_config.temperature

        if not config.organization and env_config.organization:
            config.organization = env_config.organization

    return config


# Global configuration instance
_llm_config: Optional[LLMConfig] = None


def get_llm_config() -> LLMConfig:
    """Get the global LLM configuration instance.

    Returns:
        LLMConfig instance (loaded from file/env on first call)
    """
    global _llm_config
    if _llm_config is None:
        _llm_config = load_llm_config()
    return _llm_config


def set_llm_config(config: LLMConfig) -> None:
    """Set the global LLM configuration instance.

    Args:
        config: LLMConfig instance to use globally
    """
    global _llm_config
    _llm_config = config


def reset_llm_config() -> None:
    """Reset the global LLM configuration to None.

    Useful for testing or reloading configuration.
    """
    global _llm_config
    _llm_config = None

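# Merge-semantics sketch (illustrative): env values only replace defaults the
# TOML left untouched, so a TOML timeout of 60 survives even when
# FOUNDRY_MCP_LLM_MODEL is set, while the unset model yields to the env var
# (assumes FOUNDRY_MCP_LLM_TIMEOUT is not set; "gpt-4.1-mini" is a made-up name).
#
#     >>> os.environ["FOUNDRY_MCP_LLM_MODEL"] = "gpt-4.1-mini"
#     >>> cfg = load_llm_config(Path("foundry-mcp.toml"))  # [llm] timeout = 60
#     >>> (cfg.timeout, cfg.model)
#     (60, 'gpt-4.1-mini')

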
# =============================================================================
# Workflow Configuration
# =============================================================================


class WorkflowMode(str, Enum):
    """Workflow execution modes.

    SINGLE: Execute one task at a time with user approval between tasks
    AUTONOMOUS: Execute all tasks in phase automatically until completion or blocker
    BATCH: Execute a specified number of tasks, then pause for review
    """

    SINGLE = "single"
    AUTONOMOUS = "autonomous"
    BATCH = "batch"


@dataclass
class WorkflowConfig:
    """Workflow configuration parsed from foundry-mcp.toml [workflow] section.

    TOML Configuration Example:

        [workflow]
        mode = "single"          # Execution mode: "single", "autonomous", or "batch"
        auto_validate = true     # Automatically run validation after task completion
        journal_enabled = true   # Enable journaling of task completions
        batch_size = 5           # Number of tasks to execute in batch mode
        context_threshold = 85   # Context usage threshold (%) to trigger pause

    Environment Variables:
        - FOUNDRY_MCP_WORKFLOW_MODE: Workflow execution mode
        - FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE: Enable auto-validation (true/false)
        - FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED: Enable journaling (true/false)
        - FOUNDRY_MCP_WORKFLOW_BATCH_SIZE: Batch size for batch mode
        - FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD: Context threshold percentage

    Attributes:
        mode: Workflow execution mode
        auto_validate: Whether to run validation after task completion
        journal_enabled: Whether to journal task completions
        batch_size: Number of tasks to execute in batch mode
        context_threshold: Context usage threshold to trigger pause (percentage)
    """

    mode: WorkflowMode = WorkflowMode.SINGLE
    auto_validate: bool = True
    journal_enabled: bool = True
    batch_size: int = 5
    context_threshold: int = 85

    def validate(self) -> None:
        """Validate the workflow configuration.

        Raises:
            ValueError: If configuration is invalid
        """
        if self.batch_size < 1:
            raise ValueError(f"batch_size must be at least 1, got {self.batch_size}")

        if not 50 <= self.context_threshold <= 100:
            raise ValueError(
                f"context_threshold must be between 50 and 100, got {self.context_threshold}"
            )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkflowConfig":
        """Create WorkflowConfig from a dictionary (typically the [workflow] section).

        Args:
            data: Dictionary with workflow configuration values

        Returns:
            WorkflowConfig instance

        Raises:
            ValueError: If mode is invalid
        """
        config = cls()

        # Parse mode
        if "mode" in data:
            mode_str = data["mode"].lower()
            try:
                config.mode = WorkflowMode(mode_str)
            except ValueError:
                valid = [m.value for m in WorkflowMode]
                raise ValueError(
                    f"Invalid workflow mode '{mode_str}'. Must be one of: {valid}"
                )

        # Parse boolean fields
        if "auto_validate" in data:
            config.auto_validate = bool(data["auto_validate"])

        if "journal_enabled" in data:
            config.journal_enabled = bool(data["journal_enabled"])

        # Parse integer fields
        if "batch_size" in data:
            config.batch_size = int(data["batch_size"])

        if "context_threshold" in data:
            config.context_threshold = int(data["context_threshold"])

        return config

    @classmethod
    def from_toml(cls, path: Path) -> "WorkflowConfig":
        """Load workflow configuration from a TOML file.

        Args:
            path: Path to the TOML configuration file

        Returns:
            WorkflowConfig instance with parsed settings

        Raises:
            FileNotFoundError: If the config file doesn't exist
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "rb") as f:
            data = tomllib.load(f)

        return cls.from_dict(data.get("workflow", {}))

    @classmethod
    def from_env(cls) -> "WorkflowConfig":
        """Create WorkflowConfig from environment variables only.

        Environment variables:
            - FOUNDRY_MCP_WORKFLOW_MODE: Workflow execution mode
            - FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE: Enable auto-validation
            - FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED: Enable journaling
            - FOUNDRY_MCP_WORKFLOW_BATCH_SIZE: Batch size
            - FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD: Context threshold

        Returns:
            WorkflowConfig instance with environment-based settings
        """
        config = cls()

        # Mode
        if mode := os.environ.get("FOUNDRY_MCP_WORKFLOW_MODE"):
            try:
                config.mode = WorkflowMode(mode.lower())
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_WORKFLOW_MODE: {mode}, using default")

        # Auto-validate
        if auto_validate := os.environ.get("FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE"):
            config.auto_validate = auto_validate.lower() in ("true", "1", "yes")

        # Journal enabled
        if journal := os.environ.get("FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED"):
            config.journal_enabled = journal.lower() in ("true", "1", "yes")

        # Batch size
        if batch_size := os.environ.get("FOUNDRY_MCP_WORKFLOW_BATCH_SIZE"):
            try:
                config.batch_size = int(batch_size)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_WORKFLOW_BATCH_SIZE: {batch_size}, using default")

        # Context threshold
        if threshold := os.environ.get("FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD"):
            try:
                config.context_threshold = int(threshold)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_WORKFLOW_CONTEXT_THRESHOLD: {threshold}, using default")

        return config

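# Usage sketch (illustrative): a batch-mode [workflow] section parsed via
# from_dict, and the error an out-of-range context_threshold raises.
#
#     >>> wf = WorkflowConfig.from_dict({"mode": "batch", "batch_size": 3})
#     >>> (wf.mode, wf.batch_size)
#     (<WorkflowMode.BATCH: 'batch'>, 3)
#     >>> WorkflowConfig(context_threshold=40).validate()
#     Traceback (most recent call last):
#     ...
#     ValueError: context_threshold must be between 50 and 100, got 40

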
def load_workflow_config(
    config_file: Optional[Path] = None,
    use_env_fallback: bool = True,
) -> WorkflowConfig:
    """Load workflow configuration from TOML file with environment fallback.

    Priority (highest to lowest):
        1. TOML config file (if provided or found at default locations)
        2. Environment variables
        3. Default values

    Args:
        config_file: Optional path to TOML config file
        use_env_fallback: Whether to use environment variables as fallback

    Returns:
        WorkflowConfig instance with merged settings
    """
    config = WorkflowConfig()

    # Try to load from TOML
    toml_loaded = False
    if config_file and config_file.exists():
        try:
            config = WorkflowConfig.from_toml(config_file)
            toml_loaded = True
            logger.debug(f"Loaded workflow config from {config_file}")
        except Exception as e:
            logger.warning(f"Failed to load workflow config from {config_file}: {e}")
    else:
        # Try default locations
        default_paths = [
            Path("foundry-mcp.toml"),
            Path(".foundry-mcp.toml"),
            Path.home() / ".config" / "foundry-mcp" / "config.toml",
        ]
        for path in default_paths:
            if path.exists():
                try:
                    config = WorkflowConfig.from_toml(path)
                    toml_loaded = True
                    logger.debug(f"Loaded workflow config from {path}")
                    break
                except Exception as e:
                    logger.debug(f"Failed to load from {path}: {e}")

    # Apply environment variable overrides
    if use_env_fallback:
        env_config = WorkflowConfig.from_env()

        # Mode override
        if not toml_loaded or config.mode == WorkflowMode.SINGLE:
            if os.environ.get("FOUNDRY_MCP_WORKFLOW_MODE"):
                config.mode = env_config.mode

        # Boolean overrides (env can override TOML)
        if os.environ.get("FOUNDRY_MCP_WORKFLOW_AUTO_VALIDATE"):
            config.auto_validate = env_config.auto_validate

        if os.environ.get("FOUNDRY_MCP_WORKFLOW_JOURNAL_ENABLED"):
            config.journal_enabled = env_config.journal_enabled

        # Integer overrides
        if config.batch_size == 5 and env_config.batch_size != 5:
            config.batch_size = env_config.batch_size

        if config.context_threshold == 85 and env_config.context_threshold != 85:
            config.context_threshold = env_config.context_threshold

    return config


# Global workflow configuration instance
_workflow_config: Optional[WorkflowConfig] = None


def get_workflow_config() -> WorkflowConfig:
    """Get the global workflow configuration instance.

    Returns:
        WorkflowConfig instance (loaded from file/env on first call)
    """
    global _workflow_config
    if _workflow_config is None:
        _workflow_config = load_workflow_config()
    return _workflow_config


def set_workflow_config(config: WorkflowConfig) -> None:
    """Set the global workflow configuration instance.

    Args:
        config: WorkflowConfig instance to use globally
    """
    global _workflow_config
    _workflow_config = config


def reset_workflow_config() -> None:
    """Reset the global workflow configuration to None.

    Useful for testing or reloading configuration.
    """
    global _workflow_config
    _workflow_config = None

# =============================================================================
# Consultation Configuration
# =============================================================================


@dataclass
class WorkflowConsultationConfig:
    """Per-workflow consultation configuration overrides.

    Allows individual workflows to specify minimum model requirements,
    timeout overrides, and default review types for AI consultations.

    TOML Configuration Example:

        [consultation.workflows.fidelity_review]
        min_models = 2
        timeout_override = 600.0
        default_review_type = "full"

        [consultation.workflows.plan_review]
        min_models = 3
        default_review_type = "full"

    Attributes:
        min_models: Minimum number of models required for consensus (default: 1).
            When set > 1, the consultation orchestrator will gather
            responses from multiple providers before synthesizing.
        timeout_override: Optional timeout override in seconds. When set,
            overrides the default_timeout from ConsultationConfig
            for this specific workflow.
        default_review_type: Default review type for this workflow (default: "full").
            Valid values: "quick", "full", "security", "feasibility".
            Used when no explicit review_type is provided in requests.
    """

    min_models: int = 1
    timeout_override: Optional[float] = None
    default_review_type: str = "full"

    # Valid review types
    VALID_REVIEW_TYPES = {"quick", "full", "security", "feasibility"}

    def validate(self) -> None:
        """Validate the workflow consultation configuration.

        Raises:
            ValueError: If configuration is invalid
        """
        if self.min_models < 1:
            raise ValueError(f"min_models must be at least 1, got {self.min_models}")

        if self.timeout_override is not None and self.timeout_override <= 0:
            raise ValueError(
                f"timeout_override must be positive if set, got {self.timeout_override}"
            )

        if self.default_review_type not in self.VALID_REVIEW_TYPES:
            raise ValueError(
                f"default_review_type must be one of {sorted(self.VALID_REVIEW_TYPES)}, "
                f"got '{self.default_review_type}'"
            )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkflowConsultationConfig":
        """Create WorkflowConsultationConfig from a dictionary.

        Args:
            data: Dictionary with workflow consultation configuration values

        Returns:
            WorkflowConsultationConfig instance
        """
        config = cls()

        if "min_models" in data:
            config.min_models = int(data["min_models"])

        if "timeout_override" in data:
            value = data["timeout_override"]
            if value is not None:
                config.timeout_override = float(value)

        if "default_review_type" in data:
            config.default_review_type = str(data["default_review_type"]).lower()

        return config

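# Usage sketch (illustrative): per-workflow overrides as parsed from the
# [consultation.workflows.*] tables shown in the docstring above; the
# unspecified review type keeps its "full" default.
#
#     >>> wc = WorkflowConsultationConfig.from_dict(
#     ...     {"min_models": 2, "timeout_override": 600.0}
#     ... )
#     >>> (wc.min_models, wc.timeout_override, wc.default_review_type)
#     (2, 600.0, 'full')

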
@dataclass
class ConsultationConfig:
    """AI consultation configuration parsed from foundry-mcp.toml [consultation] section.

    TOML Configuration Example:

        [consultation]
        # Provider priority list - first available wins
        # Format: "[api]provider/model" or "[cli]transport[:backend/model|:model]"
        priority = [
            "[cli]gemini:pro",
            "[cli]claude:opus",
            "[cli]opencode:openai/gpt-5.2",
            "[api]openai/gpt-4.1",
        ]

        # Operational settings (bare keys must precede the subtables below)
        default_timeout = 300    # Default timeout in seconds (default: 300)
        max_retries = 2          # Max retry attempts on failure (default: 2)
        retry_delay = 5.0        # Delay between retries in seconds (default: 5.0)
        fallback_enabled = true  # Enable fallback to next provider (default: true)
        cache_ttl = 3600         # Cache TTL in seconds (default: 3600)

        # Per-provider overrides (optional)
        [consultation.overrides]
        "[cli]opencode:openai/gpt-5.2" = { timeout = 600 }
        "[api]openai/gpt-4.1" = { temperature = 0.3 }

        # Per-workflow configuration (optional)
        [consultation.workflows.fidelity_review]
        min_models = 2           # Require 2 models for consensus
        timeout_override = 600.0 # Override default timeout

        [consultation.workflows.plan_review]
        min_models = 3           # Require 3 models for plan reviews

    Environment Variables:
        - FOUNDRY_MCP_CONSULTATION_TIMEOUT: Default timeout
        - FOUNDRY_MCP_CONSULTATION_MAX_RETRIES: Max retry attempts
        - FOUNDRY_MCP_CONSULTATION_RETRY_DELAY: Delay between retries
        - FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED: Enable provider fallback
        - FOUNDRY_MCP_CONSULTATION_CACHE_TTL: Cache TTL
        - FOUNDRY_MCP_CONSULTATION_PRIORITY: Comma-separated priority list

    Attributes:
        priority: List of provider specs in priority order (first available wins)
        overrides: Per-provider setting overrides (keyed by spec string)
        default_timeout: Default timeout for AI consultations in seconds
        max_retries: Maximum retry attempts on transient failures
        retry_delay: Delay between retry attempts in seconds
        fallback_enabled: Whether to try next provider on failure
        cache_ttl: Time-to-live for cached consultation results in seconds
        workflows: Per-workflow configuration overrides (keyed by workflow name)
    """

    priority: List[str] = field(default_factory=list)
    overrides: Dict[str, Dict[str, Any]] = field(default_factory=dict)
    default_timeout: float = 300.0
    max_retries: int = 2
    retry_delay: float = 5.0
    fallback_enabled: bool = True
    cache_ttl: int = 3600
    workflows: Dict[str, WorkflowConsultationConfig] = field(default_factory=dict)

    def get_provider_specs(self) -> List[ProviderSpec]:
        """Parse priority list into ProviderSpec objects.

        Returns:
            List of parsed ProviderSpec instances

        Raises:
            ValueError: If any spec in priority list is invalid
        """
        return [ProviderSpec.parse(spec) for spec in self.priority]

    def get_override(self, spec: str) -> Dict[str, Any]:
        """Get override settings for a specific provider spec.

        Args:
            spec: Provider spec string (e.g., "[api]openai/gpt-4.1")

        Returns:
            Override dictionary (empty if no overrides configured)
        """
        return self.overrides.get(spec, {})

    def get_workflow_config(self, workflow_name: str) -> WorkflowConsultationConfig:
        """Get configuration for a specific workflow.

        Args:
            workflow_name: Name of the workflow (e.g., "fidelity_review", "plan_review")

        Returns:
            WorkflowConsultationConfig for the workflow. Returns a default instance
            with min_models=1 if no workflow-specific config exists.

        Examples:
            >>> config = ConsultationConfig()
            >>> config.workflows["fidelity_review"] = WorkflowConsultationConfig(min_models=2)
            >>> fidelity = config.get_workflow_config("fidelity_review")
            >>> fidelity.min_models
            2
            >>> unknown = config.get_workflow_config("unknown_workflow")
            >>> unknown.min_models
            1
        """
        return self.workflows.get(workflow_name, WorkflowConsultationConfig())

    def validate(self) -> None:
        """Validate the consultation configuration.

        Raises:
            ValueError: If configuration is invalid
        """
        if self.default_timeout <= 0:
            raise ValueError(f"default_timeout must be positive, got {self.default_timeout}")

        if self.max_retries < 0:
            raise ValueError(f"max_retries must be non-negative, got {self.max_retries}")

        if self.retry_delay < 0:
            raise ValueError(f"retry_delay must be non-negative, got {self.retry_delay}")

        if self.cache_ttl <= 0:
            raise ValueError(f"cache_ttl must be positive, got {self.cache_ttl}")

        # Validate priority list
        all_errors = []
        for spec_str in self.priority:
            try:
                spec = ProviderSpec.parse(spec_str)
                errors = spec.validate()
                if errors:
                    all_errors.extend([f"{spec_str}: {e}" for e in errors])
            except ValueError as e:
                all_errors.append(f"{spec_str}: {e}")

        if all_errors:
            raise ValueError("Invalid provider specs in priority list:\n" + "\n".join(all_errors))

        # Validate workflow configurations
        workflow_errors = []
        for workflow_name, workflow_config in self.workflows.items():
            try:
                workflow_config.validate()
            except ValueError as e:
                workflow_errors.append(f"workflows.{workflow_name}: {e}")

        if workflow_errors:
            raise ValueError("Invalid workflow configurations:\n" + "\n".join(workflow_errors))

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ConsultationConfig":
        """Create ConsultationConfig from a dictionary (typically the [consultation] section).

        Args:
            data: Dictionary with consultation configuration values

        Returns:
            ConsultationConfig instance
        """
        config = cls()

        # Parse priority list
        if "priority" in data:
            priority = data["priority"]
            if isinstance(priority, list):
                config.priority = [str(p) for p in priority]
            else:
                logger.warning(f"Invalid priority format (expected list): {type(priority)}")

        # Parse overrides
        if "overrides" in data:
            overrides = data["overrides"]
            if isinstance(overrides, dict):
                config.overrides = {str(k): dict(v) for k, v in overrides.items()}
            else:
                logger.warning(f"Invalid overrides format (expected dict): {type(overrides)}")

        if "default_timeout" in data:
            config.default_timeout = float(data["default_timeout"])

        if "max_retries" in data:
            config.max_retries = int(data["max_retries"])

        if "retry_delay" in data:
            config.retry_delay = float(data["retry_delay"])

        if "fallback_enabled" in data:
            config.fallback_enabled = bool(data["fallback_enabled"])

        if "cache_ttl" in data:
            config.cache_ttl = int(data["cache_ttl"])

        # Parse workflow configurations
        if "workflows" in data:
            workflows = data["workflows"]
            if isinstance(workflows, dict):
                for workflow_name, workflow_data in workflows.items():
                    if isinstance(workflow_data, dict):
                        config.workflows[str(workflow_name)] = (
                            WorkflowConsultationConfig.from_dict(workflow_data)
                        )
                    else:
                        logger.warning(
                            f"Invalid workflow config format for '{workflow_name}' "
                            f"(expected dict): {type(workflow_data)}"
                        )
            else:
                logger.warning(f"Invalid workflows format (expected dict): {type(workflows)}")

        return config

    @classmethod
    def from_toml(cls, path: Path) -> "ConsultationConfig":
        """Load consultation configuration from a TOML file.

        Args:
            path: Path to the TOML configuration file

        Returns:
            ConsultationConfig instance with parsed settings

        Raises:
            FileNotFoundError: If the config file doesn't exist
        """
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        with open(path, "rb") as f:
            data = tomllib.load(f)

        return cls.from_dict(data.get("consultation", {}))

    @classmethod
    def from_env(cls) -> "ConsultationConfig":
        """Create ConsultationConfig from environment variables only.

        Environment variables:
            - FOUNDRY_MCP_CONSULTATION_PRIORITY: Comma-separated priority list
            - FOUNDRY_MCP_CONSULTATION_TIMEOUT: Default timeout in seconds
            - FOUNDRY_MCP_CONSULTATION_MAX_RETRIES: Max retry attempts
            - FOUNDRY_MCP_CONSULTATION_RETRY_DELAY: Delay between retries
            - FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED: Enable fallback (true/false)
            - FOUNDRY_MCP_CONSULTATION_CACHE_TTL: Cache TTL in seconds

        Returns:
            ConsultationConfig instance with environment-based settings
        """
        config = cls()

        # Priority list (comma-separated)
        if priority := os.environ.get("FOUNDRY_MCP_CONSULTATION_PRIORITY"):
            config.priority = [p.strip() for p in priority.split(",") if p.strip()]

        # Timeout
        if timeout := os.environ.get("FOUNDRY_MCP_CONSULTATION_TIMEOUT"):
            try:
                config.default_timeout = float(timeout)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_CONSULTATION_TIMEOUT: {timeout}, using default")

        # Max retries
        if max_retries := os.environ.get("FOUNDRY_MCP_CONSULTATION_MAX_RETRIES"):
            try:
                config.max_retries = int(max_retries)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_CONSULTATION_MAX_RETRIES: {max_retries}, using default")

        # Retry delay
        if retry_delay := os.environ.get("FOUNDRY_MCP_CONSULTATION_RETRY_DELAY"):
            try:
                config.retry_delay = float(retry_delay)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_CONSULTATION_RETRY_DELAY: {retry_delay}, using default")

        # Fallback enabled
        if fallback := os.environ.get("FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED"):
            config.fallback_enabled = fallback.lower() in ("true", "1", "yes")

        # Cache TTL
        if cache_ttl := os.environ.get("FOUNDRY_MCP_CONSULTATION_CACHE_TTL"):
            try:
                config.cache_ttl = int(cache_ttl)
            except ValueError:
                logger.warning(f"Invalid FOUNDRY_MCP_CONSULTATION_CACHE_TTL: {cache_ttl}, using default")

        return config

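# Usage sketch (illustrative): the priority list parses into ProviderSpec
# objects, and per-spec overrides are looked up by the exact spec string.
#
#     >>> cc = ConsultationConfig.from_dict({
#     ...     "priority": ["[cli]gemini:pro", "[api]openai/gpt-4.1"],
#     ...     "overrides": {"[api]openai/gpt-4.1": {"temperature": 0.3}},
#     ... })
#     >>> [s.type for s in cc.get_provider_specs()]
#     ['cli', 'api']
#     >>> cc.get_override("[api]openai/gpt-4.1")
#     {'temperature': 0.3}

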
def load_consultation_config(
    config_file: Optional[Path] = None,
    use_env_fallback: bool = True,
) -> ConsultationConfig:
    """Load consultation configuration from TOML file with environment fallback.

    Priority (highest to lowest):
        1. TOML config file (if provided or found at default locations)
        2. Environment variables
        3. Default values

    Args:
        config_file: Optional path to TOML config file
        use_env_fallback: Whether to use environment variables as fallback

    Returns:
        ConsultationConfig instance with merged settings
    """
    config = ConsultationConfig()

    # Try to load from TOML
    if config_file and config_file.exists():
        try:
            config = ConsultationConfig.from_toml(config_file)
            logger.debug(f"Loaded consultation config from {config_file}")
        except Exception as e:
            logger.warning(f"Failed to load consultation config from {config_file}: {e}")
    else:
        # Try default locations
        default_paths = [
            Path("foundry-mcp.toml"),
            Path(".foundry-mcp.toml"),
            Path.home() / ".config" / "foundry-mcp" / "config.toml",
        ]
        for path in default_paths:
            if path.exists():
                try:
                    config = ConsultationConfig.from_toml(path)
                    logger.debug(f"Loaded consultation config from {path}")
                    break
                except Exception as e:
                    logger.debug(f"Failed to load from {path}: {e}")

    # Apply environment variable overrides
    if use_env_fallback:
        env_config = ConsultationConfig.from_env()

        # Priority override (env can override TOML if set)
        if not config.priority and env_config.priority:
            config.priority = env_config.priority
        elif os.environ.get("FOUNDRY_MCP_CONSULTATION_PRIORITY"):
            # Explicit env var overrides TOML
            config.priority = env_config.priority

        # Timeout override
        if config.default_timeout == 300.0 and env_config.default_timeout != 300.0:
            config.default_timeout = env_config.default_timeout

        # Max retries override
        if config.max_retries == 2 and env_config.max_retries != 2:
            config.max_retries = env_config.max_retries

        # Retry delay override
        if config.retry_delay == 5.0 and env_config.retry_delay != 5.0:
            config.retry_delay = env_config.retry_delay

        # Fallback enabled (env can override TOML)
        if os.environ.get("FOUNDRY_MCP_CONSULTATION_FALLBACK_ENABLED"):
            config.fallback_enabled = env_config.fallback_enabled

        # Cache TTL override
        if config.cache_ttl == 3600 and env_config.cache_ttl != 3600:
            config.cache_ttl = env_config.cache_ttl

    return config


# Global consultation configuration instance
_consultation_config: Optional[ConsultationConfig] = None


def get_consultation_config() -> ConsultationConfig:
    """Get the global consultation configuration instance.

    Returns:
        ConsultationConfig instance (loaded from file/env on first call)
    """
    global _consultation_config
    if _consultation_config is None:
        _consultation_config = load_consultation_config()
    return _consultation_config


def set_consultation_config(config: ConsultationConfig) -> None:
    """Set the global consultation configuration instance.

    Args:
        config: ConsultationConfig instance to use globally
    """
    global _consultation_config
    _consultation_config = config


def reset_consultation_config() -> None:
    """Reset the global consultation configuration to None.

    Useful for testing or reloading configuration.
    """
    global _consultation_config
    _consultation_config = None

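# Override-semantics sketch (illustrative): FOUNDRY_MCP_CONSULTATION_PRIORITY
# replaces a TOML priority list outright, unlike the default-guarded numeric
# overrides above, which only apply when the TOML left the default in place.
#
#     >>> os.environ["FOUNDRY_MCP_CONSULTATION_PRIORITY"] = (
#     ...     "[cli]claude:opus, [api]openai/gpt-4.1"
#     ... )
#     >>> load_consultation_config().priority
#     ['[cli]claude:opus', '[api]openai/gpt-4.1']

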
__all__ = [
    # Provider Spec (unified priority notation)
    "ProviderSpec",
    # LLM Config
    "LLMProviderType",
    "LLMConfig",
    "load_llm_config",
    "get_llm_config",
    "set_llm_config",
    "reset_llm_config",
    "DEFAULT_MODELS",
    "API_KEY_ENV_VARS",
    # Workflow Config
    "WorkflowMode",
    "WorkflowConfig",
    "load_workflow_config",
    "get_workflow_config",
    "set_workflow_config",
    "reset_workflow_config",
    # Consultation Config
    "WorkflowConsultationConfig",
    "ConsultationConfig",
    "load_consultation_config",
    "get_consultation_config",
    "set_consultation_config",
    "reset_consultation_config",
]
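
Taken together, the module exposes three parallel configuration families (llm, workflow, consultation), each with a TOML/env loader and a lazily cached global accessor. A minimal consumption sketch (illustrative only; assumes the wheel is installed and importable as foundry_mcp):

    from pathlib import Path
    from foundry_mcp.core.llm_config import get_llm_config, load_consultation_config

    llm = get_llm_config()                      # loads from TOML/env on first call
    print(llm.provider.value, llm.get_model())  # e.g. "openai gpt-4.1"

    consultation = load_consultation_config(Path("foundry-mcp.toml"))
    for spec in consultation.get_provider_specs():
        print(spec)                             # canonical bracket notation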