foundry-mcp 0.7.0__py3-none-any.whl → 0.8.10__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- foundry_mcp/cli/__init__.py +0 -13
- foundry_mcp/cli/commands/session.py +1 -8
- foundry_mcp/cli/context.py +39 -0
- foundry_mcp/config.py +381 -7
- foundry_mcp/core/batch_operations.py +1196 -0
- foundry_mcp/core/discovery.py +1 -1
- foundry_mcp/core/llm_config.py +8 -0
- foundry_mcp/core/naming.py +25 -2
- foundry_mcp/core/prometheus.py +0 -13
- foundry_mcp/core/providers/__init__.py +12 -0
- foundry_mcp/core/providers/base.py +39 -0
- foundry_mcp/core/providers/claude.py +45 -1
- foundry_mcp/core/providers/codex.py +64 -3
- foundry_mcp/core/providers/cursor_agent.py +22 -3
- foundry_mcp/core/providers/detectors.py +34 -7
- foundry_mcp/core/providers/gemini.py +63 -1
- foundry_mcp/core/providers/opencode.py +95 -71
- foundry_mcp/core/providers/package-lock.json +4 -4
- foundry_mcp/core/providers/package.json +1 -1
- foundry_mcp/core/providers/validation.py +128 -0
- foundry_mcp/core/research/memory.py +103 -0
- foundry_mcp/core/research/models.py +783 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +5 -2
- foundry_mcp/core/research/workflows/base.py +106 -12
- foundry_mcp/core/research/workflows/consensus.py +160 -17
- foundry_mcp/core/research/workflows/deep_research.py +4020 -0
- foundry_mcp/core/responses.py +240 -0
- foundry_mcp/core/spec.py +1 -0
- foundry_mcp/core/task.py +141 -12
- foundry_mcp/core/validation.py +6 -1
- foundry_mcp/server.py +0 -52
- foundry_mcp/tools/unified/__init__.py +37 -18
- foundry_mcp/tools/unified/authoring.py +0 -33
- foundry_mcp/tools/unified/environment.py +202 -29
- foundry_mcp/tools/unified/plan.py +20 -1
- foundry_mcp/tools/unified/provider.py +0 -40
- foundry_mcp/tools/unified/research.py +644 -19
- foundry_mcp/tools/unified/review.py +5 -2
- foundry_mcp/tools/unified/review_helpers.py +16 -1
- foundry_mcp/tools/unified/server.py +9 -24
- foundry_mcp/tools/unified/task.py +528 -9
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +2 -1
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/RECORD +52 -46
- foundry_mcp/cli/flags.py +0 -266
- foundry_mcp/core/feature_flags.py +0 -592
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/research/workflows/consensus.py (+160 -17)

@@ -6,9 +6,12 @@ synthesis strategies for combining responses.
 
 import asyncio
 import logging
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import Any, Optional
 
 from foundry_mcp.config import ResearchConfig
+from foundry_mcp.core.llm_config import ProviderSpec
 from foundry_mcp.core.providers import ProviderHooks, ProviderRequest, ProviderStatus
 from foundry_mcp.core.providers.registry import available_providers, resolve_provider
 from foundry_mcp.core.research.memory import ResearchMemory
@@ -75,18 +78,36 @@ class ConsensusWorkflow(ResearchWorkflowBase):
         Returns:
             WorkflowResult with synthesized or combined response
         """
-        # Resolve providers
-        provider_ids = providers or self.config.consensus_providers
+        # Resolve providers - parse specs and check availability
+        provider_specs = providers or self.config.consensus_providers
         available = available_providers()
-        valid_providers = [p for p in provider_ids if p in available]
 
-        if not valid_providers:
+        # Parse each provider spec and filter by availability
+        valid_specs: list[ProviderSpec] = []
+        for spec_str in provider_specs:
+            try:
+                spec = ProviderSpec.parse_flexible(spec_str)
+                if spec.provider in available:
+                    valid_specs.append(spec)
+                else:
+                    logger.warning(
+                        "Provider %s (from spec '%s') not available",
+                        spec.provider,
+                        spec_str,
+                    )
+            except ValueError as exc:
+                logger.warning("Invalid provider spec '%s': %s", spec_str, exc)
+
+        if not valid_specs:
             return WorkflowResult(
                 success=False,
                 content="",
-                error=f"No valid providers available. Requested: {provider_ids}, Available: {available}",
+                error=f"No valid providers available. Requested: {provider_specs}, Available: {available}",
             )
 
+        # Use full spec strings for tracking, but we'll parse again when resolving
+        valid_providers = [spec.raw or f"{spec.provider}:{spec.model}" if spec.model else spec.provider for spec in valid_specs]
+
         # Create consensus config and state
         consensus_config = ConsensusConfig(
             providers=valid_providers,
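The resolution path now accepts full provider specs rather than bare IDs. `ProviderSpec.parse_flexible` lives in `foundry_mcp/core/llm_config.py` and its implementation is not part of this diff; the following is only a rough sketch of the shape such a parser might take, consistent with the fields used above (`spec.provider`, `spec.model`, `spec.raw`) and the `ValueError` caught in the filtering loop. The `ParsedSpec` class and the regex are assumptions, not the package's code.

import re
from dataclasses import dataclass
from typing import Optional

@dataclass
class ParsedSpec:
    provider: str
    model: Optional[str]
    raw: str

def parse_flexible(spec_str: str) -> ParsedSpec:
    """Parse "[cli]codex:gpt-5.2", "codex:gpt-5.2", or a bare "codex"."""
    match = re.fullmatch(
        r"(?:\[(?P<kind>\w+)\])?(?P<provider>[\w.-]+)(?::(?P<model>.+))?",
        spec_str.strip(),
    )
    if match is None:
        # Mirrors the ValueError caught by the workflow's filtering loop.
        raise ValueError(f"Unrecognized provider spec: {spec_str!r}")
    return ParsedSpec(
        provider=match.group("provider"),
        model=match.group("model"),
        raw=spec_str,
    )

assert parse_flexible("[cli]codex:gpt-5.2").model == "gpt-5.2"
assert parse_flexible("gemini").model is None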
@@ -104,16 +125,15 @@ class ConsensusWorkflow(ResearchWorkflowBase):
             system_prompt=system_prompt,
         )
 
-        # Execute parallel requests
+        # Execute parallel requests using ThreadPoolExecutor
+        # This avoids asyncio.run() conflicts with MCP server's event loop
         try:
-            responses = asyncio.run(
-                self._execute_parallel(
-                    prompt=prompt,
-                    providers=valid_providers,
-                    system_prompt=system_prompt,
-                    timeout=timeout_per_provider,
-                    max_concurrent=max_concurrent,
-                )
+            responses = self._execute_parallel_sync(
+                prompt=prompt,
+                providers=valid_providers,
+                system_prompt=system_prompt,
+                timeout=timeout_per_provider,
+                max_concurrent=max_concurrent,
             )
         except Exception as exc:
             logger.error("Parallel execution failed: %s", exc)
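The added comment states the rationale for the whole change: calling `asyncio.run()` from code already running inside the MCP server's event loop raises `RuntimeError: asyncio.run() cannot be called from a running event loop`. Below is a minimal standalone sketch of the thread-based fan-out pattern adopted here; `call_provider` is a hypothetical stand-in for a blocking `provider.generate()` call, not a foundry-mcp function.

import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed

def call_provider(provider_id: str) -> str:
    # Stand-in for a blocking provider.generate() call.
    return f"{provider_id}: ok"

async def handler() -> list[str]:
    # We are inside a running loop here, so this would raise:
    #   RuntimeError: asyncio.run() cannot be called from a running event loop
    # asyncio.run(some_coroutine())
    results: list[str] = []
    with ThreadPoolExecutor(max_workers=2) as executor:
        # Fan out to worker threads instead; the event loop itself is untouched.
        futures = {executor.submit(call_provider, p): p for p in ("alpha", "beta")}
        for future in as_completed(futures):
            results.append(future.result())
    return sorted(results)

print(asyncio.run(handler()))  # ['alpha: ok', 'beta: ok']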
@@ -167,6 +187,124 @@ class ConsensusWorkflow(ResearchWorkflowBase):
 
         return result
 
+    def _execute_parallel_sync(
+        self,
+        prompt: str,
+        providers: list[str],
+        system_prompt: Optional[str],
+        timeout: float,
+        max_concurrent: int,
+    ) -> list[ModelResponse]:
+        """Execute requests to multiple providers in parallel using ThreadPoolExecutor.
+
+        This approach avoids asyncio.run() conflicts when called from within
+        an MCP server's event loop.
+
+        Args:
+            prompt: User prompt
+            providers: Provider IDs to query
+            system_prompt: Optional system prompt
+            timeout: Timeout per provider
+            max_concurrent: Max concurrent requests
+
+        Returns:
+            List of ModelResponse objects
+        """
+        responses: list[ModelResponse] = []
+
+        with ThreadPoolExecutor(max_workers=max_concurrent) as executor:
+            # Submit all provider queries
+            future_to_provider = {
+                executor.submit(
+                    self._query_provider_sync,
+                    provider_id,
+                    prompt,
+                    system_prompt,
+                    timeout,
+                ): provider_id
+                for provider_id in providers
+            }
+
+            # Collect results as they complete
+            for future in as_completed(future_to_provider, timeout=timeout * len(providers)):
+                provider_id = future_to_provider[future]
+                try:
+                    response = future.result()
+                    responses.append(response)
+                except Exception as exc:
+                    responses.append(
+                        ModelResponse(
+                            provider_id=provider_id,
+                            content="",
+                            success=False,
+                            error_message=str(exc),
+                        )
+                    )
+
+        return responses
+
+    def _query_provider_sync(
+        self,
+        provider_id: str,
+        prompt: str,
+        system_prompt: Optional[str],
+        timeout: float,
+    ) -> ModelResponse:
+        """Query a single provider synchronously.
+
+        Args:
+            provider_id: Provider ID or full spec (e.g., "[cli]codex:gpt-5.2")
+            prompt: User prompt
+            system_prompt: Optional system prompt
+            timeout: Request timeout
+
+        Returns:
+            ModelResponse with result or error
+        """
+        start_time = time.perf_counter()
+
+        try:
+            # Parse provider spec to extract base ID and model
+            spec = ProviderSpec.parse_flexible(provider_id)
+            provider = resolve_provider(spec.provider, hooks=ProviderHooks(), model=spec.model)
+            request = ProviderRequest(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                timeout=timeout,
+            )
+
+            result = provider.generate(request)
+            duration_ms = (time.perf_counter() - start_time) * 1000
+
+            if result.status != ProviderStatus.SUCCESS:
+                return ModelResponse(
+                    provider_id=provider_id,
+                    model_used=result.model_used,
+                    content=result.content or "",
+                    success=False,
+                    error_message=f"Provider returned status: {result.status.value}",
+                    duration_ms=duration_ms,
+                )
+
+            return ModelResponse(
+                provider_id=provider_id,
+                model_used=result.model_used,
+                content=result.content,
+                success=True,
+                tokens_used=result.tokens.total_tokens if result.tokens else None,
+                duration_ms=duration_ms,
+            )
+
+        except Exception as exc:
+            duration_ms = (time.perf_counter() - start_time) * 1000
+            return ModelResponse(
+                provider_id=provider_id,
+                content="",
+                success=False,
+                error_message=str(exc),
+                duration_ms=duration_ms,
+            )
+
     async def _execute_parallel(
         self,
         prompt: str,
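One subtlety in `_execute_parallel_sync`: the `timeout=timeout * len(providers)` argument to `as_completed` is a single shared deadline for the entire batch, not a per-future limit. If it expires, `concurrent.futures.TimeoutError` is raised by the iterator itself, escaping the method and landing in the caller's `except Exception` block rather than in a per-provider error response. A standalone sketch of that behavior (the sleep durations and deadline are arbitrary, not package values):

import concurrent.futures
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

def slow(seconds: float) -> float:
    time.sleep(seconds)
    return seconds

with ThreadPoolExecutor(max_workers=2) as executor:
    futures = [executor.submit(slow, s) for s in (0.1, 5.0)]
    done: list[float] = []
    try:
        # One shared deadline for the whole batch, like
        # as_completed(future_to_provider, timeout=timeout * len(providers)).
        for future in as_completed(futures, timeout=0.5):
            done.append(future.result())
    except concurrent.futures.TimeoutError:
        print(f"completed before deadline: {done}")  # [0.1]
# Note: leaving the with-block still waits for the 5.0s worker to finish.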
@@ -175,7 +313,10 @@ class ConsensusWorkflow(ResearchWorkflowBase):
         timeout: float,
         max_concurrent: int,
     ) -> list[ModelResponse]:
-        """Execute requests to multiple providers in parallel.
+        """Execute requests to multiple providers in parallel (async version).
+
+        Note: This async method is kept for potential future use but the sync
+        version (_execute_parallel_sync) is preferred to avoid event loop conflicts.
 
         Args:
             prompt: User prompt
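If the retained async path is ever revived, the conflict-free route is to `await` it from a coroutine rather than wrap it in `asyncio.run()`. A sketch of bounded async fan-out under that assumption; `gather_with_limit` is illustrative only and not a foundry-mcp API:

import asyncio
from typing import Awaitable, TypeVar

T = TypeVar("T")

async def gather_with_limit(
    coros: list[Awaitable[T]], max_concurrent: int
) -> list[T | BaseException]:
    # Bound concurrency with a semaphore; return_exceptions=True mirrors
    # the per-provider error capture of the sync path.
    semaphore = asyncio.Semaphore(max_concurrent)

    async def bounded(coro: Awaitable[T]) -> T:
        async with semaphore:
            return await coro

    return await asyncio.gather(*(bounded(c) for c in coros), return_exceptions=True)

async def main() -> None:
    async def job(i: int) -> int:
        await asyncio.sleep(0.01)
        return i

    print(await gather_with_limit([job(i) for i in range(5)], max_concurrent=2))

asyncio.run(main())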
@@ -228,7 +369,7 @@ class ConsensusWorkflow(ResearchWorkflowBase):
         """Query a single provider asynchronously.
 
         Args:
-            provider_id: Provider
+            provider_id: Provider ID or full spec (e.g., "[cli]codex:gpt-5.2")
             prompt: User prompt
             system_prompt: Optional system prompt
             timeout: Request timeout
@@ -241,7 +382,9 @@ class ConsensusWorkflow(ResearchWorkflowBase):
         start_time = time.perf_counter()
 
         try:
-            provider = resolve_provider(provider_id, hooks=ProviderHooks())
+            # Parse provider spec to extract base ID and model
+            spec = ProviderSpec.parse_flexible(provider_id)
+            provider = resolve_provider(spec.provider, hooks=ProviderHooks(), model=spec.model)
             request = ProviderRequest(
                 prompt=prompt,
                 system_prompt=system_prompt,