foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/__init__.py +0 -13
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/session.py +1 -8
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/context.py +39 -0
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +615 -11
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/batch_operations.py +1196 -0
- foundry_mcp/core/discovery.py +7 -7
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +28 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/naming.py +25 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prometheus.py +0 -13
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/__init__.py +12 -0
- foundry_mcp/core/providers/base.py +39 -0
- foundry_mcp/core/providers/claude.py +51 -48
- foundry_mcp/core/providers/codex.py +70 -60
- foundry_mcp/core/providers/cursor_agent.py +25 -47
- foundry_mcp/core/providers/detectors.py +34 -7
- foundry_mcp/core/providers/gemini.py +69 -58
- foundry_mcp/core/providers/opencode.py +101 -47
- foundry_mcp/core/providers/package-lock.json +4 -4
- foundry_mcp/core/providers/package.json +1 -1
- foundry_mcp/core/providers/validation.py +128 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1220 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4020 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +690 -0
- foundry_mcp/core/spec.py +2439 -236
- foundry_mcp/core/task.py +1205 -31
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +319 -43
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +0 -14
- foundry_mcp/tools/unified/__init__.py +39 -18
- foundry_mcp/tools/unified/authoring.py +2371 -248
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +434 -32
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +133 -2
- foundry_mcp/tools/unified/provider.py +0 -40
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +374 -17
- foundry_mcp/tools/unified/review_helpers.py +16 -1
- foundry_mcp/tools/unified/server.py +9 -24
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1664 -30
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
- foundry_mcp-0.8.10.dist-info/RECORD +153 -0
- foundry_mcp/cli/flags.py +0 -266
- foundry_mcp/core/feature_flags.py +0 -592
- foundry_mcp-0.3.3.dist-info/RECORD +0 -135
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
The code hunks below are from foundry_mcp/core/ai_consultation.py (+146 -9).

```diff
@@ -44,7 +44,7 @@ import hashlib
 import json
 import logging
 import time
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, replace
 from enum import Enum
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Sequence, Union
```
```diff
@@ -1305,6 +1305,143 @@ class ConsultationOrchestrator:
             warnings=warnings,
         )
 
+    async def _execute_parallel_providers_with_fallback_async(
+        self,
+        request: ConsultationRequest,
+        prompt: str,
+        all_providers: List[ResolvedProvider],
+        min_models: int = 1,
+    ) -> ConsensusResult:
+        """
+        Execute providers in parallel with sequential fallback on failures.
+
+        Uses a two-phase approach:
+        1. Execute first min_models providers in parallel
+        2. If any fail and fallback_enabled, try remaining providers sequentially
+           until min_models succeed or providers exhausted
+
+        Args:
+            request: The consultation request
+            prompt: The rendered prompt
+            all_providers: Complete priority list of providers to try
+            min_models: Minimum successful models required
+
+        Returns:
+            ConsensusResult with all attempted provider responses
+        """
+        start_time = time.time()
+        warnings: List[str] = []
+        all_responses: List[ProviderResponse] = []
+
+        if not all_providers:
+            return ConsensusResult(
+                workflow=request.workflow,
+                responses=[],
+                duration_ms=0.0,
+                warnings=["No providers available for parallel execution"],
+            )
+
+        # Phase 1: Initial parallel execution of first min_models providers
+        initial_providers = all_providers[:min_models]
+        logger.debug(
+            f"Phase 1: Executing {len(initial_providers)} providers in parallel"
+        )
+
+        tasks = [
+            self._execute_single_provider_async(request, prompt, resolved)
+            for resolved in initial_providers
+        ]
+        initial_responses: List[ProviderResponse] = await asyncio.gather(*tasks)
+        all_responses.extend(initial_responses)
+
+        # Count successes and log failures
+        # A response is only truly successful if it has non-empty content
+        successful_count = sum(
+            1 for r in initial_responses if r.success and r.content.strip()
+        )
+        for response in initial_responses:
+            if not response.success:
+                warnings.append(
+                    f"Provider {response.provider_id} failed: {response.error}"
+                )
+            elif not response.content.strip():
+                warnings.append(
+                    f"Provider {response.provider_id} returned empty content"
+                )
+
+        # Phase 2: Sequential fallback if needed and enabled
+        if successful_count < min_models and self._config.fallback_enabled:
+            needed = min_models - successful_count
+            remaining_providers = all_providers[min_models:]
+
+            if remaining_providers:
+                warnings.append(
+                    f"Initial parallel execution yielded {successful_count}/{min_models} "
+                    f"successes, attempting fallback for {needed} more"
+                )
+
+            for fallback_provider in remaining_providers:
+                # Skip if already tried (shouldn't happen, but safety check)
+                if any(
+                    r.provider_id == fallback_provider.provider_id
+                    for r in all_responses
+                ):
+                    continue
+
+                # Check if provider is available
+                if not check_provider_available(fallback_provider.provider_id):
+                    warnings.append(
+                        f"Fallback provider {fallback_provider.provider_id} "
+                        "is not available, skipping"
+                    )
+                    continue
+
+                logger.debug(
+                    f"Fallback attempt: trying provider {fallback_provider.provider_id}"
+                )
+
+                response = await self._execute_single_provider_async(
+                    request, prompt, fallback_provider
+                )
+                all_responses.append(response)
+
+                if response.success and response.content.strip():
+                    successful_count += 1
+                    warnings.append(
+                        f"Fallback provider {fallback_provider.provider_id} succeeded"
+                    )
+                    if successful_count >= min_models:
+                        logger.debug(
+                            f"Reached {min_models} successful providers via fallback"
+                        )
+                        break
+                elif response.success and not response.content.strip():
+                    warnings.append(
+                        f"Fallback provider {fallback_provider.provider_id} "
+                        "returned empty content"
+                    )
+                else:
+                    warnings.append(
+                        f"Fallback provider {fallback_provider.provider_id} "
+                        f"failed: {response.error}"
+                    )
+
+        duration_ms = (time.time() - start_time) * 1000
+
+        # Final warning if still insufficient
+        if successful_count < min_models:
+            warnings.append(
+                f"Only {successful_count} of {min_models} required models succeeded "
+                f"after trying {len(all_responses)} provider(s)"
+            )
+
+        return ConsensusResult(
+            workflow=request.workflow,
+            responses=all_responses,
+            duration_ms=duration_ms,
+            warnings=warnings,
+        )
+
     def _execute_with_fallback(
         self,
         request: ConsultationRequest,
```
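Distilled from the method added above, the sketch below shows the two-phase pattern in isolation: gather the first `min_models` providers concurrently, then walk the remaining ones sequentially until enough non-empty successes accumulate. It is a minimal illustration only; `run_with_fallback`, `Result`, and `fake_call` are stand-in names, not part of foundry-mcp.

```python
import asyncio
from dataclasses import dataclass
from typing import Awaitable, Callable, List


@dataclass
class Result:
    provider_id: str
    success: bool
    content: str = ""
    error: str = ""


async def run_with_fallback(
    providers: List[str],
    call: Callable[[str], Awaitable[Result]],
    min_models: int = 1,
    fallback_enabled: bool = True,
) -> List[Result]:
    # Phase 1: run the first `min_models` providers concurrently.
    results = list(await asyncio.gather(*(call(p) for p in providers[:min_models])))
    ok = sum(1 for r in results if r.success and r.content.strip())

    # Phase 2: try the remaining providers one at a time until enough succeed.
    if ok < min_models and fallback_enabled:
        for provider in providers[min_models:]:
            r = await call(provider)  # sequential fallback attempt
            results.append(r)
            if r.success and r.content.strip():
                ok += 1
                if ok >= min_models:
                    break  # enough successful, content-bearing responses
    return results


# Example usage with stubbed providers: the first one fails, forcing a fallback.
async def fake_call(provider_id: str) -> Result:
    if provider_id == "claude":
        return Result(provider_id, success=False, error="timeout")
    return Result(provider_id, success=True, content=f"answer from {provider_id}")


if __name__ == "__main__":
    out = asyncio.run(run_with_fallback(["claude", "gemini", "codex"], fake_call, min_models=2))
    print([(r.provider_id, r.success) for r in out])
```

Counting a response as successful only when it has non-empty content mirrors the stricter check in the diff, where a provider that succeeds but returns an empty string is recorded as a warning rather than counted toward `min_models`.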
```diff
@@ -1490,6 +1627,10 @@ class ConsultationOrchestrator:
         workflow_config = self._config.get_workflow_config(effective_workflow)
         min_models = workflow_config.min_models
 
+        # Apply workflow-specific timeout override if configured
+        if workflow_config.timeout_override is not None:
+            request = replace(request, timeout=workflow_config.timeout_override)
+
         # Generate cache key
         cache_key = self._generate_cache_key(request)
 
```
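The override rebuilds the request rather than mutating it: `dataclasses.replace` (the new import from the first hunk) returns a copy of a dataclass instance with the named fields overridden, leaving the original untouched. A small illustration, with `Request` as a hypothetical stand-in for the real `ConsultationRequest`:

```python
from dataclasses import dataclass, replace


@dataclass(frozen=True)
class Request:  # hypothetical stand-in for ConsultationRequest
    workflow: str
    timeout: float = 60.0


req = Request(workflow="fidelity_review")
slow = replace(req, timeout=300.0)  # new instance with only `timeout` changed

print(req.timeout)   # 60.0 — the original is untouched
print(slow.timeout)  # 300.0
```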
```diff
@@ -1533,14 +1674,10 @@ class ConsultationOrchestrator:
         providers = self._get_providers_to_try(request)
 
         if min_models > 1:
-            # Multi-model mode: execute providers in parallel
-            #
-
-
-            )
-
-            result = await self._execute_parallel_providers_async(
-                request, prompt, providers_to_use, min_models
+            # Multi-model mode: execute providers in parallel with fallback support
+            # Pass full provider list - fallback will try additional providers if needed
+            result = await self._execute_parallel_providers_with_fallback_async(
+                request, prompt, providers, min_models
             )
             return result
         else:
```