foundry-mcp 0.7.0__py3-none-any.whl → 0.8.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/cli/__init__.py +0 -13
- foundry_mcp/cli/commands/session.py +1 -8
- foundry_mcp/cli/context.py +39 -0
- foundry_mcp/config.py +381 -7
- foundry_mcp/core/batch_operations.py +1196 -0
- foundry_mcp/core/discovery.py +1 -1
- foundry_mcp/core/llm_config.py +8 -0
- foundry_mcp/core/naming.py +25 -2
- foundry_mcp/core/prometheus.py +0 -13
- foundry_mcp/core/providers/__init__.py +12 -0
- foundry_mcp/core/providers/base.py +39 -0
- foundry_mcp/core/providers/claude.py +45 -1
- foundry_mcp/core/providers/codex.py +64 -3
- foundry_mcp/core/providers/cursor_agent.py +22 -3
- foundry_mcp/core/providers/detectors.py +34 -7
- foundry_mcp/core/providers/gemini.py +63 -1
- foundry_mcp/core/providers/opencode.py +95 -71
- foundry_mcp/core/providers/package-lock.json +4 -4
- foundry_mcp/core/providers/package.json +1 -1
- foundry_mcp/core/providers/validation.py +128 -0
- foundry_mcp/core/research/memory.py +103 -0
- foundry_mcp/core/research/models.py +783 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +5 -2
- foundry_mcp/core/research/workflows/base.py +106 -12
- foundry_mcp/core/research/workflows/consensus.py +160 -17
- foundry_mcp/core/research/workflows/deep_research.py +4020 -0
- foundry_mcp/core/responses.py +240 -0
- foundry_mcp/core/spec.py +1 -0
- foundry_mcp/core/task.py +141 -12
- foundry_mcp/core/validation.py +6 -1
- foundry_mcp/server.py +0 -52
- foundry_mcp/tools/unified/__init__.py +37 -18
- foundry_mcp/tools/unified/authoring.py +0 -33
- foundry_mcp/tools/unified/environment.py +202 -29
- foundry_mcp/tools/unified/plan.py +20 -1
- foundry_mcp/tools/unified/provider.py +0 -40
- foundry_mcp/tools/unified/research.py +644 -19
- foundry_mcp/tools/unified/review.py +5 -2
- foundry_mcp/tools/unified/review_helpers.py +16 -1
- foundry_mcp/tools/unified/server.py +9 -24
- foundry_mcp/tools/unified/task.py +528 -9
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +2 -1
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/RECORD +52 -46
- foundry_mcp/cli/flags.py +0 -266
- foundry_mcp/core/feature_flags.py +0 -592
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/responses.py
CHANGED
@@ -144,6 +144,7 @@ class ErrorCode(str, Enum):
     BACKUP_CORRUPTED = "BACKUP_CORRUPTED"
     ROLLBACK_FAILED = "ROLLBACK_FAILED"
     COMPARISON_FAILED = "COMPARISON_FAILED"
+    OPERATION_FAILED = "OPERATION_FAILED"

     # AI/LLM Provider errors
     AI_NO_PROVIDER = "AI_NO_PROVIDER"
@@ -1331,6 +1332,13 @@ def ai_cache_stale_error(
 # ---------------------------------------------------------------------------


+try:
+    from pydantic import BaseModel, Field
+    PYDANTIC_AVAILABLE = True
+except ImportError:
+    PYDANTIC_AVAILABLE = False
+
+
 def sanitize_error_message(
     exc: Exception,
     context: str = "",
@@ -1382,3 +1390,235 @@ def sanitize_error_message(
     # Generic fallback - don't expose exception message
     suffix = f" ({type_name})" if include_type else ""
     return f"An internal error occurred{suffix}"
+
+
+# ---------------------------------------------------------------------------
+# Batch Operation Response Schemas (Pydantic)
+# ---------------------------------------------------------------------------
+# These schemas provide type-safe definitions for batch operation responses.
+# They ensure contract stability and enable validation of batch operation data.
+
+
+if PYDANTIC_AVAILABLE:
+
+    class DependencyNode(BaseModel):
+        """A node in the dependency graph representing a task."""
+
+        id: str = Field(..., description="Task identifier")
+        title: str = Field(default="", description="Task title")
+        status: str = Field(default="", description="Task status")
+        file_path: Optional[str] = Field(
+            default=None, description="File path associated with the task"
+        )
+        is_target: bool = Field(
+            default=False, description="Whether this is a target task in the batch"
+        )
+
+    class DependencyEdge(BaseModel):
+        """An edge in the dependency graph representing a dependency relationship."""
+
+        from_id: str = Field(..., alias="from", description="Source task ID")
+        to_id: str = Field(..., alias="to", description="Target task ID")
+        edge_type: str = Field(
+            default="blocks", alias="type", description="Type of dependency (blocks)"
+        )
+
+        model_config = {"populate_by_name": True}
+
+    class DependencyGraph(BaseModel):
+        """Dependency graph structure for batch tasks.
+
+        Contains nodes (tasks) and edges (dependency relationships) to visualize
+        task dependencies for parallel execution planning.
+        """
+
+        nodes: list[DependencyNode] = Field(
+            default_factory=list, description="Task nodes in the graph"
+        )
+        edges: list[DependencyEdge] = Field(
+            default_factory=list, description="Dependency edges between tasks"
+        )
+
+    class BatchTaskDependencies(BaseModel):
+        """Dependency status for a task in a batch."""
+
+        task_id: str = Field(..., description="Task identifier")
+        can_start: bool = Field(
+            default=True, description="Whether the task can be started"
+        )
+        blocked_by: list[str] = Field(
+            default_factory=list, description="IDs of tasks blocking this one"
+        )
+        soft_depends: list[str] = Field(
+            default_factory=list, description="IDs of soft dependencies"
+        )
+        blocks: list[str] = Field(
+            default_factory=list, description="IDs of tasks this one blocks"
+        )
+
+    class BatchTaskContext(BaseModel):
+        """Context for a single task in a batch prepare response.
+
+        Contains all information needed to execute a task in parallel with others.
+        """
+
+        task_id: str = Field(..., description="Unique task identifier")
+        title: str = Field(default="", description="Task title")
+        task_type: str = Field(
+            default="task", alias="type", description="Task type (task, subtask, verify)"
+        )
+        status: str = Field(default="pending", description="Current task status")
+        metadata: Dict[str, Any] = Field(
+            default_factory=dict,
+            description="Task metadata including file_path, description, etc.",
+        )
+        dependencies: Optional[BatchTaskDependencies] = Field(
+            default=None, description="Dependency status for the task"
+        )
+        phase: Optional[Dict[str, Any]] = Field(
+            default=None, description="Phase context (id, title, progress)"
+        )
+        parent: Optional[Dict[str, Any]] = Field(
+            default=None, description="Parent task context (id, title, position_label)"
+        )
+
+        model_config = {"populate_by_name": True}
+
+    class StaleTaskInfo(BaseModel):
+        """Information about a stale in_progress task."""
+
+        task_id: str = Field(..., description="Task identifier")
+        title: str = Field(default="", description="Task title")
+
+    class BatchPrepareResponse(BaseModel):
+        """Response schema for prepare_batch_context operation.
+
+        Contains independent tasks that can be executed in parallel along with
+        context, dependency information, and warnings.
+        """
+
+        tasks: list[BatchTaskContext] = Field(
+            default_factory=list, description="Tasks ready for parallel execution"
+        )
+        task_count: int = Field(default=0, description="Number of tasks in the batch")
+        spec_complete: bool = Field(
+            default=False, description="Whether the spec has no remaining tasks"
+        )
+        all_blocked: bool = Field(
+            default=False, description="Whether all remaining tasks are blocked"
+        )
+        warnings: list[str] = Field(
+            default_factory=list, description="Non-fatal warnings about the batch"
+        )
+        stale_tasks: list[StaleTaskInfo] = Field(
+            default_factory=list, description="In-progress tasks exceeding time threshold"
+        )
+        dependency_graph: DependencyGraph = Field(
+            default_factory=DependencyGraph,
+            description="Dependency graph for batch tasks",
+        )
+        token_estimate: Optional[int] = Field(
+            default=None, description="Estimated token count for the batch context"
+        )
+
+    class BatchStartResponse(BaseModel):
+        """Response schema for start_batch operation.
+
+        Confirms which tasks were atomically started and when.
+        """
+
+        started: list[str] = Field(
+            default_factory=list, description="IDs of tasks successfully started"
+        )
+        started_count: int = Field(
+            default=0, description="Number of tasks started"
+        )
+        started_at: Optional[str] = Field(
+            default=None, description="ISO timestamp when tasks were started"
+        )
+        errors: Optional[list[str]] = Field(
+            default=None, description="Validation errors if operation failed"
+        )
+
+    class BatchTaskCompletion(BaseModel):
+        """Input schema for a single task completion in complete_batch.
+
+        Used to specify outcome for each task being completed.
+        """
+
+        task_id: str = Field(..., description="Task identifier to complete")
+        success: bool = Field(
+            ..., description="True if task succeeded, False if failed"
+        )
+        completion_note: str = Field(
+            default="", description="Note describing what was accomplished or why it failed"
+        )
+
+    class BatchTaskResult(BaseModel):
+        """Result for a single task in the complete_batch response."""
+
+        status: str = Field(
+            ..., description="Result status: completed, failed, skipped, error"
+        )
+        completed_at: Optional[str] = Field(
+            default=None, description="ISO timestamp when completed (if successful)"
+        )
+        failed_at: Optional[str] = Field(
+            default=None, description="ISO timestamp when failed (if unsuccessful)"
+        )
+        retry_count: Optional[int] = Field(
+            default=None, description="Updated retry count (if failed)"
+        )
+        error: Optional[str] = Field(
+            default=None, description="Error message (if status is error or skipped)"
+        )
+
+    class BatchCompleteResponse(BaseModel):
+        """Response schema for complete_batch operation.
+
+        Contains per-task results and summary counts for the batch completion.
+        """
+
+        results: Dict[str, BatchTaskResult] = Field(
+            default_factory=dict,
+            description="Per-task results keyed by task_id",
+        )
+        completed_count: int = Field(
+            default=0, description="Number of tasks successfully completed"
+        )
+        failed_count: int = Field(
+            default=0, description="Number of tasks that failed"
+        )
+        total_processed: int = Field(
+            default=0, description="Total number of completions processed"
+        )
+
+    # Export Pydantic models
+    __all_pydantic__ = [
+        "DependencyNode",
+        "DependencyEdge",
+        "DependencyGraph",
+        "BatchTaskDependencies",
+        "BatchTaskContext",
+        "StaleTaskInfo",
+        "BatchPrepareResponse",
+        "BatchStartResponse",
+        "BatchTaskCompletion",
+        "BatchTaskResult",
+        "BatchCompleteResponse",
+    ]
+
+else:
+    # Pydantic not available - provide None placeholders
+    DependencyNode = None  # type: ignore[misc,assignment]
+    DependencyEdge = None  # type: ignore[misc,assignment]
+    DependencyGraph = None  # type: ignore[misc,assignment]
+    BatchTaskDependencies = None  # type: ignore[misc,assignment]
+    BatchTaskContext = None  # type: ignore[misc,assignment]
+    StaleTaskInfo = None  # type: ignore[misc,assignment]
+    BatchPrepareResponse = None  # type: ignore[misc,assignment]
+    BatchStartResponse = None  # type: ignore[misc,assignment]
+    BatchTaskCompletion = None  # type: ignore[misc,assignment]
+    BatchTaskResult = None  # type: ignore[misc,assignment]
+    BatchCompleteResponse = None  # type: ignore[misc,assignment]
+    __all_pydantic__ = []
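The batch schemas above are plain Pydantic models, so a caller can validate a prepare_batch_context payload directly. A minimal sketch, assuming pydantic v2 is installed and importing only names introduced in this diff; the payload values are illustrative, not real output:

# Hypothetical payload; shapes follow the schemas added in this version.
from foundry_mcp.core.responses import PYDANTIC_AVAILABLE, BatchPrepareResponse

payload = {
    "tasks": [
        {
            "task_id": "task-1-2",
            "title": "Implement parser",
            "type": "task",            # wire alias for task_type
            "status": "pending",
            "metadata": {"file_path": "src/parser.py"},
        }
    ],
    "task_count": 1,
    "dependency_graph": {
        "nodes": [{"id": "task-1-2", "title": "Implement parser", "is_target": True}],
        "edges": [{"from": "task-1-1", "to": "task-1-2", "type": "blocks"}],
    },
}

if PYDANTIC_AVAILABLE:
    batch = BatchPrepareResponse.model_validate(payload)  # pydantic v2 API
    print(batch.task_count, batch.dependency_graph.edges[0].from_id)

Because the edge and context models set populate_by_name, either the wire aliases ("from", "to", "type") or the field names ("from_id", "to_id", "task_type") are accepted on input.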
foundry_mcp/core/spec.py
CHANGED
foundry_mcp/core/task.py
CHANGED
@@ -19,7 +19,7 @@ from foundry_mcp.core.spec import (
 from foundry_mcp.core.responses import success_response, error_response

 # Valid task types for add_task
-TASK_TYPES = ("task", "subtask", "verify")
+TASK_TYPES = ("task", "subtask", "verify", "research")


 def is_unblocked(spec_data: Dict[str, Any], task_id: str, task_data: Dict[str, Any]) -> bool:
@@ -31,6 +31,11 @@ def is_unblocked(spec_data: Dict[str, Any], task_id: str, task_data: Dict[str, A
     1. Any of its direct task dependencies are not completed, OR
     2. Its parent phase is blocked by an incomplete phase

+    Research nodes have special blocking behavior based on blocking_mode:
+    - "none": Research doesn't block dependents
+    - "soft": Research is informational, doesn't block (default)
+    - "hard": Research must complete before dependents can start
+
     Args:
         spec_data: JSON spec file data
         task_id: Task identifier
@@ -45,7 +50,18 @@ def is_unblocked(spec_data: Dict[str, Any], task_id: str, task_data: Dict[str, A
     blocked_by = task_data.get("dependencies", {}).get("blocked_by", [])
     for blocker_id in blocked_by:
         blocker = hierarchy.get(blocker_id)
-        if not blocker or blocker.get("status") != "completed":
+        if not blocker:
+            continue
+
+        # Special handling for research nodes based on blocking_mode
+        if blocker.get("type") == "research":
+            blocking_mode = blocker.get("metadata", {}).get("blocking_mode", "soft")
+            if blocking_mode in ("none", "soft"):
+                # Research with "none" or "soft" blocking mode doesn't block
+                continue
+            # "hard" mode falls through to standard completion check
+
+        if blocker.get("status") != "completed":
             return False

     # Check phase-level dependencies
@@ -533,6 +549,82 @@ def get_task_journal_summary(
     }


+def _compute_auto_mode_hints(
+    spec_data: Dict[str, Any],
+    task_id: str,
+    task_data: Dict[str, Any],
+) -> Dict[str, Any]:
+    """
+    Compute hints for autonomous mode execution.
+
+    These hints help an autonomous agent decide whether to proceed
+    without user input or pause for confirmation.
+
+    Args:
+        spec_data: Loaded spec data
+        task_id: Current task ID
+        task_data: Task node data
+
+    Returns:
+        Dictionary with autonomous mode hints:
+        - estimated_complexity: "low", "medium", or "high"
+        - has_sibling_verify: bool (phase has verify tasks)
+        - may_require_user_input: bool (task category suggests user input needed)
+    """
+    hierarchy = spec_data.get("hierarchy", {})
+    metadata = task_data.get("metadata", {}) or {}
+
+    # Compute estimated_complexity
+    complexity = metadata.get("complexity", "").lower()
+    estimated_hours = metadata.get("estimated_hours")
+
+    if complexity in ("complex", "high"):
+        estimated_complexity = "high"
+    elif complexity in ("medium", "moderate"):
+        estimated_complexity = "medium"
+    elif complexity in ("simple", "low"):
+        estimated_complexity = "low"
+    elif estimated_hours is not None:
+        # Derive from hours if explicit complexity not set
+        if estimated_hours > 2:
+            estimated_complexity = "high"
+        elif estimated_hours > 0.5:
+            estimated_complexity = "medium"
+        else:
+            estimated_complexity = "low"
+    else:
+        # Default to medium if no hints
+        estimated_complexity = "medium"
+
+    # Check has_sibling_verify - look for verify tasks in same phase
+    parent_id = task_data.get("parent")
+    has_sibling_verify = False
+    if parent_id:
+        parent = hierarchy.get(parent_id, {})
+        children = parent.get("children", [])
+        for sibling_id in children:
+            if sibling_id != task_id:
+                sibling = hierarchy.get(sibling_id, {})
+                if sibling.get("type") == "verify":
+                    has_sibling_verify = True
+                    break
+
+    # Check may_require_user_input based on task_category
+    task_category = metadata.get("task_category", "").lower()
+    may_require_user_input = task_category in (
+        "decision",
+        "investigation",
+        "planning",
+        "design",
+    )
+
+    return {
+        "estimated_complexity": estimated_complexity,
+        "has_sibling_verify": has_sibling_verify,
+        "may_require_user_input": may_require_user_input,
+    }
+
+
 def prepare_task(
     spec_id: str,
     specs_dir: Path,
@@ -599,12 +691,16 @@
         "task_journal": get_task_journal_summary(spec_data, task_id),
     }

+    # Compute autonomous mode hints
+    auto_mode_hints = _compute_auto_mode_hints(spec_data, task_id, task_data)
+
     return asdict(success_response(
         task_id=task_id,
         task_data=task_data,
         dependencies=deps,
         spec_complete=False,
-        context=context
+        context=context,
+        auto_mode_hints=auto_mode_hints,
     ))


@@ -619,28 +715,35 @@ def _generate_task_id(parent_id: str, existing_children: List[str], task_type: s
     For verify IDs:
     - Same pattern but with "verify-" prefix

+    For research IDs:
+    - Same pattern but with "research-" prefix
+
     Args:
         parent_id: Parent node ID
         existing_children: List of existing child IDs
-        task_type: Type of task (task, subtask, verify)
+        task_type: Type of task (task, subtask, verify, research)

     Returns:
         New task ID string
     """
-    prefix = "verify" if task_type == "verify" else "task"
+    # Map task_type to ID prefix
+    prefix_map = {"verify": "verify", "research": "research"}
+    prefix = prefix_map.get(task_type, "task")

     # Extract numeric parts from parent
     if parent_id.startswith("phase-"):
         # Parent is phase-N, new task is task-N-1, task-N-2, etc.
         phase_num = parent_id.replace("phase-", "")
         base = f"{prefix}-{phase_num}"
-    elif parent_id.startswith("task-") or parent_id.startswith("verify-"):
-        # Parent is task-N-M
-        # Remove the prefix
+    elif parent_id.startswith(("task-", "verify-", "research-")):
+        # Parent is task-N-M, verify-N-M, or research-N-M; new task appends next number
+        # Remove the prefix to get the numeric path
         if parent_id.startswith("task-"):
             base = f"{prefix}-{parent_id[5:]}"  # len("task-") = 5
-        else:
+        elif parent_id.startswith("verify-"):
             base = f"{prefix}-{parent_id[7:]}"  # len("verify-") = 7
+        else:  # research-
+            base = f"{prefix}-{parent_id[9:]}"  # len("research-") = 9
     else:
         # Unknown parent type, generate based on existing children count
         base = f"{prefix}-1"
@@ -696,11 +799,15 @@ def add_task(
     position: Optional[int] = None,
     file_path: Optional[str] = None,
     specs_dir: Optional[Path] = None,
+    # Research-specific parameters
+    research_type: Optional[str] = None,
+    blocking_mode: Optional[str] = None,
+    query: Optional[str] = None,
 ) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
     """
     Add a new task to a specification's hierarchy.

-    Creates a new task, subtask, or verify node under the specified parent.
+    Creates a new task, subtask, verify, or research node under the specified parent.
     Automatically generates the task ID and updates ancestor task counts.

     Args:
@@ -708,11 +815,14 @@
         parent_id: Parent node ID (phase or task).
         title: Task title.
         description: Optional task description.
-        task_type: Type of task (task, subtask, verify). Default: task.
+        task_type: Type of task (task, subtask, verify, research). Default: task.
         estimated_hours: Optional estimated hours.
         position: Optional position in parent's children list (0-based).
         file_path: Optional file path associated with this task.
         specs_dir: Path to specs directory (auto-detected if not provided).
+        research_type: For research nodes - workflow type (chat, consensus, etc).
+        blocking_mode: For research nodes - blocking behavior (none, soft, hard).
+        query: For research nodes - the research question/topic.

     Returns:
         Tuple of (result_dict, error_message).
@@ -723,6 +833,15 @@
     if task_type not in TASK_TYPES:
         return None, f"Invalid task_type '{task_type}'. Must be one of: {', '.join(TASK_TYPES)}"

+    # Validate research-specific parameters
+    if task_type == "research":
+        from foundry_mcp.core.validation import VALID_RESEARCH_TYPES, RESEARCH_BLOCKING_MODES
+
+        if research_type and research_type not in VALID_RESEARCH_TYPES:
+            return None, f"Invalid research_type '{research_type}'. Must be one of: {', '.join(sorted(VALID_RESEARCH_TYPES))}"
+        if blocking_mode and blocking_mode not in RESEARCH_BLOCKING_MODES:
+            return None, f"Invalid blocking_mode '{blocking_mode}'. Must be one of: {', '.join(sorted(RESEARCH_BLOCKING_MODES))}"
+
     # Validate title
     if not title or not title.strip():
         return None, "Title is required"
@@ -774,6 +893,15 @@
     if file_path:
         metadata["file_path"] = file_path.strip()

+    # Add research-specific metadata
+    if task_type == "research":
+        metadata["research_type"] = research_type or "consensus"  # Default to consensus
+        metadata["blocking_mode"] = blocking_mode or "soft"  # Default to soft blocking
+        if query:
+            metadata["query"] = query.strip()
+        metadata["research_history"] = []  # Empty history initially
+        metadata["findings"] = {}  # Empty findings initially
+
     # Create the task node
     task_node = {
         "type": task_type,
@@ -981,7 +1109,8 @@ def remove_task(
     # Validate task type (can only remove task, subtask, verify)
     task_type = task.get("type")
     if task_type not in ("task", "subtask", "verify"):
-        return None, f"Cannot remove node type '{task_type}'. Only task, subtask, or verify nodes can be removed."
+        hint = " Use `authoring action=\"phase-remove\"` instead." if task_type == "phase" else ""
+        return None, f"Cannot remove node type '{task_type}'. Only task, subtask, or verify nodes can be removed.{hint}"

     # Check for children
     children = task.get("children", [])
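The new research handling in is_unblocked means a pending research node with "soft" or "none" blocking_mode no longer blocks its dependents. A minimal sketch of that behavior on a toy hierarchy; the spec_data layout beyond the hierarchy mapping shown in the diff is an assumption, and phase-level checks are ignored here:

from foundry_mcp.core.task import is_unblocked

# Toy spec: task-1-2 is blocked_by a pending research node with soft blocking.
spec_data = {
    "hierarchy": {
        "research-1-1": {
            "type": "research",
            "status": "pending",
            "metadata": {"blocking_mode": "soft"},
        },
        "task-1-2": {
            "type": "task",
            "status": "pending",
            "dependencies": {"blocked_by": ["research-1-1"]},
        },
    }
}

task_data = spec_data["hierarchy"]["task-1-2"]
# Soft research is informational only, so the dependent should be unblocked.
print(is_unblocked(spec_data, "task-1-2", task_data))
# With blocking_mode "hard", the same call should stay False until the
# research node's status is "completed".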
foundry_mcp/core/validation.py
CHANGED
@@ -110,7 +110,7 @@ class SpecStats:
 # Constants

 STATUS_FIELDS = {"pending", "in_progress", "completed", "blocked"}
-VALID_NODE_TYPES = {"spec", "phase", "group", "task", "subtask", "verify"}
+VALID_NODE_TYPES = {"spec", "phase", "group", "task", "subtask", "verify", "research"}
 VALID_STATUSES = {"pending", "in_progress", "completed", "blocked"}
 VALID_TASK_CATEGORIES = {
     "investigation",
@@ -127,6 +127,11 @@ VERIFICATION_TYPE_MAPPING = {
     "auto": "run-tests",
 }

+# Research node constants
+VALID_RESEARCH_TYPES = {"chat", "consensus", "thinkdeep", "ideate", "deep-research"}
+VALID_RESEARCH_RESULTS = {"completed", "inconclusive", "blocked", "cancelled"}
+RESEARCH_BLOCKING_MODES = {"none", "soft", "hard"}
+
 # Common field name typos/alternatives
 FIELD_NAME_SUGGESTIONS = {
     "category": "task_category",
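These constants make the research-node vocabulary importable for reuse outside core/task.py. A small sketch mirroring the add_task checks; check_research_params is a hypothetical helper, not part of the package:

from foundry_mcp.core.validation import VALID_RESEARCH_TYPES, RESEARCH_BLOCKING_MODES

def check_research_params(research_type: str, blocking_mode: str) -> list[str]:
    """Collect validation errors the same way add_task rejects bad research params."""
    errors = []
    if research_type not in VALID_RESEARCH_TYPES:
        errors.append(f"Invalid research_type '{research_type}'. "
                      f"Must be one of: {', '.join(sorted(VALID_RESEARCH_TYPES))}")
    if blocking_mode not in RESEARCH_BLOCKING_MODES:
        errors.append(f"Invalid blocking_mode '{blocking_mode}'. "
                      f"Must be one of: {', '.join(sorted(RESEARCH_BLOCKING_MODES))}")
    return errors

print(check_research_params("deep-research", "soft"))  # [] - both values are valid
print(check_research_params("survey", "maybe"))        # two error messages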
foundry_mcp/server.py
CHANGED
@@ -17,7 +17,6 @@ from mcp.server.fastmcp import FastMCP

 from foundry_mcp.config import ServerConfig, get_config
 from foundry_mcp.core.observability import audit_log, get_observability_manager
-from foundry_mcp.core.feature_flags import get_flag_service
 from foundry_mcp.resources.specs import register_spec_resources
 from foundry_mcp.prompts.workflows import register_workflow_prompts
 from foundry_mcp.tools.unified import register_unified_tools
@@ -99,18 +98,6 @@ def _init_metrics_persistence(config: ServerConfig) -> None:
         logger.warning("Failed to initialize metrics persistence: %s", exc)


-def _apply_feature_flag_overrides_from_env() -> None:
-    """Apply comma-separated feature flag overrides from `FEATURE_FLAGS`."""
-
-    raw = os.environ.get("FEATURE_FLAGS")
-    if not raw:
-        return
-
-    flag_service = get_flag_service()
-    for name in [part.strip() for part in raw.split(",") if part.strip()]:
-        flag_service.set_override("anonymous", name, True)
-
-
 def create_server(config: Optional[ServerConfig] = None) -> FastMCP:
     """Create and configure the FastMCP server instance."""

@@ -119,7 +106,6 @@ def create_server(config: Optional[ServerConfig] = None) -> FastMCP:

     config.setup_logging()

-    _apply_feature_flag_overrides_from_env()
     _init_observability(config)
     _init_error_collection(config)
     _init_metrics_persistence(config)
@@ -137,48 +123,10 @@ def create_server(config: Optional[ServerConfig] = None) -> FastMCP:
     return mcp


-def _patch_fastmcp_json_serialization() -> None:
-    """Patch FastMCP to use minified JSON for tool responses.
-
-    FastMCP serializes dict responses with indent=2 by default.
-    This patch makes responses minified (no indentation) for smaller payloads.
-    """
-    try:
-        import pydantic_core
-        from itertools import chain
-        from mcp.types import TextContent, ContentBlock
-        from mcp.server.fastmcp.utilities import func_metadata
-        from mcp.server.fastmcp.utilities.types import Image, Audio
-
-        def _minified_convert_to_content(result):
-            if result is None:
-                return []
-            if isinstance(result, ContentBlock):
-                return [result]
-            if isinstance(result, Image):
-                return [result.to_image_content()]
-            if isinstance(result, Audio):
-                return [result.to_audio_content()]
-            if isinstance(result, (list, tuple)):
-                return list(chain.from_iterable(
-                    _minified_convert_to_content(item) for item in result
-                ))
-            if not isinstance(result, str):
-                # Minified: no indent
-                result = pydantic_core.to_json(result, fallback=str).decode()
-            return [TextContent(type="text", text=result)]
-
-        func_metadata._convert_to_content = _minified_convert_to_content
-        logger.debug("Patched FastMCP for minified JSON responses")
-    except Exception as e:
-        logger.warning("Failed to patch FastMCP JSON serialization: %s", e)
-
-
 def main() -> None:
     """Main entry point for the foundry-mcp server."""

     try:
-        _patch_fastmcp_json_serialization()
         config = get_config()
         server = create_server(config)
