foundry-mcp 0.3.3__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registries.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +235 -5
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/discovery.py +6 -6
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +20 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/claude.py +6 -47
- foundry_mcp/core/providers/codex.py +6 -57
- foundry_mcp/core/providers/cursor_agent.py +3 -44
- foundry_mcp/core/providers/gemini.py +6 -57
- foundry_mcp/core/providers/opencode.py +35 -5
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +425 -0
- foundry_mcp/core/research/models.py +437 -0
- foundry_mcp/core/research/workflows/__init__.py +22 -0
- foundry_mcp/core/research/workflows/base.py +204 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +396 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +450 -0
- foundry_mcp/core/spec.py +2438 -236
- foundry_mcp/core/task.py +1064 -19
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +313 -42
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +38 -0
- foundry_mcp/tools/unified/__init__.py +4 -2
- foundry_mcp/tools/unified/authoring.py +2423 -267
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +235 -6
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +113 -1
- foundry_mcp/tools/unified/research.py +658 -0
- foundry_mcp/tools/unified/review.py +370 -16
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1163 -48
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/providers/cursor_agent.py

@@ -20,7 +20,6 @@ from typing import Any, Dict, List, Optional, Protocol, Sequence, Tuple
 logger = logging.getLogger(__name__)
 
 from .base import (
-    ModelDescriptor,
     ProviderCapability,
     ProviderContext,
     ProviderExecutionError,
@@ -188,33 +187,10 @@ def _default_runner(
 )
 
 
-CURSOR_MODELS: List[ModelDescriptor] = [
-    ModelDescriptor(
-        id="composer-1",
-        display_name="Composer-1",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.FUNCTION_CALLING,
-            ProviderCapability.STREAMING,
-        },
-        routing_hints={"tier": "default"},
-    ),
-    ModelDescriptor(
-        id="gpt-5.1-codex",
-        display_name="GPT-5.1 Codex",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.FUNCTION_CALLING,
-            ProviderCapability.STREAMING,
-        },
-        routing_hints={"tier": "codex"},
-    ),
-]
-
 CURSOR_METADATA = ProviderMetadata(
     provider_id="cursor-agent",
     display_name="Cursor Agent CLI",
-    models=CURSOR_MODELS,
+    models=[],  # Model validation delegated to CLI
     default_model="composer-1",
     capabilities={ProviderCapability.TEXT, ProviderCapability.FUNCTION_CALLING, ProviderCapability.STREAMING},
     security_flags={"writes_allowed": False, "read_only": True},
@@ -246,7 +222,7 @@ class CursorAgentProvider(ProviderContext):
         self._binary = binary or os.environ.get(CUSTOM_BINARY_ENV, DEFAULT_BINARY)
         self._env = env
         self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
-        self._model = self._ensure_model(model or metadata.default_model or self._first_model_id())
+        self._model = model or metadata.default_model or "composer-1"
         self._config_backup_path: Optional[Path] = None
         self._original_config_existed: bool = False
         self._cleanup_done: bool = False
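The two cursor_agent.py hunks above swap hard-coded model validation for a plain fallback chain. A minimal sketch of the new resolution behavior, using stand-in types rather than the package's actual `ProviderMetadata`:

```python
# Sketch only: stand-in types, not foundry_mcp's actual classes. With
# `models=[]` in 0.7.0 there is no local descriptor list to validate
# against, so resolution is a plain fallback chain and unknown IDs are
# passed through for the Cursor CLI itself to accept or reject.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class MetadataSketch:
    provider_id: str
    default_model: Optional[str] = None
    models: List[str] = field(default_factory=list)  # empty as of 0.7.0


def resolve_model(metadata: MetadataSketch, requested: Optional[str]) -> str:
    # Mirrors `model or metadata.default_model or "composer-1"` from the diff.
    return requested or metadata.default_model or "composer-1"


meta = MetadataSketch(provider_id="cursor-agent", default_model="composer-1")
assert resolve_model(meta, None) == "composer-1"
assert resolve_model(meta, "gpt-5.1-codex") == "gpt-5.1-codex"  # no local check
```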
@@ -255,23 +231,6 @@ class CursorAgentProvider(ProviderContext):
         """Clean up temporary config directory on provider destruction."""
         self._cleanup_config_file()
 
-    def _first_model_id(self) -> str:
-        if not self.metadata.models:
-            raise ProviderUnavailableError(
-                "Cursor Agent metadata is missing model descriptors.",
-                provider=self.metadata.provider_id,
-            )
-        return self.metadata.models[0].id
-
-    def _ensure_model(self, candidate: str) -> str:
-        available = {descriptor.id for descriptor in self.metadata.models}
-        if candidate not in available:
-            raise ProviderExecutionError(
-                f"Unsupported Cursor Agent model '{candidate}'. Available: {', '.join(sorted(available))}",
-                provider=self.metadata.provider_id,
-            )
-        return candidate
-
     def _create_readonly_config(self) -> Path:
         """
         Backup and replace ~/.cursor/cli-config.json with read-only permissions.
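The surviving docstring describes a backup-and-replace scheme for `~/.cursor/cli-config.json`. A hypothetical sketch of that general pattern (illustrative only, not the package's implementation):

```python
# Hypothetical sketch of the backup-and-replace pattern the docstring
# describes; the real method's paths, payload, and restore logic may differ.
import json
import shutil
import stat
from pathlib import Path
from typing import Optional


def create_readonly_config(config_path: Path, payload: dict) -> Optional[Path]:
    """Back up the user's config, then install a read-only replacement."""
    backup: Optional[Path] = None
    if config_path.exists():
        backup = config_path.with_suffix(".json.bak")
        shutil.copy2(config_path, backup)  # preserve the original for restore
    config_path.write_text(json.dumps(payload))
    config_path.chmod(stat.S_IRUSR)  # 0o400: owner read-only
    return backup
```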
@@ -533,7 +492,7 @@ class CursorAgentProvider(ProviderContext):
                 provider=self.metadata.provider_id,
             )
 
-        model = self._ensure_model(
+        model = (
            str(request.metadata.get("model")) if request.metadata and "model" in request.metadata else self._model
         )
 
foundry_mcp/core/providers/gemini.py

@@ -14,10 +14,7 @@ import os
 import subprocess
 from typing import Any, Dict, List, Optional, Protocol, Sequence
 
-logger = logging.getLogger(__name__)
-
 from .base import (
-    ModelDescriptor,
     ProviderCapability,
     ProviderContext,
     ProviderExecutionError,
@@ -34,6 +31,8 @@ from .base import (
 from .detectors import detect_provider_availability
 from .registry import register_provider
 
+logger = logging.getLogger(__name__)
+
 DEFAULT_BINARY = "gemini"
 DEFAULT_TIMEOUT_SECONDS = 360
 AVAILABILITY_OVERRIDE_ENV = "GEMINI_CLI_AVAILABLE_OVERRIDE"
@@ -137,44 +136,11 @@ def _default_runner(
 )
 
 
-GEMINI_MODELS: List[ModelDescriptor] = [
-    ModelDescriptor(
-        id="pro",
-        display_name="Gemini 3.0 Pro",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.STREAMING,
-            ProviderCapability.VISION,
-        },
-        routing_hints={"tier": "pro", "context_window": "1M"},
-    ),
-    ModelDescriptor(
-        id="gemini-2.5-pro",
-        display_name="Gemini 2.5 Pro",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.STREAMING,
-            ProviderCapability.VISION,
-        },
-        routing_hints={"tier": "pro", "context_window": "1M"},
-    ),
-    ModelDescriptor(
-        id="gemini-2.5-flash",
-        display_name="Gemini 2.5 Flash",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.STREAMING,
-            ProviderCapability.VISION,
-        },
-        routing_hints={"tier": "flash"},
-    ),
-]
-
 GEMINI_METADATA = ProviderMetadata(
     provider_id="gemini",
     display_name="Google Gemini CLI",
-    models=GEMINI_MODELS,
-    default_model="
+    models=[],  # Model validation delegated to CLI
+    default_model="pro",
     capabilities={ProviderCapability.TEXT, ProviderCapability.STREAMING, ProviderCapability.VISION},
     security_flags={"writes_allowed": False},
     extra={"cli": "gemini", "output_format": "json"},
@@ -200,24 +166,7 @@ class GeminiProvider(ProviderContext):
         self._binary = binary or os.environ.get(CUSTOM_BINARY_ENV, DEFAULT_BINARY)
         self._env = env
         self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
-        self._model = self._ensure_model(model or metadata.default_model or self._first_model_id())
-
-    def _first_model_id(self) -> str:
-        if not self.metadata.models:
-            raise ProviderUnavailableError(
-                "Gemini provider metadata is missing model descriptors.",
-                provider=self.metadata.provider_id,
-            )
-        return self.metadata.models[0].id
-
-    def _ensure_model(self, candidate: str) -> str:
-        available = {descriptor.id for descriptor in self.metadata.models}
-        if candidate not in available:
-            raise ProviderExecutionError(
-                f"Unsupported Gemini model '{candidate}'. Available: {', '.join(sorted(available))}",
-                provider=self.metadata.provider_id,
-            )
-        return candidate
+        self._model = model or metadata.default_model or "pro"
 
     def _validate_request(self, request: ProviderRequest) -> None:
         """Validate and normalize request, ignoring unsupported parameters."""
@@ -305,7 +254,7 @@ class GeminiProvider(ProviderContext):
     def _resolve_model(self, request: ProviderRequest) -> str:
         model_override = request.metadata.get("model") if request.metadata else None
         if model_override:
-            return self._ensure_model(str(model_override))
+            return str(model_override)
         return self._model
 
     def _emit_stream_if_requested(self, content: str, *, stream: bool) -> None:
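The override precedence in `_resolve_model` is now: per-request metadata first, then the instance default, with no local validation step. A sketch with a plain dict standing in for `ProviderRequest.metadata` (stand-in, not the real type):

```python
# Mirrors the 0.7.0 logic shown in the hunk above: a per-request "model"
# entry beats the instance default, and the override is passed straight
# through instead of being checked by the removed _ensure_model helper.
from typing import Any, Dict, Optional


def resolve_model(request_metadata: Optional[Dict[str, Any]], instance_model: str) -> str:
    model_override = request_metadata.get("model") if request_metadata else None
    if model_override:
        return str(model_override)  # the CLI is the validator now
    return instance_model


assert resolve_model({"model": "gemini-2.5-flash"}, "pro") == "gemini-2.5-flash"
assert resolve_model(None, "pro") == "pro"
```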
foundry_mcp/core/providers/opencode.py

@@ -18,8 +18,6 @@ import time
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Protocol, Sequence
 
-logger = logging.getLogger(__name__)
-
 from .base import (
     ModelDescriptor,
     ProviderCapability,

@@ -38,6 +36,8 @@ from .base import (
 from .detectors import detect_provider_availability
 from .registry import register_provider
 
+logger = logging.getLogger(__name__)
+
 DEFAULT_BINARY = "node"
 DEFAULT_WRAPPER_SCRIPT = Path(__file__).parent / "opencode_wrapper.js"
 DEFAULT_TIMEOUT_SECONDS = 360
@@ -123,6 +123,32 @@
 
 
 OPENCODE_MODELS: List[ModelDescriptor] = [
+    ModelDescriptor(
+        id="openai/gpt-5.2",
+        display_name="OpenAI GPT-5.2 (via OpenCode)",
+        capabilities={
+            ProviderCapability.TEXT,
+            ProviderCapability.STREAMING,
+        },
+        routing_hints={
+            "configurable": True,
+            "source": "opencode config",
+            "note": "Accepts any model ID - validated by opencode CLI",
+        },
+    ),
+    ModelDescriptor(
+        id="openai/gpt-5.2-codex",
+        display_name="OpenAI GPT-5.2 Codex (via OpenCode)",
+        capabilities={
+            ProviderCapability.TEXT,
+            ProviderCapability.STREAMING,
+        },
+        routing_hints={
+            "configurable": True,
+            "source": "opencode config",
+            "note": "Accepts any model ID - validated by opencode CLI",
+        },
+    ),
     ModelDescriptor(
         id="openai/gpt-5.1-codex-mini",
         display_name="OpenAI GPT-5.1 Codex Mini (via OpenCode)",
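Note the asymmetry: opencode.py keeps and extends its descriptor list, but the new entries mark themselves advisory through `routing_hints`. A sketch of how a consumer might read that hint, with a stand-in dataclass rather than the real `ModelDescriptor`:

```python
# Stand-in dataclass, not foundry_mcp's ModelDescriptor: shows how the new
# "configurable" routing hint could be consumed. A truthy value signals the
# ID is only a suggestion and the opencode CLI owns validation.
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class DescriptorSketch:
    id: str
    routing_hints: Dict[str, Any] = field(default_factory=dict)


def accepts_any_model(d: DescriptorSketch) -> bool:
    return bool(d.routing_hints.get("configurable"))


d = DescriptorSketch(id="openai/gpt-5.2", routing_hints={"configurable": True})
assert accepts_any_model(d)
```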
@@ -180,8 +206,10 @@ class OpenCodeProvider(ProviderContext):
         self._env = self._prepare_subprocess_env(env)
 
         self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
-        self._model = self._ensure_model(model or metadata.default_model or self._first_model_id())
-
+        self._model = self._ensure_model(
+            model or metadata.default_model or self._first_model_id()
+        )
+        self._server_process: Optional[subprocess.Popen[bytes]] = None
         self._config_file_path: Optional[Path] = None
 
     def __del__(self) -> None:
@@ -205,7 +233,9 @@
         # Clean up config file
         self._cleanup_config_file()
 
-    def _prepare_subprocess_env(self, custom_env: Optional[Dict[str, str]]) -> Dict[str, str]:
+    def _prepare_subprocess_env(
+        self, custom_env: Optional[Dict[str, str]]
+    ) -> Dict[str, str]:
         """
         Prepare environment variables for subprocess execution.
 
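The reflowed signature belongs to a conventional env-merge helper. A minimal sketch of what such a helper typically does (assumed behavior; the real method's merge rules may differ):

```python
# Assumed pattern, not the package's implementation: copy the parent
# environment, then layer the caller's overrides on top before handing
# the dict to subprocess.
import os
from typing import Dict, Optional


def prepare_subprocess_env(custom_env: Optional[Dict[str, str]]) -> Dict[str, str]:
    env = dict(os.environ)  # never mutate os.environ itself
    if custom_env:
        env.update(custom_env)  # explicit overrides win
    return env


env = prepare_subprocess_env({"OPENCODE_CONFIG": "/tmp/opencode.json"})
assert env["OPENCODE_CONFIG"] == "/tmp/opencode.json"
```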
foundry_mcp/core/research/__init__.py (new file)

@@ -0,0 +1,68 @@
+"""Research workflows for multi-model orchestration.
+
+This package provides conversation threading, multi-model consensus,
+hypothesis-driven investigation, and creative brainstorming workflows.
+"""
+
+from foundry_mcp.core.research.models import (
+    ConfidenceLevel,
+    ConsensusConfig,
+    ConsensusState,
+    ConsensusStrategy,
+    ConversationMessage,
+    ConversationThread,
+    Hypothesis,
+    Idea,
+    IdeaCluster,
+    IdeationPhase,
+    IdeationState,
+    InvestigationStep,
+    ModelResponse,
+    ThreadStatus,
+    ThinkDeepState,
+    WorkflowType,
+)
+from foundry_mcp.core.research.memory import (
+    FileStorageBackend,
+    ResearchMemory,
+)
+from foundry_mcp.core.research.workflows import (
+    ChatWorkflow,
+    ConsensusWorkflow,
+    IdeateWorkflow,
+    ResearchWorkflowBase,
+    ThinkDeepWorkflow,
+)
+
+__all__ = [
+    # Enums
+    "WorkflowType",
+    "ConfidenceLevel",
+    "ConsensusStrategy",
+    "ThreadStatus",
+    "IdeationPhase",
+    # Conversation models
+    "ConversationMessage",
+    "ConversationThread",
+    # THINKDEEP models
+    "Hypothesis",
+    "InvestigationStep",
+    "ThinkDeepState",
+    # IDEATE models
+    "Idea",
+    "IdeaCluster",
+    "IdeationState",
+    # CONSENSUS models
+    "ModelResponse",
+    "ConsensusConfig",
+    "ConsensusState",
+    # Storage
+    "FileStorageBackend",
+    "ResearchMemory",
+    # Workflows
+    "ResearchWorkflowBase",
+    "ChatWorkflow",
+    "ConsensusWorkflow",
+    "ThinkDeepWorkflow",
+    "IdeateWorkflow",
+]
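Everything above is re-exported from a single namespace, so consumers import from `foundry_mcp.core.research` directly. A short usage sketch; the enum iteration assumes `WorkflowType` and `ConsensusStrategy` are standard `Enum`s, which the `# Enums` grouping suggests but the diff does not prove:

```python
# Usage sketch against the re-exported surface shown in the new __init__.py.
# Constructor signatures for the workflow classes are not visible in this
# diff, so nothing is instantiated here.
from foundry_mcp.core.research import ConsensusStrategy, WorkflowType

print([w.name for w in WorkflowType])       # available workflow kinds
print([s.name for s in ConsensusStrategy])  # consensus aggregation strategies
```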
|