foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/__init__.py +0 -13
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/session.py +1 -8
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/context.py +39 -0
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +615 -11
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/batch_operations.py +1196 -0
- foundry_mcp/core/discovery.py +7 -7
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +28 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/naming.py +25 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prometheus.py +0 -13
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/__init__.py +12 -0
- foundry_mcp/core/providers/base.py +39 -0
- foundry_mcp/core/providers/claude.py +51 -48
- foundry_mcp/core/providers/codex.py +70 -60
- foundry_mcp/core/providers/cursor_agent.py +25 -47
- foundry_mcp/core/providers/detectors.py +34 -7
- foundry_mcp/core/providers/gemini.py +69 -58
- foundry_mcp/core/providers/opencode.py +101 -47
- foundry_mcp/core/providers/package-lock.json +4 -4
- foundry_mcp/core/providers/package.json +1 -1
- foundry_mcp/core/providers/validation.py +128 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1220 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4020 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +690 -0
- foundry_mcp/core/spec.py +2439 -236
- foundry_mcp/core/task.py +1205 -31
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +319 -43
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +0 -14
- foundry_mcp/tools/unified/__init__.py +39 -18
- foundry_mcp/tools/unified/authoring.py +2371 -248
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +434 -32
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +133 -2
- foundry_mcp/tools/unified/provider.py +0 -40
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +374 -17
- foundry_mcp/tools/unified/review_helpers.py +16 -1
- foundry_mcp/tools/unified/server.py +9 -24
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1664 -30
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
- foundry_mcp-0.8.10.dist-info/RECORD +153 -0
- foundry_mcp/cli/flags.py +0 -266
- foundry_mcp/core/feature_flags.py +0 -592
- foundry_mcp-0.3.3.dist-info/RECORD +0 -135
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/tools/unified/research.py
@@ -0,0 +1,1283 @@
"""Unified research tool with action routing.

Provides multi-model orchestration capabilities through CHAT, CONSENSUS,
THINKDEEP, and IDEATE workflows via a unified MCP tool interface.
"""

from __future__ import annotations

import logging
from dataclasses import asdict
from typing import TYPE_CHECKING, Any, Optional

from mcp.server.fastmcp import FastMCP

from foundry_mcp.config import ServerConfig
from foundry_mcp.core.naming import canonical_tool
from foundry_mcp.core.research.memory import ResearchMemory
from foundry_mcp.core.research.models import ConsensusStrategy, ThreadStatus
from foundry_mcp.core.research.workflows import (
    ChatWorkflow,
    ConsensusWorkflow,
    DeepResearchWorkflow,
    IdeateWorkflow,
    ThinkDeepWorkflow,
)
from foundry_mcp.core.responses import (
    ErrorCode,
    ErrorType,
    error_response,
    success_response,
)
from foundry_mcp.tools.unified.router import (
    ActionDefinition,
    ActionRouter,
    ActionRouterError,
)

if TYPE_CHECKING:
    pass

logger = logging.getLogger(__name__)

# =============================================================================
# Action Summaries
# =============================================================================

_ACTION_SUMMARY = {
    "chat": "Single-model conversation with thread persistence",
    "consensus": "Multi-model parallel consultation with synthesis",
    "thinkdeep": "Hypothesis-driven systematic investigation",
    "ideate": "Creative brainstorming with idea clustering",
    "deep-research": "Multi-phase iterative deep research with query decomposition",
    "deep-research-status": "Get status of deep research session",
    "deep-research-report": "Get final report from deep research",
    "deep-research-list": "List deep research sessions",
    "deep-research-delete": "Delete a deep research session",
    "thread-list": "List conversation threads",
    "thread-get": "Get full thread details including messages",
    "thread-delete": "Delete a conversation thread",
    # Spec-integrated research actions
    "node-execute": "Execute research workflow linked to spec node",
    "node-record": "Record research findings to spec node",
    "node-status": "Get research node status and linked session info",
    "node-findings": "Retrieve recorded findings from spec node",
}


# =============================================================================
# Module State
# =============================================================================

_config: Optional[ServerConfig] = None
_memory: Optional[ResearchMemory] = None


def _get_memory() -> ResearchMemory:
    """Get or create the research memory instance."""
    global _memory, _config
    if _memory is None:
        if _config is not None:
            _memory = ResearchMemory(
                base_path=_config.research.get_storage_path(),
                ttl_hours=_config.research.ttl_hours,
            )
        else:
            _memory = ResearchMemory()
    return _memory


def _get_config() -> ServerConfig:
    """Get the server config, creating a default if not yet initialized."""
    global _config
    if _config is None:
        # Create default config if not set
        _config = ServerConfig()
    return _config


# =============================================================================
# Validation Helpers
# =============================================================================

def _validation_error(
    field: str,
    action: str,
    message: str,
    *,
    code: ErrorCode = ErrorCode.VALIDATION_ERROR,
    remediation: Optional[str] = None,
) -> dict:
    """Create a validation error response."""
    return asdict(
        error_response(
            f"Invalid field '{field}' for research.{action}: {message}",
            error_code=code,
            error_type=ErrorType.VALIDATION,
            remediation=remediation or f"Provide a valid '{field}' value",
            details={"field": field, "action": f"research.{action}"},
        )
    )
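
# A sketch of the envelope _validation_error() produces, assuming the
# dataclass behind error_response() serializes roughly as follows (exact
# field names are defined in foundry_mcp.core.responses, not here):
#
#   {
#       "success": False,
#       "error": "Invalid field 'prompt' for research.chat: Required non-empty string",
#       "error_code": "VALIDATION_ERROR",
#       "error_type": "validation",
#       "remediation": "Provide a valid 'prompt' value",
#       "details": {"field": "prompt", "action": "research.chat"},
#   }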


# =============================================================================
# Action Handlers
# =============================================================================

def _handle_chat(
    *,
    prompt: Optional[str] = None,
    thread_id: Optional[str] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    title: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Handle chat action."""
    if not prompt:
        return _validation_error("prompt", "chat", "Required non-empty string")

    config = _get_config()
    workflow = ChatWorkflow(config.research, _get_memory())

    result = workflow.execute(
        prompt=prompt,
        thread_id=thread_id,
        system_prompt=system_prompt,
        provider_id=provider_id,
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        title=title,
    )

    if result.success:
        return asdict(
            success_response(
                data={
                    "content": result.content,
                    "thread_id": result.metadata.get("thread_id"),
                    "message_count": result.metadata.get("message_count"),
                    "provider_id": result.provider_id,
                    "model_used": result.model_used,
                    "tokens_used": result.tokens_used,
                }
            )
        )
    else:
        return asdict(
            error_response(
                result.error or "Chat failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check provider availability and retry",
            )
        )
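
# Illustrative call sequence (assumed client-side shapes): start a chat,
# then continue the same conversation by passing the returned thread_id.
#
#   first = research(action="chat", prompt="Summarize the design", title="notes")
#   reply = research(
#       action="chat",
#       prompt="What are the main risks?",
#       thread_id=first["data"]["thread_id"],
#   )
#
# The "data" nesting assumes success_response(data=...) serializes its
# payload under a "data" key.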


def _handle_consensus(
    *,
    prompt: Optional[str] = None,
    providers: Optional[list[str]] = None,
    strategy: Optional[str] = None,
    synthesis_provider: Optional[str] = None,
    system_prompt: Optional[str] = None,
    timeout_per_provider: float = 30.0,
    max_concurrent: int = 3,
    require_all: bool = False,
    min_responses: int = 1,
    **kwargs: Any,
) -> dict:
    """Handle consensus action."""
    if not prompt:
        return _validation_error("prompt", "consensus", "Required non-empty string")

    # Parse strategy
    consensus_strategy = ConsensusStrategy.SYNTHESIZE
    if strategy:
        try:
            consensus_strategy = ConsensusStrategy(strategy)
        except ValueError:
            valid = [s.value for s in ConsensusStrategy]
            return _validation_error(
                "strategy",
                "consensus",
                f"Invalid value. Valid: {valid}",
                remediation=f"Use one of: {', '.join(valid)}",
            )

    config = _get_config()
    workflow = ConsensusWorkflow(config.research, _get_memory())

    result = workflow.execute(
        prompt=prompt,
        providers=providers,
        strategy=consensus_strategy,
        synthesis_provider=synthesis_provider,
        system_prompt=system_prompt,
        timeout_per_provider=timeout_per_provider,
        max_concurrent=max_concurrent,
        require_all=require_all,
        min_responses=min_responses,
    )

    if result.success:
        return asdict(
            success_response(
                data={
                    "content": result.content,
                    "consensus_id": result.metadata.get("consensus_id"),
                    "providers_consulted": result.metadata.get("providers_consulted"),
                    "strategy": result.metadata.get("strategy"),
                    "response_count": result.metadata.get("response_count"),
                }
            )
        )
    else:
        return asdict(
            error_response(
                result.error or "Consensus failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check provider availability and retry",
                details=result.metadata,
            )
        )


def _handle_thinkdeep(
    *,
    topic: Optional[str] = None,
    investigation_id: Optional[str] = None,
    query: Optional[str] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    max_depth: Optional[int] = None,
    **kwargs: Any,
) -> dict:
    """Handle thinkdeep action."""
    if not topic and not investigation_id:
        return _validation_error(
            "topic/investigation_id",
            "thinkdeep",
            "Either 'topic' (new) or 'investigation_id' (continue) required",
        )

    config = _get_config()
    workflow = ThinkDeepWorkflow(config.research, _get_memory())

    result = workflow.execute(
        topic=topic,
        investigation_id=investigation_id,
        query=query,
        system_prompt=system_prompt,
        provider_id=provider_id,
        max_depth=max_depth,
    )

    if result.success:
        return asdict(
            success_response(
                data={
                    "content": result.content,
                    "investigation_id": result.metadata.get("investigation_id"),
                    "current_depth": result.metadata.get("current_depth"),
                    "max_depth": result.metadata.get("max_depth"),
                    "converged": result.metadata.get("converged"),
                    "hypothesis_count": result.metadata.get("hypothesis_count"),
                    "step_count": result.metadata.get("step_count"),
                }
            )
        )
    else:
        return asdict(
            error_response(
                result.error or "ThinkDeep failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check investigation ID or topic validity",
            )
        )


def _handle_ideate(
    *,
    topic: Optional[str] = None,
    ideation_id: Optional[str] = None,
    ideate_action: str = "generate",
    perspective: Optional[str] = None,
    cluster_ids: Optional[list[str]] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    perspectives: Optional[list[str]] = None,
    scoring_criteria: Optional[list[str]] = None,
    **kwargs: Any,
) -> dict:
    """Handle ideate action."""
    if not topic and not ideation_id:
        return _validation_error(
            "topic/ideation_id",
            "ideate",
            "Either 'topic' (new) or 'ideation_id' (continue) required",
        )

    config = _get_config()
    workflow = IdeateWorkflow(config.research, _get_memory())

    result = workflow.execute(
        topic=topic,
        ideation_id=ideation_id,
        action=ideate_action,
        perspective=perspective,
        cluster_ids=cluster_ids,
        system_prompt=system_prompt,
        provider_id=provider_id,
        perspectives=perspectives,
        scoring_criteria=scoring_criteria,
    )

    if result.success:
        return asdict(
            success_response(
                data={
                    "content": result.content,
                    "ideation_id": result.metadata.get("ideation_id"),
                    "phase": result.metadata.get("phase"),
                    "idea_count": result.metadata.get("idea_count"),
                    "cluster_count": result.metadata.get("cluster_count"),
                }
            )
        )
    else:
        return asdict(
            error_response(
                result.error or "Ideate failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check ideation ID or topic validity",
            )
        )


def _handle_deep_research(
    *,
    query: Optional[str] = None,
    research_id: Optional[str] = None,
    deep_research_action: str = "start",
    provider_id: Optional[str] = None,
    system_prompt: Optional[str] = None,
    max_iterations: int = 3,
    max_sub_queries: int = 5,
    max_sources_per_query: int = 5,
    follow_links: bool = True,
    timeout_per_operation: float = 120.0,
    max_concurrent: int = 3,
    task_timeout: Optional[float] = None,
    **kwargs: Any,
) -> dict:
    """Handle deep-research action with background execution.

    CRITICAL: This handler uses asyncio.create_task() via the workflow's
    background mode to start research and return immediately with the
    research_id. The workflow runs in the background and can be polled
    via deep-research-status.

    Supports:
    - start: Begin new research, returns immediately with research_id
    - continue: Resume paused research in background
    - resume: Alias for continue (for backward compatibility)
    """
    # Normalize 'resume' to 'continue' for workflow compatibility
    if deep_research_action == "resume":
        deep_research_action = "continue"

    # Validate based on action
    if deep_research_action == "start" and not query:
        return _validation_error(
            "query",
            "deep-research",
            "Query is required to start deep research",
            remediation="Provide a research query to investigate",
        )

    if deep_research_action in ("continue",) and not research_id:
        return _validation_error(
            "research_id",
            "deep-research",
            f"research_id is required for '{deep_research_action}' action",
            remediation="Use deep-research-list to find existing research sessions",
        )

    config = _get_config()
    workflow = DeepResearchWorkflow(config.research, _get_memory())

    # Execute with background=True for non-blocking execution
    # This uses asyncio.create_task() internally and returns immediately
    result = workflow.execute(
        query=query,
        research_id=research_id,
        action=deep_research_action,
        provider_id=provider_id,
        system_prompt=system_prompt,
        max_iterations=max_iterations,
        max_sub_queries=max_sub_queries,
        max_sources_per_query=max_sources_per_query,
        follow_links=follow_links,
        timeout_per_operation=timeout_per_operation,
        max_concurrent=max_concurrent,
        background=True,  # CRITICAL: Run in background, return immediately
        task_timeout=task_timeout,
    )

    if result.success:
        # For background execution, return started status with research_id
        response_data = {
            "research_id": result.metadata.get("research_id"),
            "status": "started",
            "message": "Deep research started in background. Use deep-research-status to poll progress.",
        }

        # Include additional metadata if available (for continue/resume)
        if result.metadata.get("phase"):
            response_data["phase"] = result.metadata.get("phase")
        if result.metadata.get("iteration") is not None:
            response_data["iteration"] = result.metadata.get("iteration")

        return asdict(success_response(data=response_data))
    else:
        return asdict(
            error_response(
                result.error or "Deep research failed to start",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check query or research_id validity and provider availability",
                details={"action": deep_research_action},
            )
        )
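
# Assumed end-to-end flow for the background mode described above:
#
#   started = research(action="deep-research", query="Compare vector databases")
#   rid = started["data"]["research_id"]
#   progress = research(action="deep-research-status", research_id=rid)  # poll
#   report = research(action="deep-research-report", research_id=rid)    # when complete
#
# A sketch only: the "data" paths follow the success_response() usage in
# this module; the status/report payload fields come from the workflow itself.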


def _handle_deep_research_status(
    *,
    research_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Handle deep-research-status action."""
    if not research_id:
        return _validation_error("research_id", "deep-research-status", "Required")

    config = _get_config()
    workflow = DeepResearchWorkflow(config.research, _get_memory())

    result = workflow.execute(
        research_id=research_id,
        action="status",
    )

    if result.success:
        return asdict(success_response(data=result.metadata))
    else:
        return asdict(
            error_response(
                result.error or "Failed to get status",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Use deep-research-list to find valid research IDs",
            )
        )


def _handle_deep_research_report(
    *,
    research_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Handle deep-research-report action."""
    if not research_id:
        return _validation_error("research_id", "deep-research-report", "Required")

    config = _get_config()
    workflow = DeepResearchWorkflow(config.research, _get_memory())

    result = workflow.execute(
        research_id=research_id,
        action="report",
    )

    if result.success:
        return asdict(
            success_response(
                data={
                    "report": result.content,
                    **result.metadata,
                }
            )
        )
    else:
        return asdict(
            error_response(
                result.error or "Failed to get report",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure research is complete or use deep-research-status to check",
            )
        )


def _handle_deep_research_list(
    *,
    limit: int = 50,
    cursor: Optional[str] = None,
    completed_only: bool = False,
    **kwargs: Any,
) -> dict:
    """Handle deep-research-list action."""
    config = _get_config()
    workflow = DeepResearchWorkflow(config.research, _get_memory())

    sessions = workflow.list_sessions(
        limit=limit,
        cursor=cursor,
        completed_only=completed_only,
    )

    # Build response with pagination support
    response_data: dict[str, Any] = {
        "sessions": sessions,
        "count": len(sessions),
    }

    # Include next cursor if there are more results
    if sessions and len(sessions) == limit:
        # Use last session's ID as cursor for next page
        response_data["next_cursor"] = sessions[-1].get("id")

    return asdict(success_response(data=response_data))
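
# Cursor-pagination sketch (assumed client loop) for the listing above:
#
#   cursor = None
#   while True:
#       page = research(action="deep-research-list", limit=50, cursor=cursor)
#       for session in page["data"]["sessions"]:
#           ...  # process session
#       cursor = page["data"].get("next_cursor")
#       if not cursor:
#           break
#
# Because next_cursor is set whenever a full page comes back, a result count
# that is an exact multiple of limit costs one extra (empty) request.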


def _handle_deep_research_delete(
    *,
    research_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Handle deep-research-delete action."""
    if not research_id:
        return _validation_error("research_id", "deep-research-delete", "Required")

    config = _get_config()
    workflow = DeepResearchWorkflow(config.research, _get_memory())

    deleted = workflow.delete_session(research_id)

    if not deleted:
        return asdict(
            error_response(
                f"Research session '{research_id}' not found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Use deep-research-list to find valid research IDs",
            )
        )

    return asdict(
        success_response(
            data={
                "deleted": True,
                "research_id": research_id,
            }
        )
    )


def _handle_thread_list(
    *,
    status: Optional[str] = None,
    limit: int = 50,
    **kwargs: Any,
) -> dict:
    """Handle thread-list action."""
    thread_status = None
    if status:
        try:
            thread_status = ThreadStatus(status)
        except ValueError:
            valid = [s.value for s in ThreadStatus]
            return _validation_error(
                "status",
                "thread-list",
                f"Invalid value. Valid: {valid}",
            )

    config = _get_config()
    workflow = ChatWorkflow(config.research, _get_memory())
    threads = workflow.list_threads(status=thread_status, limit=limit)

    return asdict(
        success_response(
            data={
                "threads": threads,
                "count": len(threads),
            }
        )
    )


def _handle_thread_get(
    *,
    thread_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Handle thread-get action."""
    if not thread_id:
        return _validation_error("thread_id", "thread-get", "Required")

    config = _get_config()
    workflow = ChatWorkflow(config.research, _get_memory())
    thread = workflow.get_thread(thread_id)

    if not thread:
        return asdict(
            error_response(
                f"Thread '{thread_id}' not found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Use thread-list to find valid thread IDs",
            )
        )

    return asdict(success_response(data=thread))


def _handle_thread_delete(
    *,
    thread_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Handle thread-delete action."""
    if not thread_id:
        return _validation_error("thread_id", "thread-delete", "Required")

    config = _get_config()
    workflow = ChatWorkflow(config.research, _get_memory())
    deleted = workflow.delete_thread(thread_id)

    if not deleted:
        return asdict(
            error_response(
                f"Thread '{thread_id}' not found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Use thread-list to find valid thread IDs",
            )
        )

    return asdict(
        success_response(
            data={
                "deleted": True,
                "thread_id": thread_id,
            }
        )
    )


# =============================================================================
# Spec-Integrated Research Actions
# =============================================================================


def _load_research_node(
    spec_id: str,
    research_node_id: str,
    workspace: Optional[str] = None,
) -> tuple[Optional[dict], Optional[dict], Optional[str]]:
    """Load spec and validate research node exists.

    Returns:
        (spec_data, node_data, error_message)
    """
    from foundry_mcp.core.spec import load_spec, find_specs_directory

    specs_dir = find_specs_directory(workspace)
    if specs_dir is None:
        return None, None, "No specs directory found"

    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        return None, None, f"Specification '{spec_id}' not found"

    hierarchy = spec_data.get("hierarchy", {})
    node = hierarchy.get(research_node_id)
    if node is None:
        return None, None, f"Node '{research_node_id}' not found"

    if node.get("type") != "research":
        return None, None, f"Node '{research_node_id}' is not a research node (type: {node.get('type')})"

    return spec_data, node, None
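
# Assumed shape of a research node inside spec_data["hierarchy"], inferred
# from the accesses in this helper and the node-* handlers below:
#
#   {
#       "type": "research",
#       "title": "...",
#       "status": "pending",  # or in_progress / completed / blocked
#       "metadata": {
#           "research_type": "consensus",  # or chat / thinkdeep / ideate / deep-research
#           "query": "...",
#           "session_id": "...",
#           "findings": {...},
#           "research_history": [...],
#       },
#   }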


def _handle_node_execute(
    *,
    spec_id: Optional[str] = None,
    research_node_id: Optional[str] = None,
    workspace: Optional[str] = None,
    prompt: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Execute research workflow linked to spec node.

    Starts the research workflow configured in the node's metadata,
    and stores the session_id back in the node for tracking.
    """
    from datetime import datetime, timezone
    from foundry_mcp.core.spec import save_spec, find_specs_directory

    if not spec_id:
        return _validation_error("spec_id", "node-execute", "Required")
    if not research_node_id:
        return _validation_error("research_node_id", "node-execute", "Required")

    spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
            )
        )

    metadata = node.get("metadata", {})
    research_type = metadata.get("research_type", "consensus")
    query = prompt or metadata.get("query", "")

    if not query:
        return _validation_error("query", "node-execute", "No query found in node or prompt parameter")

    # Execute the appropriate research workflow
    config = _get_config()
    session_id = None
    result_data: dict[str, Any] = {
        "spec_id": spec_id,
        "research_node_id": research_node_id,
        "research_type": research_type,
    }

    if research_type == "chat":
        workflow = ChatWorkflow(config.research, _get_memory())
        result = workflow.chat(prompt=query)
        session_id = result.thread_id
        result_data["thread_id"] = session_id
    elif research_type == "consensus":
        workflow = ConsensusWorkflow(config.research, _get_memory())
        result = workflow.run(prompt=query)
        session_id = result.session_id
        result_data["consensus_id"] = session_id
        result_data["strategy"] = result.strategy.value if result.strategy else None
    elif research_type == "thinkdeep":
        workflow = ThinkDeepWorkflow(config.research, _get_memory())
        result = workflow.run(topic=query)
        session_id = result.investigation_id
        result_data["investigation_id"] = session_id
    elif research_type == "ideate":
        workflow = IdeateWorkflow(config.research, _get_memory())
        result = workflow.run(topic=query)
        session_id = result.ideation_id
        result_data["ideation_id"] = session_id
    elif research_type == "deep-research":
        workflow = DeepResearchWorkflow(config.research, _get_memory())
        result = workflow.start(query=query)
        session_id = result.research_id
        result_data["research_id"] = session_id
    else:
        return _validation_error("research_type", "node-execute", f"Unsupported: {research_type}")

    # Update node metadata with session info
    metadata["session_id"] = session_id
    history = metadata.setdefault("research_history", [])
    history.append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "action": "started",
        "workflow": research_type,
        "session_id": session_id,
    })
    node["metadata"] = metadata
    node["status"] = "in_progress"

    # Save spec
    specs_dir = find_specs_directory(workspace)
    if specs_dir and not save_spec(spec_id, spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save specification",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    result_data["session_id"] = session_id
    result_data["status"] = "started"
    return asdict(success_response(data=result_data))


def _handle_node_record(
    *,
    spec_id: Optional[str] = None,
    research_node_id: Optional[str] = None,
    workspace: Optional[str] = None,
    result: Optional[str] = None,
    summary: Optional[str] = None,
    key_insights: Optional[list[str]] = None,
    recommendations: Optional[list[str]] = None,
    sources: Optional[list[str]] = None,
    confidence: Optional[str] = None,
    session_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Record research findings to spec node."""
    from datetime import datetime, timezone
    from foundry_mcp.core.spec import save_spec, find_specs_directory
    from foundry_mcp.core.validation import VALID_RESEARCH_RESULTS

    if not spec_id:
        return _validation_error("spec_id", "node-record", "Required")
    if not research_node_id:
        return _validation_error("research_node_id", "node-record", "Required")
    if not result:
        return _validation_error("result", "node-record", "Required (completed, inconclusive, blocked, cancelled)")
    if result not in VALID_RESEARCH_RESULTS:
        return _validation_error("result", "node-record", f"Must be one of: {', '.join(sorted(VALID_RESEARCH_RESULTS))}")

    spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
            )
        )

    metadata = node.get("metadata", {})

    # Store findings
    metadata["findings"] = {
        "summary": summary or "",
        "key_insights": key_insights or [],
        "recommendations": recommendations or [],
        "sources": sources or [],
        "confidence": confidence or "medium",
    }

    # Update session link if provided
    if session_id:
        metadata["session_id"] = session_id

    # Add to history
    history = metadata.setdefault("research_history", [])
    history.append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "action": "completed",
        "result": result,
        "session_id": session_id or metadata.get("session_id"),
    })

    node["metadata"] = metadata

    # Update node status based on result
    if result == "completed":
        node["status"] = "completed"
    elif result == "blocked":
        node["status"] = "blocked"
    else:
        node["status"] = "pending"  # inconclusive or cancelled

    # Save spec
    specs_dir = find_specs_directory(workspace)
    if specs_dir and not save_spec(spec_id, spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save specification",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    return asdict(
        success_response(
            data={
                "spec_id": spec_id,
                "research_node_id": research_node_id,
                "result": result,
                "status": node["status"],
                "findings_recorded": True,
            }
        )
    )


def _handle_node_status(
    *,
    spec_id: Optional[str] = None,
    research_node_id: Optional[str] = None,
    workspace: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Get research node status and linked session info."""
    if not spec_id:
        return _validation_error("spec_id", "node-status", "Required")
    if not research_node_id:
        return _validation_error("research_node_id", "node-status", "Required")

    spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
            )
        )

    metadata = node.get("metadata", {})

    return asdict(
        success_response(
            data={
                "spec_id": spec_id,
                "research_node_id": research_node_id,
                "title": node.get("title"),
                "status": node.get("status"),
                "research_type": metadata.get("research_type"),
                "blocking_mode": metadata.get("blocking_mode"),
                "session_id": metadata.get("session_id"),
                "query": metadata.get("query"),
                "has_findings": bool(metadata.get("findings", {}).get("summary")),
                "history_count": len(metadata.get("research_history", [])),
            }
        )
    )


def _handle_node_findings(
    *,
    spec_id: Optional[str] = None,
    research_node_id: Optional[str] = None,
    workspace: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Retrieve recorded findings from spec node."""
    if not spec_id:
        return _validation_error("spec_id", "node-findings", "Required")
    if not research_node_id:
        return _validation_error("research_node_id", "node-findings", "Required")

    spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
            )
        )

    metadata = node.get("metadata", {})
    findings = metadata.get("findings", {})

    return asdict(
        success_response(
            data={
                "spec_id": spec_id,
                "research_node_id": research_node_id,
                "title": node.get("title"),
                "status": node.get("status"),
                "findings": findings,
                "research_history": metadata.get("research_history", []),
            }
        )
    )


# =============================================================================
# Router Setup
# =============================================================================

def _build_router() -> ActionRouter:
    """Build the action router for research tool."""
    definitions = [
        ActionDefinition(
            name="chat",
            handler=_handle_chat,
            summary=_ACTION_SUMMARY["chat"],
        ),
        ActionDefinition(
            name="consensus",
            handler=_handle_consensus,
            summary=_ACTION_SUMMARY["consensus"],
        ),
        ActionDefinition(
            name="thinkdeep",
            handler=_handle_thinkdeep,
            summary=_ACTION_SUMMARY["thinkdeep"],
        ),
        ActionDefinition(
            name="ideate",
            handler=_handle_ideate,
            summary=_ACTION_SUMMARY["ideate"],
        ),
        ActionDefinition(
            name="deep-research",
            handler=_handle_deep_research,
            summary=_ACTION_SUMMARY["deep-research"],
        ),
        ActionDefinition(
            name="deep-research-status",
            handler=_handle_deep_research_status,
            summary=_ACTION_SUMMARY["deep-research-status"],
        ),
        ActionDefinition(
            name="deep-research-report",
            handler=_handle_deep_research_report,
            summary=_ACTION_SUMMARY["deep-research-report"],
        ),
        ActionDefinition(
            name="deep-research-list",
            handler=_handle_deep_research_list,
            summary=_ACTION_SUMMARY["deep-research-list"],
        ),
        ActionDefinition(
            name="deep-research-delete",
            handler=_handle_deep_research_delete,
            summary=_ACTION_SUMMARY["deep-research-delete"],
        ),
        ActionDefinition(
            name="thread-list",
            handler=_handle_thread_list,
            summary=_ACTION_SUMMARY["thread-list"],
        ),
        ActionDefinition(
            name="thread-get",
            handler=_handle_thread_get,
            summary=_ACTION_SUMMARY["thread-get"],
        ),
        ActionDefinition(
            name="thread-delete",
            handler=_handle_thread_delete,
            summary=_ACTION_SUMMARY["thread-delete"],
        ),
        # Spec-integrated research actions
        ActionDefinition(
            name="node-execute",
            handler=_handle_node_execute,
            summary=_ACTION_SUMMARY["node-execute"],
        ),
        ActionDefinition(
            name="node-record",
            handler=_handle_node_record,
            summary=_ACTION_SUMMARY["node-record"],
        ),
        ActionDefinition(
            name="node-status",
            handler=_handle_node_status,
            summary=_ACTION_SUMMARY["node-status"],
        ),
        ActionDefinition(
            name="node-findings",
            handler=_handle_node_findings,
            summary=_ACTION_SUMMARY["node-findings"],
        ),
    ]
    return ActionRouter(tool_name="research", actions=definitions)


_RESEARCH_ROUTER = _build_router()


def _dispatch_research_action(action: str, **kwargs: Any) -> dict:
    """Dispatch action to appropriate handler."""
    try:
        return _RESEARCH_ROUTER.dispatch(action=action, **kwargs)
    except ActionRouterError as exc:
        allowed = ", ".join(exc.allowed_actions)
        return asdict(
            error_response(
                f"Unsupported research action '{action}'. Allowed: {allowed}",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation=f"Use one of: {allowed}",
                details={"action": action, "allowed_actions": exc.allowed_actions},
            )
        )


# =============================================================================
# Tool Registration
# =============================================================================

def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the unified research tool.

    Args:
        mcp: FastMCP server instance
        config: Server configuration
    """
    global _config, _memory
    _config = config
    _memory = None  # Reset to use new config

    # Check if research tools are enabled
    if not config.research.enabled:
        logger.info("Research tools disabled in config")
        return

    @canonical_tool(mcp, canonical_name="research")
    def research(
        action: str,
        prompt: Optional[str] = None,
        thread_id: Optional[str] = None,
        investigation_id: Optional[str] = None,
        ideation_id: Optional[str] = None,
        research_id: Optional[str] = None,
        topic: Optional[str] = None,
        query: Optional[str] = None,
        system_prompt: Optional[str] = None,
        provider_id: Optional[str] = None,
        model: Optional[str] = None,
        providers: Optional[list[str]] = None,
        strategy: Optional[str] = None,
        synthesis_provider: Optional[str] = None,
        timeout_per_provider: float = 30.0,
        timeout_per_operation: float = 120.0,
        max_concurrent: int = 3,
        require_all: bool = False,
        min_responses: int = 1,
        max_depth: Optional[int] = None,
        max_iterations: int = 3,
        max_sub_queries: int = 5,
        max_sources_per_query: int = 5,
        follow_links: bool = True,
        deep_research_action: str = "start",
        task_timeout: Optional[float] = None,
        ideate_action: str = "generate",
        perspective: Optional[str] = None,
        perspectives: Optional[list[str]] = None,
        cluster_ids: Optional[list[str]] = None,
        scoring_criteria: Optional[list[str]] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        title: Optional[str] = None,
        status: Optional[str] = None,
        limit: int = 50,
        cursor: Optional[str] = None,
        completed_only: bool = False,
    ) -> dict:
        """Execute research workflows via the action router.

        Actions:
        - chat: Single-model conversation with thread persistence
        - consensus: Multi-model parallel consultation with synthesis
        - thinkdeep: Hypothesis-driven systematic investigation
        - ideate: Creative brainstorming with idea clustering
        - deep-research: Multi-phase iterative deep research with query decomposition
        - deep-research-status: Get status of deep research session
        - deep-research-report: Get final report from deep research
        - deep-research-list: List deep research sessions
        - deep-research-delete: Delete a deep research session
        - thread-list: List conversation threads
        - thread-get: Get thread details including messages
        - thread-delete: Delete a conversation thread

        Args:
            action: The research action to execute
            prompt: User prompt/message (chat, consensus)
            thread_id: Thread ID for continuing conversations (chat)
            investigation_id: Investigation ID to continue (thinkdeep)
            ideation_id: Ideation session ID to continue (ideate)
            research_id: Deep research session ID (deep-research-*)
            topic: Topic for new investigation/ideation
            query: Research query (deep-research) or follow-up (thinkdeep)
            system_prompt: System prompt for workflows
            provider_id: Provider to use for single-model operations
            model: Model override
            providers: Provider list for consensus
            strategy: Consensus strategy (all_responses, synthesize, majority, first_valid)
            synthesis_provider: Provider for synthesis
            timeout_per_provider: Timeout per provider in seconds (consensus)
            timeout_per_operation: Timeout per operation in seconds (deep-research)
            max_concurrent: Max concurrent provider/operation calls
            require_all: Require all providers to succeed
            min_responses: Minimum successful responses needed
            max_depth: Maximum investigation depth (thinkdeep)
            max_iterations: Maximum refinement iterations (deep-research)
            max_sub_queries: Maximum sub-queries to generate (deep-research)
            max_sources_per_query: Maximum sources per sub-query (deep-research)
            follow_links: Whether to follow and extract links (deep-research)
            deep_research_action: Sub-action for deep-research (start, continue, resume)
            task_timeout: Overall timeout for background research task in seconds
            ideate_action: Ideation sub-action (generate, cluster, score, select, elaborate)
            perspective: Specific perspective for idea generation
            perspectives: Custom perspectives list
            cluster_ids: Cluster IDs for selection/elaboration
            scoring_criteria: Custom scoring criteria
            temperature: Sampling temperature
            max_tokens: Maximum output tokens
            title: Title for new threads
            status: Filter threads by status
            limit: Maximum items to return
            cursor: Pagination cursor for deep-research-list
            completed_only: Filter to completed sessions only (deep-research-list)

        Returns:
            Response envelope with action results
        """
        return _dispatch_research_action(
            action=action,
            prompt=prompt,
            thread_id=thread_id,
            investigation_id=investigation_id,
            ideation_id=ideation_id,
            research_id=research_id,
            topic=topic,
            query=query,
            system_prompt=system_prompt,
            provider_id=provider_id,
            model=model,
            providers=providers,
            strategy=strategy,
            synthesis_provider=synthesis_provider,
            timeout_per_provider=timeout_per_provider,
            timeout_per_operation=timeout_per_operation,
            max_concurrent=max_concurrent,
            require_all=require_all,
            min_responses=min_responses,
            max_depth=max_depth,
            max_iterations=max_iterations,
            max_sub_queries=max_sub_queries,
            max_sources_per_query=max_sources_per_query,
            follow_links=follow_links,
            deep_research_action=deep_research_action,
            task_timeout=task_timeout,
            ideate_action=ideate_action,
            perspective=perspective,
            perspectives=perspectives,
            cluster_ids=cluster_ids,
            scoring_criteria=scoring_criteria,
            temperature=temperature,
            max_tokens=max_tokens,
            title=title,
            status=status,
            limit=limit,
            cursor=cursor,
            completed_only=completed_only,
        )

    logger.debug("Registered unified research tool")


__all__ = [
    "register_unified_research_tool",
]
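
A minimal registration-and-call sketch, assuming a FastMCP setup along the lines of foundry_mcp.server (the server name and provider ids here are illustrative, not taken from the diff):

    from mcp.server.fastmcp import FastMCP
    from foundry_mcp.config import ServerConfig
    from foundry_mcp.tools.unified.research import register_unified_research_tool

    mcp = FastMCP("foundry")
    register_unified_research_tool(mcp, ServerConfig())

    # An MCP client can then invoke the "research" tool with, e.g.:
    # {"action": "consensus", "prompt": "Evaluate this design",
    #  "providers": ["claude", "gemini"], "strategy": "synthesize"}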