foundry-mcp 0.3.3__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +235 -5
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/discovery.py +6 -6
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +20 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/claude.py +6 -47
- foundry_mcp/core/providers/codex.py +6 -57
- foundry_mcp/core/providers/cursor_agent.py +3 -44
- foundry_mcp/core/providers/gemini.py +6 -57
- foundry_mcp/core/providers/opencode.py +35 -5
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +425 -0
- foundry_mcp/core/research/models.py +437 -0
- foundry_mcp/core/research/workflows/__init__.py +22 -0
- foundry_mcp/core/research/workflows/base.py +204 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +396 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +450 -0
- foundry_mcp/core/spec.py +2438 -236
- foundry_mcp/core/task.py +1064 -19
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +313 -42
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +38 -0
- foundry_mcp/tools/unified/__init__.py +4 -2
- foundry_mcp/tools/unified/authoring.py +2423 -267
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +235 -6
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +113 -1
- foundry_mcp/tools/unified/research.py +658 -0
- foundry_mcp/tools/unified/review.py +370 -16
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1163 -48
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,658 @@
|
|
|
1
|
+
"""Unified research tool with action routing.
|
|
2
|
+
|
|
3
|
+
Provides multi-model orchestration capabilities through CHAT, CONSENSUS,
|
|
4
|
+
THINKDEEP, and IDEATE workflows via a unified MCP tool interface.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import logging
|
|
10
|
+
from dataclasses import asdict
|
|
11
|
+
from typing import TYPE_CHECKING, Any, Optional
|
|
12
|
+
|
|
13
|
+
from mcp.server.fastmcp import FastMCP
|
|
14
|
+
|
|
15
|
+
from foundry_mcp.config import ServerConfig
|
|
16
|
+
from foundry_mcp.core.feature_flags import get_flag_service
|
|
17
|
+
from foundry_mcp.core.naming import canonical_tool
|
|
18
|
+
from foundry_mcp.core.research.memory import ResearchMemory
|
|
19
|
+
from foundry_mcp.core.research.models import ConsensusStrategy, ThreadStatus
|
|
20
|
+
from foundry_mcp.core.research.workflows import (
|
|
21
|
+
ChatWorkflow,
|
|
22
|
+
ConsensusWorkflow,
|
|
23
|
+
IdeateWorkflow,
|
|
24
|
+
ThinkDeepWorkflow,
|
|
25
|
+
)
|
|
26
|
+
from foundry_mcp.core.responses import (
|
|
27
|
+
ErrorCode,
|
|
28
|
+
ErrorType,
|
|
29
|
+
error_response,
|
|
30
|
+
success_response,
|
|
31
|
+
)
|
|
32
|
+
from foundry_mcp.tools.unified.router import (
|
|
33
|
+
ActionDefinition,
|
|
34
|
+
ActionRouter,
|
|
35
|
+
ActionRouterError,
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
if TYPE_CHECKING:
|
|
39
|
+
pass
|
|
40
|
+
|
|
41
|
+
logger = logging.getLogger(__name__)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
# =============================================================================
|
|
45
|
+
# Action Summaries
|
|
46
|
+
# =============================================================================
|
|
47
|
+
|
|
48
|
+
# One-line, human-readable summary per routable action; consumed by
# _build_router() when constructing each ActionDefinition.
_ACTION_SUMMARY = {
    "chat": "Single-model conversation with thread persistence",
    "consensus": "Multi-model parallel consultation with synthesis",
    "thinkdeep": "Hypothesis-driven systematic investigation",
    "ideate": "Creative brainstorming with idea clustering",
    "thread-list": "List conversation threads",
    "thread-get": "Get full thread details including messages",
    "thread-delete": "Delete a conversation thread",
}
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
# =============================================================================
|
|
60
|
+
# Module State
|
|
61
|
+
# =============================================================================
|
|
62
|
+
|
|
63
|
+
# Module-level singletons: assigned by register_unified_research_tool() and
# lazily defaulted by _get_config() / _get_memory() when registration has
# not run yet.
_config: Optional[ServerConfig] = None
_memory: Optional[ResearchMemory] = None
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _get_memory() -> ResearchMemory:
    """Get or create the research memory instance.

    Lazily constructs the module-level ``_memory`` singleton. When a server
    config has been registered, its research storage path and TTL are used;
    otherwise ``ResearchMemory`` defaults apply.

    Returns:
        The shared ResearchMemory instance.
    """
    # Only `_memory` is (re)assigned here; `_config` is merely read, so it
    # does not belong in the `global` declaration.
    global _memory
    if _memory is None:
        if _config is not None:
            _memory = ResearchMemory(
                base_path=_config.research.get_storage_path(),
                ttl_hours=_config.research.ttl_hours,
            )
        else:
            # No registered config yet: fall back to library defaults.
            _memory = ResearchMemory()
    return _memory
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def _get_config() -> ServerConfig:
    """Return the active server config, creating a default one on demand.

    The module-level ``_config`` is normally set by
    ``register_unified_research_tool``; if it has not been, a fresh
    ``ServerConfig`` is instantiated and cached.
    """
    global _config
    if _config is not None:
        return _config
    # Not registered yet — fall back to a default configuration.
    _config = ServerConfig()
    return _config
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
# =============================================================================
|
|
91
|
+
# Validation Helpers
|
|
92
|
+
# =============================================================================
|
|
93
|
+
|
|
94
|
+
def _validation_error(
    field: str,
    action: str,
    message: str,
    *,
    code: ErrorCode = ErrorCode.VALIDATION_ERROR,
    remediation: Optional[str] = None,
) -> dict:
    """Build a standardized validation-error envelope for a research action.

    Args:
        field: Name of the offending input field.
        action: Research action name (without the "research." prefix).
        message: Human-readable description of what is wrong.
        code: Error code for the envelope (defaults to VALIDATION_ERROR).
        remediation: Optional guidance; a generic hint is used when omitted.

    Returns:
        The error envelope as a plain dict.
    """
    qualified_action = f"research.{action}"
    hint = remediation or f"Provide a valid '{field}' value"
    envelope = error_response(
        f"Invalid field '{field}' for {qualified_action}: {message}",
        error_code=code,
        error_type=ErrorType.VALIDATION,
        remediation=hint,
        details={"field": field, "action": qualified_action},
    )
    return asdict(envelope)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# =============================================================================
|
|
115
|
+
# Action Handlers
|
|
116
|
+
# =============================================================================
|
|
117
|
+
|
|
118
|
+
def _handle_chat(
    *,
    prompt: Optional[str] = None,
    thread_id: Optional[str] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    model: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    title: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Run one chat turn, creating a new thread or continuing an existing one.

    Requires a non-empty ``prompt``; all other arguments are forwarded to
    the ChatWorkflow unchanged.
    """
    if not prompt:
        return _validation_error("prompt", "chat", "Required non-empty string")

    workflow = ChatWorkflow(_get_config().research, _get_memory())
    result = workflow.execute(
        prompt=prompt,
        thread_id=thread_id,
        system_prompt=system_prompt,
        provider_id=provider_id,
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        title=title,
    )

    if not result.success:
        return asdict(
            error_response(
                result.error or "Chat failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check provider availability and retry",
            )
        )

    payload = {
        "content": result.content,
        "thread_id": result.metadata.get("thread_id"),
        "message_count": result.metadata.get("message_count"),
        "provider_id": result.provider_id,
        "model_used": result.model_used,
        "tokens_used": result.tokens_used,
    }
    return asdict(success_response(data=payload))
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def _handle_consensus(
    *,
    prompt: Optional[str] = None,
    providers: Optional[list[str]] = None,
    strategy: Optional[str] = None,
    synthesis_provider: Optional[str] = None,
    system_prompt: Optional[str] = None,
    timeout_per_provider: float = 30.0,
    max_concurrent: int = 3,
    require_all: bool = False,
    min_responses: int = 1,
    **kwargs: Any,
) -> dict:
    """Fan a prompt out to multiple providers and combine their answers.

    Defaults to the SYNTHESIZE strategy when ``strategy`` is omitted;
    unknown strategy names are rejected with a validation error.
    """
    if not prompt:
        return _validation_error("prompt", "consensus", "Required non-empty string")

    # Resolve the strategy string into the enum, defaulting to synthesis.
    chosen_strategy = ConsensusStrategy.SYNTHESIZE
    if strategy:
        try:
            chosen_strategy = ConsensusStrategy(strategy)
        except ValueError:
            valid = [s.value for s in ConsensusStrategy]
            return _validation_error(
                "strategy",
                "consensus",
                f"Invalid value. Valid: {valid}",
                remediation=f"Use one of: {', '.join(valid)}",
            )

    workflow = ConsensusWorkflow(_get_config().research, _get_memory())
    result = workflow.execute(
        prompt=prompt,
        providers=providers,
        strategy=chosen_strategy,
        synthesis_provider=synthesis_provider,
        system_prompt=system_prompt,
        timeout_per_provider=timeout_per_provider,
        max_concurrent=max_concurrent,
        require_all=require_all,
        min_responses=min_responses,
    )

    if not result.success:
        return asdict(
            error_response(
                result.error or "Consensus failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check provider availability and retry",
                details=result.metadata,
            )
        )

    payload = {
        "content": result.content,
        "consensus_id": result.metadata.get("consensus_id"),
        "providers_consulted": result.metadata.get("providers_consulted"),
        "strategy": result.metadata.get("strategy"),
        "response_count": result.metadata.get("response_count"),
    }
    return asdict(success_response(data=payload))
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
def _handle_thinkdeep(
    *,
    topic: Optional[str] = None,
    investigation_id: Optional[str] = None,
    query: Optional[str] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    max_depth: Optional[int] = None,
    **kwargs: Any,
) -> dict:
    """Start or continue a hypothesis-driven investigation.

    A new investigation needs ``topic``; an existing one is continued via
    ``investigation_id``. Supplying neither is a validation error.
    """
    if not (topic or investigation_id):
        return _validation_error(
            "topic/investigation_id",
            "thinkdeep",
            "Either 'topic' (new) or 'investigation_id' (continue) required",
        )

    workflow = ThinkDeepWorkflow(_get_config().research, _get_memory())
    result = workflow.execute(
        topic=topic,
        investigation_id=investigation_id,
        query=query,
        system_prompt=system_prompt,
        provider_id=provider_id,
        max_depth=max_depth,
    )

    if not result.success:
        return asdict(
            error_response(
                result.error or "ThinkDeep failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check investigation ID or topic validity",
            )
        )

    payload = {
        "content": result.content,
        "investigation_id": result.metadata.get("investigation_id"),
        "current_depth": result.metadata.get("current_depth"),
        "max_depth": result.metadata.get("max_depth"),
        "converged": result.metadata.get("converged"),
        "hypothesis_count": result.metadata.get("hypothesis_count"),
        "step_count": result.metadata.get("step_count"),
    }
    return asdict(success_response(data=payload))
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _handle_ideate(
    *,
    topic: Optional[str] = None,
    ideation_id: Optional[str] = None,
    ideate_action: str = "generate",
    perspective: Optional[str] = None,
    cluster_ids: Optional[list[str]] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    perspectives: Optional[list[str]] = None,
    scoring_criteria: Optional[list[str]] = None,
    **kwargs: Any,
) -> dict:
    """Start or continue a brainstorming session.

    A new session needs ``topic``; an existing one is continued via
    ``ideation_id``. ``ideate_action`` selects the sub-phase to run.
    """
    if not (topic or ideation_id):
        return _validation_error(
            "topic/ideation_id",
            "ideate",
            "Either 'topic' (new) or 'ideation_id' (continue) required",
        )

    workflow = IdeateWorkflow(_get_config().research, _get_memory())
    result = workflow.execute(
        topic=topic,
        ideation_id=ideation_id,
        action=ideate_action,
        perspective=perspective,
        cluster_ids=cluster_ids,
        system_prompt=system_prompt,
        provider_id=provider_id,
        perspectives=perspectives,
        scoring_criteria=scoring_criteria,
    )

    if not result.success:
        return asdict(
            error_response(
                result.error or "Ideate failed",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check ideation ID or topic validity",
            )
        )

    payload = {
        "content": result.content,
        "ideation_id": result.metadata.get("ideation_id"),
        "phase": result.metadata.get("phase"),
        "idea_count": result.metadata.get("idea_count"),
        "cluster_count": result.metadata.get("cluster_count"),
    }
    return asdict(success_response(data=payload))
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def _handle_thread_list(
    *,
    status: Optional[str] = None,
    limit: int = 50,
    **kwargs: Any,
) -> dict:
    """List conversation threads, optionally filtered by status.

    Args:
        status: Optional ThreadStatus value to filter by; an unrecognized
            value yields a validation error listing the valid options.
        limit: Maximum number of threads to return.

    Returns:
        Success envelope with "threads" and "count".
    """
    thread_status = None
    if status:
        try:
            thread_status = ThreadStatus(status)
        except ValueError:
            valid = [s.value for s in ThreadStatus]
            # Include an explicit remediation listing valid statuses, for
            # consistency with the strategy validation in _handle_consensus.
            return _validation_error(
                "status",
                "thread-list",
                f"Invalid value. Valid: {valid}",
                remediation=f"Use one of: {', '.join(valid)}",
            )

    config = _get_config()
    workflow = ChatWorkflow(config.research, _get_memory())
    threads = workflow.list_threads(status=thread_status, limit=limit)

    return asdict(
        success_response(
            data={
                "threads": threads,
                "count": len(threads),
            }
        )
    )
|
|
387
|
+
|
|
388
|
+
|
|
389
|
+
def _handle_thread_get(
    *,
    thread_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Fetch one conversation thread, including its messages.

    Returns a NOT_FOUND error envelope when the thread does not exist.
    """
    if not thread_id:
        return _validation_error("thread_id", "thread-get", "Required")

    chat = ChatWorkflow(_get_config().research, _get_memory())
    thread = chat.get_thread(thread_id)

    if thread:
        return asdict(success_response(data=thread))

    return asdict(
        error_response(
            f"Thread '{thread_id}' not found",
            error_code=ErrorCode.NOT_FOUND,
            error_type=ErrorType.NOT_FOUND,
            remediation="Use thread-list to find valid thread IDs",
        )
    )
|
|
413
|
+
|
|
414
|
+
|
|
415
|
+
def _handle_thread_delete(
    *,
    thread_id: Optional[str] = None,
    **kwargs: Any,
) -> dict:
    """Delete a conversation thread by ID.

    Returns a NOT_FOUND error envelope when nothing was deleted.
    """
    if not thread_id:
        return _validation_error("thread_id", "thread-delete", "Required")

    chat = ChatWorkflow(_get_config().research, _get_memory())
    removed = chat.delete_thread(thread_id)

    if removed:
        return asdict(
            success_response(
                data={
                    "deleted": True,
                    "thread_id": thread_id,
                }
            )
        )

    return asdict(
        error_response(
            f"Thread '{thread_id}' not found",
            error_code=ErrorCode.NOT_FOUND,
            error_type=ErrorType.NOT_FOUND,
            remediation="Use thread-list to find valid thread IDs",
        )
    )
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
# =============================================================================
|
|
449
|
+
# Router Setup
|
|
450
|
+
# =============================================================================
|
|
451
|
+
|
|
452
|
+
def _build_router() -> ActionRouter:
    """Build the action router for the research tool.

    Pairs each action name with its handler and the matching summary from
    _ACTION_SUMMARY, preserving registration order.
    """
    handler_map = {
        "chat": _handle_chat,
        "consensus": _handle_consensus,
        "thinkdeep": _handle_thinkdeep,
        "ideate": _handle_ideate,
        "thread-list": _handle_thread_list,
        "thread-get": _handle_thread_get,
        "thread-delete": _handle_thread_delete,
    }
    definitions = [
        ActionDefinition(name=name, handler=handler, summary=_ACTION_SUMMARY[name])
        for name, handler in handler_map.items()
    ]
    return ActionRouter(tool_name="research", actions=definitions)
|
|
492
|
+
|
|
493
|
+
|
|
494
|
+
# Shared router instance used by _dispatch_research_action().
_RESEARCH_ROUTER = _build_router()
|
|
495
|
+
|
|
496
|
+
|
|
497
|
+
def _dispatch_research_action(action: str, **kwargs: Any) -> dict:
    """Route *action* to its registered handler.

    Unknown actions are translated into a validation-error envelope that
    lists the allowed action names.
    """
    try:
        return _RESEARCH_ROUTER.dispatch(action=action, **kwargs)
    except ActionRouterError as exc:
        allowed = ", ".join(exc.allowed_actions)
        failure = error_response(
            f"Unsupported research action '{action}'. Allowed: {allowed}",
            error_code=ErrorCode.VALIDATION_ERROR,
            error_type=ErrorType.VALIDATION,
            remediation=f"Use one of: {allowed}",
            details={"action": action, "allowed_actions": exc.allowed_actions},
        )
        return asdict(failure)
|
|
512
|
+
|
|
513
|
+
|
|
514
|
+
# =============================================================================
|
|
515
|
+
# Tool Registration
|
|
516
|
+
# =============================================================================
|
|
517
|
+
|
|
518
|
+
def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the unified research tool.

    Stores *config* as the module-level config, resets the cached research
    memory so it is rebuilt from the new config, and registers the
    ``research`` MCP tool unless research tools are disabled.

    Args:
        mcp: FastMCP server instance
        config: Server configuration
    """
    global _config, _memory
    _config = config
    _memory = None  # Reset to use new config

    # Check if research tools are enabled
    if not config.research.enabled:
        logger.info("Research tools disabled in config")
        return

    @canonical_tool(mcp, canonical_name="research")
    def research(
        action: str,
        prompt: Optional[str] = None,
        thread_id: Optional[str] = None,
        investigation_id: Optional[str] = None,
        ideation_id: Optional[str] = None,
        topic: Optional[str] = None,
        query: Optional[str] = None,
        system_prompt: Optional[str] = None,
        provider_id: Optional[str] = None,
        model: Optional[str] = None,
        providers: Optional[list[str]] = None,
        strategy: Optional[str] = None,
        synthesis_provider: Optional[str] = None,
        timeout_per_provider: float = 30.0,
        max_concurrent: int = 3,
        require_all: bool = False,
        min_responses: int = 1,
        max_depth: Optional[int] = None,
        ideate_action: str = "generate",
        perspective: Optional[str] = None,
        perspectives: Optional[list[str]] = None,
        cluster_ids: Optional[list[str]] = None,
        scoring_criteria: Optional[list[str]] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        title: Optional[str] = None,
        status: Optional[str] = None,
        limit: int = 50,
    ) -> dict:
        """Execute research workflows via the action router.

        Actions:
            - chat: Single-model conversation with thread persistence
            - consensus: Multi-model parallel consultation with synthesis
            - thinkdeep: Hypothesis-driven systematic investigation
            - ideate: Creative brainstorming with idea clustering
            - thread-list: List conversation threads
            - thread-get: Get thread details including messages
            - thread-delete: Delete a conversation thread

        Args:
            action: The research action to execute
            prompt: User prompt/message (chat, consensus)
            thread_id: Thread ID for continuing conversations (chat)
            investigation_id: Investigation ID to continue (thinkdeep)
            ideation_id: Ideation session ID to continue (ideate)
            topic: Topic for new investigation/ideation
            query: Follow-up query (thinkdeep)
            system_prompt: System prompt for workflows
            provider_id: Provider to use for single-model operations
            model: Model override
            providers: Provider list for consensus
            strategy: Consensus strategy (all_responses, synthesize, majority, first_valid)
            synthesis_provider: Provider for synthesis
            timeout_per_provider: Timeout per provider in seconds
            max_concurrent: Max concurrent provider calls
            require_all: Require all providers to succeed
            min_responses: Minimum successful responses needed
            max_depth: Maximum investigation depth (thinkdeep)
            ideate_action: Ideation sub-action (generate, cluster, score, select, elaborate)
            perspective: Specific perspective for idea generation
            perspectives: Custom perspectives list
            cluster_ids: Cluster IDs for selection/elaboration
            scoring_criteria: Custom scoring criteria
            temperature: Sampling temperature
            max_tokens: Maximum output tokens
            title: Title for new threads
            status: Filter threads by status
            limit: Maximum items to return

        Returns:
            Response envelope with action results
        """
        # Check feature flag
        flag_service = get_flag_service()
        if not flag_service.is_enabled("research_tools"):
            return asdict(
                error_response(
                    "Research tools are not enabled",
                    error_code=ErrorCode.FEATURE_DISABLED,
                    error_type=ErrorType.UNAVAILABLE,
                    remediation="Enable 'research_tools' feature flag in configuration",
                )
            )

        return _dispatch_research_action(
            action=action,
            prompt=prompt,
            thread_id=thread_id,
            investigation_id=investigation_id,
            ideation_id=ideation_id,
            topic=topic,
            query=query,
            system_prompt=system_prompt,
            provider_id=provider_id,
            model=model,
            providers=providers,
            strategy=strategy,
            synthesis_provider=synthesis_provider,
            timeout_per_provider=timeout_per_provider,
            max_concurrent=max_concurrent,
            require_all=require_all,
            min_responses=min_responses,
            max_depth=max_depth,
            ideate_action=ideate_action,
            perspective=perspective,
            perspectives=perspectives,
            cluster_ids=cluster_ids,
            scoring_criteria=scoring_criteria,
            temperature=temperature,
            max_tokens=max_tokens,
            title=title,
            status=status,
            limit=limit,
        )

    logger.debug("Registered unified research tool")
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
# Public API: only the registration entry point is exported.
__all__ = [
    "register_unified_research_tool",
]
|