foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,405 @@
|
|
|
1
|
+
"""THINKDEEP workflow for hypothesis-driven systematic investigation.
|
|
2
|
+
|
|
3
|
+
Provides deep investigation capabilities with hypothesis tracking,
|
|
4
|
+
evidence accumulation, and confidence progression.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any, Optional
|
|
9
|
+
|
|
10
|
+
from foundry_mcp.config import ResearchConfig
|
|
11
|
+
from foundry_mcp.core.research.memory import ResearchMemory
|
|
12
|
+
from foundry_mcp.core.research.models import (
|
|
13
|
+
ConfidenceLevel,
|
|
14
|
+
Hypothesis,
|
|
15
|
+
InvestigationStep,
|
|
16
|
+
ThinkDeepState,
|
|
17
|
+
)
|
|
18
|
+
from foundry_mcp.core.research.workflows.base import ResearchWorkflowBase, WorkflowResult
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ThinkDeepWorkflow(ResearchWorkflowBase):
    """Hypothesis-driven systematic investigation workflow.

    Runs an investigation as a sequence of provider calls, tracking state
    (hypotheses, evidence, depth) in a persisted ``ThinkDeepState``.

    Features:
    - Multi-step investigation with depth tracking
    - Hypothesis creation and tracking
    - Evidence accumulation (supporting/contradicting)
    - Confidence level progression
    - Convergence detection
    - State persistence across sessions
    """

    def __init__(
        self,
        config: ResearchConfig,
        memory: Optional[ResearchMemory] = None,
    ) -> None:
        """Initialize thinkdeep workflow.

        Args:
            config: Research configuration
            memory: Optional memory instance
                (presumably the base class supplies a default when None,
                since ``self.memory`` is used unconditionally later —
                TODO confirm against ResearchWorkflowBase)
        """
        super().__init__(config, memory)
|
|
47
|
+
|
|
48
|
+
def execute(
    self,
    topic: Optional[str] = None,
    investigation_id: Optional[str] = None,
    query: Optional[str] = None,
    system_prompt: Optional[str] = None,
    provider_id: Optional[str] = None,
    max_depth: Optional[int] = None,
    **kwargs: Any,
) -> WorkflowResult:
    """Run one investigation step, starting or resuming an investigation.

    Starts a new investigation (requires ``topic``) or continues an
    existing one (requires ``investigation_id``, optionally ``query``).

    Args:
        topic: Topic for a new investigation
        investigation_id: Existing investigation to continue
        query: Follow-up query for a continued investigation
        system_prompt: System prompt for new investigations
        provider_id: Provider to use
        max_depth: Maximum investigation depth (config default when None)

    Returns:
        WorkflowResult with investigation findings
    """
    # Resolve state: resume an existing investigation or seed a new one.
    if investigation_id:
        state = self.memory.load_investigation(investigation_id)
        if not state:
            return WorkflowResult(
                success=False,
                content="",
                error=f"Investigation {investigation_id} not found",
            )
        # Caller-supplied query wins; otherwise derive the next question.
        current_query = query if query else self._generate_next_query(state)
    elif topic:
        state = ThinkDeepState(
            topic=topic,
            max_depth=max_depth or self.config.thinkdeep_max_depth,
            system_prompt=system_prompt,
        )
        current_query = self._generate_initial_query(topic)
    else:
        return WorkflowResult(
            success=False,
            content="",
            error="Either 'topic' (for new investigation) or 'investigation_id' (to continue) is required",
        )

    # A converged investigation only gets its summary back — no new step.
    if state.converged:
        return WorkflowResult(
            success=True,
            content=self._format_summary(state),
            metadata={
                "investigation_id": state.id,
                "converged": True,
                "convergence_reason": state.convergence_reason,
                "hypothesis_count": len(state.hypotheses),
                "step_count": len(state.steps),
            },
        )

    # Run one provider-backed investigation step.
    result = self._execute_investigation_step(
        state=state,
        query=current_query,
        provider_id=provider_id,
    )
    if not result.success:
        return result

    # Re-evaluate convergence, then persist before reporting.
    state.check_convergence()
    self.memory.save_investigation(state)

    result.metadata.update(
        {
            "investigation_id": state.id,
            "current_depth": state.current_depth,
            "max_depth": state.max_depth,
            "converged": state.converged,
            "hypothesis_count": len(state.hypotheses),
            "step_count": len(state.steps),
        }
    )
    if state.converged:
        result.metadata["convergence_reason"] = state.convergence_reason

    return result
|
|
141
|
+
|
|
142
|
+
def _generate_initial_query(self, topic: str) -> str:
|
|
143
|
+
"""Generate the initial investigation query.
|
|
144
|
+
|
|
145
|
+
Args:
|
|
146
|
+
topic: Investigation topic
|
|
147
|
+
|
|
148
|
+
Returns:
|
|
149
|
+
Initial query string
|
|
150
|
+
"""
|
|
151
|
+
return f"Let's investigate: {topic}\n\nWhat are the key aspects we should explore? Please identify 2-3 initial hypotheses we can investigate."
|
|
152
|
+
|
|
153
|
+
def _generate_next_query(self, state: ThinkDeepState) -> str:
    """Build the follow-up prompt from the current hypothesis slate.

    Args:
        state: Current investigation state

    Returns:
        Next query string
    """
    # One bullet per hypothesis with its current confidence label.
    bullet_lines = [
        f"- {hyp.statement} (confidence: {hyp.confidence.value})"
        for hyp in state.hypotheses
    ]
    hypotheses_block = "\n".join(bullet_lines)

    return (
        "Based on our investigation so far:\n"
        "\n"
        f"Topic: {state.topic}\n"
        "\n"
        "Current hypotheses:\n"
        f"{hypotheses_block}\n"
        "\n"
        "What additional evidence or questions should we explore to increase confidence in or refute these hypotheses?"
    )
|
|
176
|
+
|
|
177
|
+
def _execute_investigation_step(
    self,
    state: ThinkDeepState,
    query: str,
    provider_id: Optional[str],
) -> WorkflowResult:
    """Execute a single investigation step against the provider.

    Args:
        state: Investigation state (mutated: step appended, depth bumped)
        query: Query for this step
        provider_id: Provider to use

    Returns:
        WorkflowResult with step findings
    """
    # Per-investigation system prompt wins over the built-in default.
    prompt_context = state.system_prompt
    if not prompt_context:
        prompt_context = self._build_investigation_system_prompt()

    provider_result = self._execute_provider(
        prompt=query,
        provider_id=provider_id,
        system_prompt=prompt_context,
    )
    if not provider_result.success:
        # Failed provider call: do not record a step or advance depth.
        return provider_result

    # Record this step and what the provider produced.
    new_step = state.add_step(query=query, depth=state.current_depth)
    new_step.response = provider_result.content
    new_step.provider_id = provider_result.provider_id
    new_step.model_used = provider_result.model_used

    # Fold the response back into the hypothesis set.
    self._update_hypotheses_from_response(state, new_step, provider_result.content)

    state.current_depth += 1
    return provider_result
|
|
219
|
+
|
|
220
|
+
def _build_investigation_system_prompt(self) -> str:
|
|
221
|
+
"""Build the system prompt for investigation.
|
|
222
|
+
|
|
223
|
+
Returns:
|
|
224
|
+
System prompt string
|
|
225
|
+
"""
|
|
226
|
+
return """You are a systematic researcher conducting a deep investigation.
|
|
227
|
+
|
|
228
|
+
When analyzing topics:
|
|
229
|
+
1. Identify key hypotheses that could explain the phenomenon
|
|
230
|
+
2. Look for evidence that supports or contradicts each hypothesis
|
|
231
|
+
3. Update confidence levels based on evidence strength
|
|
232
|
+
4. Suggest next questions to increase understanding
|
|
233
|
+
|
|
234
|
+
For each response, structure your findings as:
|
|
235
|
+
- Key insights discovered
|
|
236
|
+
- Evidence for/against existing hypotheses
|
|
237
|
+
- New hypotheses to consider
|
|
238
|
+
- Recommended next steps
|
|
239
|
+
|
|
240
|
+
Be thorough but concise. Focus on advancing understanding systematically."""
|
|
241
|
+
|
|
242
|
+
def _update_hypotheses_from_response(
    self,
    state: ThinkDeepState,
    step: InvestigationStep,
    response: str,
) -> None:
    """Parse a provider response and update the hypothesis set.

    Simplified keyword heuristic: seeds one generic hypothesis early in
    the investigation and records supporting/contradicting evidence based
    on phrase matches. A more sophisticated version could use structured
    output or NLP to extract hypotheses more accurately.

    Args:
        state: Investigation state (hypotheses may be added/updated)
        step: Current investigation step (tracks generated/updated ids)
        response: Provider response text
    """
    text = response.lower()

    # Early in the investigation, seed a generic hypothesis the first
    # time the response talks about hypotheses at all.
    if state.current_depth < 2:
        if ("hypothesis" in text or "suggests that" in text) and not state.hypotheses:
            seeded = state.add_hypothesis(
                statement=f"Initial investigation of: {state.topic}",
                confidence=ConfidenceLevel.SPECULATION,
            )
            step.hypotheses_generated.append(seeded.id)

    supporting_markers = ("supports", "confirms", "evidence for", "consistent with")
    contradicting_markers = ("contradicts", "refutes", "evidence against", "inconsistent")
    snippet = f"{response[:200]}..."

    for hypothesis in state.hypotheses:
        # Supporting language: record evidence and promote confidence
        # one level (speculation -> low -> medium).
        if any(marker in text for marker in supporting_markers):
            hypothesis.add_evidence(f"Step {step.id}: {snippet}", supporting=True)
            step.hypotheses_updated.append(hypothesis.id)

            if hypothesis.confidence == ConfidenceLevel.SPECULATION:
                hypothesis.update_confidence(ConfidenceLevel.LOW)
            elif hypothesis.confidence == ConfidenceLevel.LOW:
                hypothesis.update_confidence(ConfidenceLevel.MEDIUM)

        # Contradicting language: record evidence only (no demotion).
        if any(marker in text for marker in contradicting_markers):
            hypothesis.add_evidence(f"Step {step.id}: {snippet}", supporting=False)
            step.hypotheses_updated.append(hypothesis.id)
|
|
296
|
+
|
|
297
|
+
def _format_summary(self, state: ThinkDeepState) -> str:
    """Render a markdown summary of the investigation.

    Args:
        state: Investigation state

    Returns:
        Formatted summary string
    """
    lines = [f"# Investigation Summary: {state.topic}\n"]

    if state.converged:
        status_line = f"**Status**: Converged ({state.convergence_reason})\n"
    else:
        status_line = f"**Status**: In progress (depth {state.current_depth}/{state.max_depth})\n"
    lines.append(status_line)

    lines.append(f"**Steps completed**: {len(state.steps)}\n")
    lines.append(f"**Hypotheses tracked**: {len(state.hypotheses)}\n")

    # One subsection per hypothesis with its evidence tallies.
    if state.hypotheses:
        lines.append("\n## Hypotheses\n")
        for hypothesis in state.hypotheses:
            lines.extend(
                [
                    f"### {hypothesis.statement}",
                    f"- Confidence: {hypothesis.confidence.value}",
                    f"- Supporting evidence: {len(hypothesis.supporting_evidence)}",
                    f"- Contradicting evidence: {len(hypothesis.contradicting_evidence)}\n",
                ]
            )

    return "\n".join(lines)
|
|
325
|
+
|
|
326
|
+
def get_investigation(self, investigation_id: str) -> Optional[dict[str, Any]]:
    """Get full investigation details as a plain dict.

    Args:
        investigation_id: Investigation identifier

    Returns:
        Investigation data or None if not found
    """
    state = self.memory.load_investigation(investigation_id)
    if not state:
        return None

    hypothesis_rows = [
        {
            "id": h.id,
            "statement": h.statement,
            "confidence": h.confidence.value,
            "supporting_evidence_count": len(h.supporting_evidence),
            "contradicting_evidence_count": len(h.contradicting_evidence),
        }
        for h in state.hypotheses
    ]

    step_rows = []
    for s in state.steps:
        # Truncate long responses to a 200-char preview.
        preview = s.response[:200] + "..." if s.response and len(s.response) > 200 else s.response
        step_rows.append(
            {
                "id": s.id,
                "depth": s.depth,
                "query": s.query,
                "response_preview": preview,
                "timestamp": s.timestamp.isoformat(),
            }
        )

    return {
        "id": state.id,
        "topic": state.topic,
        "current_depth": state.current_depth,
        "max_depth": state.max_depth,
        "converged": state.converged,
        "convergence_reason": state.convergence_reason,
        "created_at": state.created_at.isoformat(),
        "updated_at": state.updated_at.isoformat(),
        "hypotheses": hypothesis_rows,
        "steps": step_rows,
    }
|
|
369
|
+
|
|
370
|
+
def list_investigations(self, limit: Optional[int] = 50) -> list[dict[str, Any]]:
    """List investigation summaries.

    Args:
        limit: Maximum investigations to return

    Returns:
        List of investigation summaries
    """
    summaries = []
    for record in self.memory.list_investigations(limit=limit):
        summaries.append(
            {
                "id": record.id,
                "topic": record.topic,
                "current_depth": record.current_depth,
                "max_depth": record.max_depth,
                "converged": record.converged,
                "hypothesis_count": len(record.hypotheses),
                "step_count": len(record.steps),
                "created_at": record.created_at.isoformat(),
                "updated_at": record.updated_at.isoformat(),
            }
        )
    return summaries
|
|
395
|
+
|
|
396
|
+
def delete_investigation(self, investigation_id: str) -> bool:
    """Remove a persisted investigation.

    Args:
        investigation_id: Investigation identifier

    Returns:
        True if deleted, False if not found
    """
    # Pure delegation: the memory layer owns deletion semantics.
    was_deleted = self.memory.delete_investigation(investigation_id)
    return was_deleted
|