foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of foundry-mcp has been flagged for review. See the registry's advisory page for details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,1234 @@
|
|
|
1
|
+
"""Pydantic models for research workflows.
|
|
2
|
+
|
|
3
|
+
These models define the data structures for conversation threading,
|
|
4
|
+
multi-model consensus, hypothesis-driven investigation, and creative
|
|
5
|
+
brainstorming workflows.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from enum import Enum
|
|
10
|
+
from typing import Any, Optional
|
|
11
|
+
from uuid import uuid4
|
|
12
|
+
|
|
13
|
+
from pydantic import BaseModel, Field
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# =============================================================================
|
|
17
|
+
# Enums
|
|
18
|
+
# =============================================================================
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class WorkflowType(str, Enum):
    """Types of research workflows available.

    Inherits from ``str`` so members serialize as plain strings in
    Pydantic models and JSON payloads.
    """

    CHAT = "chat"  # Conversational threading (ConversationThread)
    CONSENSUS = "consensus"  # Multi-model parallel consultation (ConsensusState)
    THINKDEEP = "thinkdeep"  # Hypothesis-driven investigation (ThinkDeepState)
    IDEATE = "ideate"  # Creative brainstorming (IdeationState)
    DEEP_RESEARCH = "deep_research"  # Multi-phase iterative research (DeepResearchConfig)
|
|
31
|
+
class ConfidenceLevel(str, Enum):
    """Confidence levels for hypotheses in THINKDEEP workflow.

    Ordered from weakest to strongest belief. HIGH and CONFIRMED are
    treated as "settled" by ThinkDeepState.check_convergence().
    """

    SPECULATION = "speculation"  # Initial, unsupported guess
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CONFIRMED = "confirmed"  # Evidence-backed conclusion
|
+
|
|
41
|
+
class ConsensusStrategy(str, Enum):
    """Strategies for synthesizing multi-model responses in CONSENSUS workflow.

    Selected via ConsensusConfig.strategy; SYNTHESIZE additionally uses
    ConsensusConfig.synthesis_provider.
    """

    ALL_RESPONSES = "all_responses"  # Return all responses without synthesis
    SYNTHESIZE = "synthesize"  # Use a model to synthesize responses
    MAJORITY = "majority"  # Use majority vote for factual questions
    FIRST_VALID = "first_valid"  # Return first successful response
49
|
+
|
|
50
|
+
class ThreadStatus(str, Enum):
    """Status of a conversation thread (ConversationThread.status)."""

    ACTIVE = "active"  # Thread is open for new messages (default)
    COMPLETED = "completed"
    ARCHIVED = "archived"
57
|
+
|
|
58
|
+
class IdeationPhase(str, Enum):
    """Phases of the IDEATE workflow.

    Member order is significant: IdeationState.advance_phase() steps
    through these values in declaration order.
    """

    DIVERGENT = "divergent"  # Generate diverse ideas
    CONVERGENT = "convergent"  # Cluster and score ideas
    SELECTION = "selection"  # Select clusters for elaboration
    ELABORATION = "elaboration"  # Develop selected ideas
|
66
|
+
|
|
67
|
+
# =============================================================================
|
|
68
|
+
# Conversation Models (CHAT workflow)
|
|
69
|
+
# =============================================================================
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class ConversationMessage(BaseModel):
    """A single message in a conversation thread.

    Instances are normally created via ConversationThread.add_message(),
    which fills in role/content and provenance fields.
    """

    # Short random identifier, e.g. "msg-1a2b3c4d".
    id: str = Field(default_factory=lambda: f"msg-{uuid4().hex[:8]}")
    role: str = Field(..., description="Message role: 'user' or 'assistant'")
    content: str = Field(..., description="Message content")
    # NOTE(review): datetime.utcnow() yields a naive datetime and is deprecated
    # in Python 3.12+ — confirm whether timezone-aware timestamps are wanted.
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    provider_id: Optional[str] = Field(
        default=None, description="Provider that generated this message"
    )
    model_used: Optional[str] = Field(
        default=None, description="Model that generated this message"
    )
    tokens_used: Optional[int] = Field(
        default=None, description="Tokens consumed for this message"
    )
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="Additional message metadata"
    )
|
|
92
|
+
|
|
93
|
+
class ConversationThread(BaseModel):
    """A conversation thread holding an ordered message history."""

    id: str = Field(default_factory=lambda: f"thread-{uuid4().hex[:12]}")
    title: Optional[str] = Field(default=None, description="Optional thread title")
    status: ThreadStatus = Field(default=ThreadStatus.ACTIVE)
    messages: list[ConversationMessage] = Field(default_factory=list)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    provider_id: Optional[str] = Field(
        default=None, description="Default provider for this thread"
    )
    system_prompt: Optional[str] = Field(
        default=None, description="System prompt for this thread"
    )
    metadata: dict[str, Any] = Field(
        default_factory=dict, description="Additional thread metadata"
    )

    def add_message(
        self,
        role: str,
        content: str,
        provider_id: Optional[str] = None,
        model_used: Optional[str] = None,
        tokens_used: Optional[int] = None,
        **metadata: Any,
    ) -> ConversationMessage:
        """Append a new message to the thread and refresh ``updated_at``.

        Extra keyword arguments are stored as the message's metadata.
        Returns the newly created ConversationMessage.
        """
        entry = ConversationMessage(
            role=role,
            content=content,
            provider_id=provider_id,
            model_used=model_used,
            tokens_used=tokens_used,
            metadata=metadata,
        )
        self.messages.append(entry)
        self.updated_at = datetime.utcnow()
        return entry

    def get_context_messages(
        self, max_messages: Optional[int] = None
    ) -> list[ConversationMessage]:
        """Return messages for prompt context.

        When ``max_messages`` is given and smaller than the history, only
        the most recent ``max_messages`` entries are returned; otherwise
        the full message list is returned as-is.
        """
        if max_messages is not None and max_messages < len(self.messages):
            return self.messages[-max_messages:]
        return self.messages
|
|
142
|
+
|
|
143
|
+
# =============================================================================
|
|
144
|
+
# THINKDEEP Models (Hypothesis-driven investigation)
|
|
145
|
+
# =============================================================================
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
class Hypothesis(BaseModel):
    """A hypothesis being investigated in THINKDEEP workflow."""

    id: str = Field(default_factory=lambda: f"hyp-{uuid4().hex[:8]}")
    statement: str = Field(..., description="The hypothesis statement")
    confidence: ConfidenceLevel = Field(default=ConfidenceLevel.SPECULATION)
    supporting_evidence: list[str] = Field(default_factory=list)
    contradicting_evidence: list[str] = Field(default_factory=list)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    metadata: dict[str, Any] = Field(default_factory=dict)

    def add_evidence(self, evidence: str, supporting: bool = True) -> None:
        """Record one piece of evidence for (default) or against this hypothesis."""
        bucket = self.supporting_evidence if supporting else self.contradicting_evidence
        bucket.append(evidence)
        self.updated_at = datetime.utcnow()

    def update_confidence(self, new_confidence: ConfidenceLevel) -> None:
        """Set a new confidence level and refresh ``updated_at``."""
        self.confidence = new_confidence
        self.updated_at = datetime.utcnow()
|
+
|
|
173
|
+
|
|
174
|
+
class InvestigationStep(BaseModel):
    """A single step in a THINKDEEP investigation.

    Steps are created via ThinkDeepState.add_step(); the response and
    hypothesis-link fields are filled in as the step executes.
    """

    id: str = Field(default_factory=lambda: f"step-{uuid4().hex[:8]}")
    depth: int = Field(..., description="Depth level of this step (0-indexed)")
    query: str = Field(..., description="The question or query for this step")
    response: Optional[str] = Field(default=None, description="Model response")
    hypotheses_generated: list[str] = Field(
        default_factory=list, description="IDs of hypotheses generated in this step"
    )
    hypotheses_updated: list[str] = Field(
        default_factory=list, description="IDs of hypotheses updated in this step"
    )
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    # Provenance: which provider/model produced the response, if known.
    provider_id: Optional[str] = Field(default=None)
    model_used: Optional[str] = Field(default=None)
    metadata: dict[str, Any] = Field(default_factory=dict)
191
|
+
|
|
192
|
+
|
|
193
|
+
class ThinkDeepState(BaseModel):
    """Mutable state for a THINKDEEP investigation session."""

    id: str = Field(default_factory=lambda: f"investigation-{uuid4().hex[:12]}")
    topic: str = Field(..., description="The topic being investigated")
    current_depth: int = Field(default=0, description="Current investigation depth")
    max_depth: int = Field(default=5, description="Maximum investigation depth")
    hypotheses: list[Hypothesis] = Field(default_factory=list)
    steps: list[InvestigationStep] = Field(default_factory=list)
    converged: bool = Field(
        default=False, description="Whether investigation has converged"
    )
    convergence_reason: Optional[str] = Field(
        default=None, description="Reason for convergence if converged"
    )
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    system_prompt: Optional[str] = Field(default=None)
    metadata: dict[str, Any] = Field(default_factory=dict)

    def add_hypothesis(self, statement: str, **kwargs: Any) -> Hypothesis:
        """Create a new hypothesis, register it, and return it."""
        created = Hypothesis(statement=statement, **kwargs)
        self.hypotheses.append(created)
        self.updated_at = datetime.utcnow()
        return created

    def get_hypothesis(self, hypothesis_id: str) -> Optional[Hypothesis]:
        """Look up a hypothesis by its ID, or None when absent."""
        return next(
            (hyp for hyp in self.hypotheses if hyp.id == hypothesis_id), None
        )

    def add_step(self, query: str, depth: Optional[int] = None) -> InvestigationStep:
        """Create a new investigation step at ``depth`` (default: current depth)."""
        effective_depth = self.current_depth if depth is None else depth
        step = InvestigationStep(depth=effective_depth, query=query)
        self.steps.append(step)
        self.updated_at = datetime.utcnow()
        return step

    def check_convergence(self) -> bool:
        """Evaluate and record whether the investigation should stop.

        Converges when the depth budget is exhausted, or when every
        hypothesis has reached HIGH or CONFIRMED confidence.
        """
        if self.current_depth >= self.max_depth:
            self.converged = True
            self.convergence_reason = "Maximum depth reached"
            return True

        settled = (ConfidenceLevel.HIGH, ConfidenceLevel.CONFIRMED)
        if self.hypotheses and all(
            hyp.confidence in settled for hyp in self.hypotheses
        ):
            self.converged = True
            self.convergence_reason = "All hypotheses reached high confidence"
            return True

        return False
254
|
+
|
|
255
|
+
|
|
256
|
+
# =============================================================================
|
|
257
|
+
# IDEATE Models (Creative brainstorming)
|
|
258
|
+
# =============================================================================
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
class Idea(BaseModel):
    """A single idea generated in IDEATE workflow.

    Ideas are created via IdeationState.add_idea(); ``score`` and
    ``cluster_id`` are assigned in later phases.
    """

    id: str = Field(default_factory=lambda: f"idea-{uuid4().hex[:8]}")
    content: str = Field(..., description="The idea content")
    perspective: Optional[str] = Field(
        default=None, description="Perspective that generated this idea"
    )
    score: Optional[float] = Field(
        default=None, description="Score from 0-1 based on criteria"
    )
    cluster_id: Optional[str] = Field(
        default=None, description="ID of cluster this idea belongs to"
    )
    created_at: datetime = Field(default_factory=datetime.utcnow)
    # Provenance: which provider/model produced this idea, if known.
    provider_id: Optional[str] = Field(default=None)
    model_used: Optional[str] = Field(default=None)
    metadata: dict[str, Any] = Field(default_factory=dict)
279
|
+
|
|
280
|
+
|
|
281
|
+
class IdeaCluster(BaseModel):
    """A cluster of related ideas in IDEATE workflow.

    Membership is tracked by idea IDs (see
    IdeationState.assign_idea_to_cluster), not object references.
    """

    id: str = Field(default_factory=lambda: f"cluster-{uuid4().hex[:8]}")
    name: str = Field(..., description="Cluster name/theme")
    description: Optional[str] = Field(default=None, description="Cluster description")
    idea_ids: list[str] = Field(default_factory=list, description="IDs of ideas in cluster")
    average_score: Optional[float] = Field(default=None)
    selected_for_elaboration: bool = Field(default=False)
    elaboration: Optional[str] = Field(
        default=None, description="Detailed elaboration if selected"
    )
    created_at: datetime = Field(default_factory=datetime.utcnow)
    metadata: dict[str, Any] = Field(default_factory=dict)
295
|
+
|
|
296
|
+
|
|
297
|
+
class IdeationState(BaseModel):
    """Mutable state for an IDEATE brainstorming session."""

    id: str = Field(default_factory=lambda: f"ideation-{uuid4().hex[:12]}")
    topic: str = Field(..., description="The topic being brainstormed")
    phase: IdeationPhase = Field(default=IdeationPhase.DIVERGENT)
    perspectives: list[str] = Field(
        default_factory=lambda: ["technical", "creative", "practical", "visionary"]
    )
    ideas: list[Idea] = Field(default_factory=list)
    clusters: list[IdeaCluster] = Field(default_factory=list)
    scoring_criteria: list[str] = Field(
        default_factory=lambda: ["novelty", "feasibility", "impact"]
    )
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    system_prompt: Optional[str] = Field(default=None)
    metadata: dict[str, Any] = Field(default_factory=dict)

    def add_idea(
        self,
        content: str,
        perspective: Optional[str] = None,
        **kwargs: Any,
    ) -> Idea:
        """Create a new idea, register it, and return it."""
        created = Idea(content=content, perspective=perspective, **kwargs)
        self.ideas.append(created)
        self.updated_at = datetime.utcnow()
        return created

    def create_cluster(self, name: str, description: Optional[str] = None) -> IdeaCluster:
        """Create a new, initially empty idea cluster and return it."""
        new_cluster = IdeaCluster(name=name, description=description)
        self.clusters.append(new_cluster)
        self.updated_at = datetime.utcnow()
        return new_cluster

    def assign_idea_to_cluster(self, idea_id: str, cluster_id: str) -> bool:
        """Link an idea to a cluster by their IDs.

        Returns True when both exist (the link is recorded on both sides,
        without duplicating the ID in the cluster); False otherwise.
        """
        idea = next((candidate for candidate in self.ideas if candidate.id == idea_id), None)
        if idea is None:
            return False
        cluster = next((candidate for candidate in self.clusters if candidate.id == cluster_id), None)
        if cluster is None:
            return False

        idea.cluster_id = cluster_id
        if idea_id not in cluster.idea_ids:
            cluster.idea_ids.append(idea_id)
        self.updated_at = datetime.utcnow()
        return True

    def advance_phase(self) -> IdeationPhase:
        """Move to the next phase (no-op at the final phase); return the phase."""
        ordered = list(IdeationPhase)
        position = ordered.index(self.phase)
        if position + 1 < len(ordered):
            self.phase = ordered[position + 1]
            self.updated_at = datetime.utcnow()
        return self.phase
|
356
|
+
|
|
357
|
+
|
|
358
|
+
# =============================================================================
|
|
359
|
+
# CONSENSUS Models (Multi-model parallel execution)
|
|
360
|
+
# =============================================================================
|
|
361
|
+
|
|
362
|
+
|
|
363
|
+
class ModelResponse(BaseModel):
    """A response from a single model in CONSENSUS workflow.

    ``success`` distinguishes usable responses from failures; failed
    responses carry the reason in ``error_message``.
    """

    provider_id: str = Field(..., description="Provider that generated this response")
    model_used: Optional[str] = Field(default=None)
    content: str = Field(..., description="Response content")
    success: bool = Field(default=True)
    error_message: Optional[str] = Field(default=None)
    tokens_used: Optional[int] = Field(default=None)
    # Wall-clock latency of the provider call, if measured.
    duration_ms: Optional[float] = Field(default=None)
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    metadata: dict[str, Any] = Field(default_factory=dict)
375
|
+
|
|
376
|
+
|
|
377
|
+
class ConsensusConfig(BaseModel):
    """Configuration for a CONSENSUS workflow execution.

    Controls which providers are consulted, how their responses are
    combined (see ConsensusStrategy), and the success criteria.
    """

    providers: list[str] = Field(
        ..., description="List of provider IDs to consult", min_length=1
    )
    strategy: ConsensusStrategy = Field(default=ConsensusStrategy.SYNTHESIZE)
    synthesis_provider: Optional[str] = Field(
        default=None, description="Provider to use for synthesis (if strategy=synthesize)"
    )
    timeout_per_provider: float = Field(
        default=360.0, description="Timeout in seconds per provider"
    )
    max_concurrent: int = Field(
        default=3, description="Maximum concurrent provider calls"
    )
    require_all: bool = Field(
        default=False, description="Require all providers to succeed"
    )
    # Quorum threshold checked by ConsensusState.is_quorum_met().
    min_responses: int = Field(
        default=1, description="Minimum responses needed for success"
    )
|
399
|
+
|
|
400
|
+
|
|
401
|
+
class ConsensusState(BaseModel):
    """Mutable state for a CONSENSUS workflow execution."""

    id: str = Field(default_factory=lambda: f"consensus-{uuid4().hex[:12]}")
    prompt: str = Field(..., description="The prompt sent to all providers")
    config: ConsensusConfig = Field(..., description="Consensus configuration")
    responses: list[ModelResponse] = Field(default_factory=list)
    synthesis: Optional[str] = Field(
        default=None, description="Synthesized response if strategy requires it"
    )
    completed: bool = Field(default=False)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    completed_at: Optional[datetime] = Field(default=None)
    system_prompt: Optional[str] = Field(default=None)
    metadata: dict[str, Any] = Field(default_factory=dict)

    def add_response(self, response: ModelResponse) -> None:
        """Record one provider's response."""
        self.responses.append(response)

    def successful_responses(self) -> list[ModelResponse]:
        """Return the subset of responses that succeeded."""
        return [resp for resp in self.responses if resp.success]

    def failed_responses(self) -> list[ModelResponse]:
        """Return the subset of responses that failed."""
        return [resp for resp in self.responses if not resp.success]

    def is_quorum_met(self) -> bool:
        """Whether enough successful responses arrived (config.min_responses)."""
        return len(self.successful_responses()) >= self.config.min_responses

    def mark_completed(self, synthesis: Optional[str] = None) -> None:
        """Mark this consensus run finished, optionally storing a synthesis.

        A falsy ``synthesis`` (None or empty string) leaves the stored
        synthesis untouched.
        """
        self.completed = True
        self.completed_at = datetime.utcnow()
        if synthesis:
            self.synthesis = synthesis
|
439
|
+
|
|
440
|
+
|
|
441
|
+
class DeepResearchConfig(BaseModel):
    """Configuration for DEEP_RESEARCH workflow execution.

    Groups deep research parameters into a single config object to reduce
    parameter sprawl in the MCP tool interface. All fields have sensible
    defaults that can be overridden at the tool level.

    Note: Provider configuration is handled via ResearchConfig TOML settings,
    not through this config object. This is intentional - providers should be
    configured at the server level, not per-request.
    """

    max_iterations: int = Field(
        default=3,
        ge=1,
        le=10,
        description="Maximum refinement iterations before forced completion",
    )
    max_sub_queries: int = Field(
        default=5,
        ge=1,
        le=20,
        description="Maximum sub-queries for query decomposition",
    )
    max_sources_per_query: int = Field(
        default=5,
        ge=1,
        le=50,
        description="Maximum sources to gather per sub-query",
    )
    follow_links: bool = Field(
        default=True,
        description="Whether to follow URLs and extract full content",
    )
    timeout_per_operation: float = Field(
        default=360.0,
        ge=1.0,
        le=1800.0,
        description="Timeout in seconds for each search/fetch operation",
    )
    max_concurrent: int = Field(
        default=3,
        ge=1,
        le=10,
        description="Maximum concurrent operations (search, fetch)",
    )

    @classmethod
    def from_defaults(cls) -> "DeepResearchConfig":
        """Build a config carrying every default value.

        Returns:
            DeepResearchConfig with sensible defaults
        """
        return cls()

    def merge_overrides(self, **overrides: Any) -> "DeepResearchConfig":
        """Build a new config from this one plus the given overrides.

        Args:
            **overrides: Field values to override (None values are ignored;
                keys that are not config fields are silently dropped)

        Returns:
            New DeepResearchConfig with overrides applied
        """
        merged = self.model_dump()
        merged.update(
            {
                key: value
                for key, value in overrides.items()
                if value is not None and key in merged
            }
        )
        return DeepResearchConfig(**merged)
|
511
|
+
|
|
512
|
+
|
|
513
|
+
# =============================================================================
|
|
514
|
+
# DEEP RESEARCH Models (Multi-phase iterative research)
|
|
515
|
+
# =============================================================================
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
class DeepResearchPhase(str, Enum):
    """Phases of the DEEP_RESEARCH workflow.

    The deep research workflow progresses through five sequential phases:
    1. PLANNING - Analyze the query and decompose into focused sub-queries
    2. GATHERING - Execute sub-queries in parallel and collect sources
    3. ANALYSIS - Extract findings and assess source quality
    4. SYNTHESIS - Combine findings into a comprehensive report
    5. REFINEMENT - Identify gaps and potentially loop back for more research

    The ordering of these enum values is significant - it defines the
    progression through advance_phase() method.
    """

    PLANNING = "planning"
    GATHERING = "gathering"
    ANALYSIS = "analysis"
    SYNTHESIS = "synthesis"
    REFINEMENT = "refinement"
|
537
|
+
|
|
538
|
+
|
|
539
|
+
class PhaseMetrics(BaseModel):
    """Metrics for a single phase execution.

    Tracks timing, token usage, and provider information for each phase
    of the deep research workflow. Used for audit and cost tracking.
    Instances are collected in DeepResearchState.phase_metrics.
    """

    # Phase name is stored as a plain string rather than DeepResearchPhase,
    # so arbitrary labels are accepted — see the field description.
    phase: str = Field(..., description="Phase name (planning, analysis, etc.)")
    duration_ms: float = Field(default=0.0, description="Phase duration in milliseconds")
    input_tokens: int = Field(default=0, description="Tokens consumed by the prompt")
    output_tokens: int = Field(default=0, description="Tokens generated in the response")
    cached_tokens: int = Field(default=0, description="Tokens served from cache")
    provider_id: Optional[str] = Field(default=None, description="Provider used for this phase")
    model_used: Optional[str] = Field(default=None, description="Model used for this phase")
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
class SourceType(str, Enum):
    """Kinds of research sources a sub-query can discover.

    WEB (general web search results, e.g. via Tavily/Google) and
    ACADEMIC (papers and journals, e.g. via Semantic Scholar) are the
    types used by the V1 implementation. EXPERT and CODE are reserved
    placeholders for future integrations (expert profiles/interviews
    and code-repository search respectively).
    """

    WEB = "web"
    ACADEMIC = "academic"
    EXPERT = "expert"  # reserved: expert profiles, interviews
    CODE = "code"  # reserved: GitHub, code search
|
|
573
|
+
|
|
574
|
+
|
|
575
|
+
class SourceQuality(str, Enum):
    """Quality grade assigned to a research source.

    Grades are assigned during the ANALYSIS phase; sources start out as
    UNKNOWN. Assessment considers source authority and credibility,
    content recency and relevance, citation count / peer-review status
    for academic material, and domain reputation for web material.
    """

    UNKNOWN = "unknown"  # not yet assessed
    LOW = "low"  # questionable reliability
    MEDIUM = "medium"  # generally reliable
    HIGH = "high"  # authoritative source
|
|
589
|
+
|
|
590
|
+
|
|
591
|
+
class ResearchMode(str, Enum):
    """Research modes that control source prioritization.

    Each mode applies a different set of domain-based quality
    heuristics: GENERAL uses no domain preferences (the default
    balanced approach), ACADEMIC favors journals, publishers and
    preprint servers, and TECHNICAL favors official documentation,
    arxiv, and code repositories.
    """

    GENERAL = "general"
    ACADEMIC = "academic"
    TECHNICAL = "technical"
|
|
603
|
+
|
|
604
|
+
|
|
605
|
+
# Domain tier lists for source quality assessment by research mode
# Patterns support wildcards: "*.edu" matches any .edu domain
# NOTE(review): the "high"/"low" tier keys appear to correspond to
# SourceQuality.HIGH/LOW heuristics; domains in neither tier presumably get
# no adjustment — confirm at the assessment call site.
DOMAIN_TIERS: dict[str, dict[str, list[str]]] = {
    "academic": {
        "high": [
            # Aggregators & indexes
            "scholar.google.com",
            "semanticscholar.org",
            "pubmed.gov",
            "ncbi.nlm.nih.gov",
            "jstor.org",
            # Major publishers
            "springer.com",
            "link.springer.com",
            "sciencedirect.com",
            "elsevier.com",
            "wiley.com",
            "onlinelibrary.wiley.com",
            "tandfonline.com",  # Taylor & Francis
            "sagepub.com",
            "nature.com",
            "science.org",  # AAAS/Science
            "frontiersin.org",
            "plos.org",
            "journals.plos.org",
            "mdpi.com",
            "oup.com",
            "academic.oup.com",  # Oxford
            "cambridge.org",
            # Preprints & open access
            "arxiv.org",
            "biorxiv.org",
            "medrxiv.org",
            "psyarxiv.com",
            "ssrn.com",
            # Field-specific
            "apa.org",
            "psycnet.apa.org",  # Psychology
            "aclanthology.org",  # Computational linguistics
            # CS/Tech academic
            "acm.org",
            "dl.acm.org",
            "ieee.org",
            "ieeexplore.ieee.org",
            # Institutional patterns
            "*.edu",
            "*.ac.uk",
            "*.edu.au",
        ],
        "low": [
            "reddit.com",
            "quora.com",
            "medium.com",
            "linkedin.com",
            "twitter.com",
            "x.com",
            "facebook.com",
            "pinterest.com",
            "instagram.com",
            "tiktok.com",
            "youtube.com",  # Can have good content but inconsistent
        ],
    },
    "technical": {
        "high": [
            # Preprints (technical papers)
            "arxiv.org",
            # Official documentation patterns
            "docs.*",
            "developer.*",
            "*.dev",
            "devdocs.io",
            # Code & technical resources
            "github.com",
            "stackoverflow.com",
            "stackexchange.com",
            # Language/framework official sites
            "python.org",
            "docs.python.org",
            "nodejs.org",
            "rust-lang.org",
            "doc.rust-lang.org",
            "go.dev",
            "typescriptlang.org",
            "react.dev",
            "vuejs.org",
            "angular.io",
            # Cloud providers
            "aws.amazon.com",
            "cloud.google.com",
            "docs.microsoft.com",
            "learn.microsoft.com",
            "azure.microsoft.com",
            # Tech company engineering blogs
            "engineering.fb.com",
            "netflixtechblog.com",
            "uber.com/blog/engineering",  # NOTE(review): path, not a bare domain — confirm matcher handles it
            "blog.google",
            # Academic (relevant for technical research)
            "acm.org",
            "dl.acm.org",
            "ieee.org",
            "ieeexplore.ieee.org",
        ],
        "low": [
            "reddit.com",
            "quora.com",
            "linkedin.com",
            "twitter.com",
            "x.com",
            "facebook.com",
            "pinterest.com",
        ],
    },
    "general": {
        "high": [],  # No domain preferences
        "low": [
            # Still deprioritize social media
            "pinterest.com",
            "facebook.com",
            "instagram.com",
            "tiktok.com",
        ],
    },
}
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
class SubQuery(BaseModel):
    """A decomposed sub-query for focused research.

    During the PLANNING phase, the original research query is decomposed
    into multiple focused sub-queries. Each sub-query targets a specific
    aspect of the research question and can be executed independently
    during the GATHERING phase.

    Status transitions:
    - pending -> executing -> completed (success path)
    - pending -> executing -> failed (error path)
    """

    # Short random ID, e.g. "subq-1a2b3c4d".
    id: str = Field(default_factory=lambda: f"subq-{uuid4().hex[:8]}")
    query: str = Field(..., description="The focused sub-query text")
    rationale: Optional[str] = Field(
        default=None,
        description="Why this sub-query was generated and what aspect it covers",
    )
    priority: int = Field(
        default=1,
        description="Execution priority (1=highest, larger=lower priority)",
    )
    # Plain string rather than an Enum; valid values listed in the description.
    status: str = Field(
        default="pending",
        description="Current status: pending, executing, completed, failed",
    )
    source_ids: list[str] = Field(
        default_factory=list,
        description="IDs of ResearchSource objects found for this query",
    )
    findings_summary: Optional[str] = Field(
        default=None,
        description="Brief summary of what was found from this sub-query",
    )
    # NOTE(review): datetime.utcnow() yields a *naive* UTC timestamp and is
    # deprecated since Python 3.12 — confirm serialized-state expectations
    # before migrating to timezone-aware datetimes.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    completed_at: Optional[datetime] = Field(default=None)
    error: Optional[str] = Field(
        default=None,
        description="Error message if status is 'failed'",
    )
    metadata: dict[str, Any] = Field(default_factory=dict)

    def mark_completed(self, findings: Optional[str] = None) -> None:
        """Mark this sub-query as successfully completed.

        Sets status to "completed" and stamps completed_at; the findings
        summary is only overwritten when a non-empty value is supplied.

        Args:
            findings: Optional summary of findings from this sub-query
        """
        self.status = "completed"
        self.completed_at = datetime.utcnow()
        if findings:
            self.findings_summary = findings

    def mark_failed(self, error: str) -> None:
        """Mark this sub-query as failed with an error message.

        Args:
            error: Description of why the sub-query failed
        """
        self.status = "failed"
        self.completed_at = datetime.utcnow()
        self.error = error
|
|
795
|
+
|
|
796
|
+
|
|
797
|
+
class ResearchSource(BaseModel):
    """A source discovered during research.

    Sources are collected during the GATHERING phase when sub-queries
    are executed against search providers. Each source represents a
    piece of external content (web page, paper, etc.) that may contain
    relevant information for the research query.

    Quality is assessed during the ANALYSIS phase based on source
    authority, content relevance, and other factors.
    """

    # Short random ID, e.g. "src-1a2b3c4d".
    id: str = Field(default_factory=lambda: f"src-{uuid4().hex[:8]}")
    url: Optional[str] = Field(
        default=None,
        description="URL of the source (may be None for non-web sources)",
    )
    title: str = Field(..., description="Title or headline of the source")
    source_type: SourceType = Field(
        default=SourceType.WEB,
        description="Type of source (web, academic, etc.)",
    )
    # Starts as UNKNOWN; assigned during the ANALYSIS phase.
    quality: SourceQuality = Field(
        default=SourceQuality.UNKNOWN,
        description="Assessed quality level of this source",
    )
    snippet: Optional[str] = Field(
        default=None,
        description="Brief excerpt or description from the source",
    )
    content: Optional[str] = Field(
        default=None,
        description="Full extracted content (if follow_links enabled)",
    )
    sub_query_id: Optional[str] = Field(
        default=None,
        description="ID of the SubQuery that discovered this source",
    )
    # Naive UTC timestamp — consistent with the rest of this module.
    discovered_at: datetime = Field(default_factory=datetime.utcnow)
    metadata: dict[str, Any] = Field(default_factory=dict)
|
|
837
|
+
|
|
838
|
+
|
|
839
|
+
class ResearchFinding(BaseModel):
    """A key finding extracted from research sources.

    Findings are extracted during the ANALYSIS phase by examining
    source content and identifying key insights. Each finding has
    an associated confidence level and links back to supporting sources.

    Findings are organized by category/theme during synthesis to
    create a structured report.
    """

    # Short random ID, e.g. "find-1a2b3c4d".
    id: str = Field(default_factory=lambda: f"find-{uuid4().hex[:8]}")
    content: str = Field(..., description="The key finding or insight")
    # ConfidenceLevel is defined elsewhere in this file (shared across workflows).
    confidence: ConfidenceLevel = Field(
        default=ConfidenceLevel.MEDIUM,
        description="Confidence level in this finding",
    )
    source_ids: list[str] = Field(
        default_factory=list,
        description="IDs of ResearchSource objects supporting this finding",
    )
    sub_query_id: Optional[str] = Field(
        default=None,
        description="ID of SubQuery that produced this finding",
    )
    category: Optional[str] = Field(
        default=None,
        description="Theme or category for organizing findings",
    )
    created_at: datetime = Field(default_factory=datetime.utcnow)
    metadata: dict[str, Any] = Field(default_factory=dict)
|
|
870
|
+
|
|
871
|
+
|
|
872
|
+
class ResearchGap(BaseModel):
    """An identified gap in the research requiring follow-up.

    Gaps are identified during the ANALYSIS and SYNTHESIS phases when
    the research reveals missing information or unanswered questions.
    Each gap includes suggested follow-up queries that can be used
    in subsequent refinement iterations.

    Gaps drive the REFINEMENT phase: if unresolved gaps exist and
    max_iterations hasn't been reached, the workflow loops back
    to GATHERING with new sub-queries derived from gap suggestions.
    """

    # Short random ID, e.g. "gap-1a2b3c4d".
    id: str = Field(default_factory=lambda: f"gap-{uuid4().hex[:8]}")
    description: str = Field(
        ...,
        description="Description of the knowledge gap or missing information",
    )
    suggested_queries: list[str] = Field(
        default_factory=list,
        description="Follow-up queries that could fill this gap",
    )
    priority: int = Field(
        default=1,
        description="Priority for follow-up (1=highest, larger=lower priority)",
    )
    resolved: bool = Field(
        default=False,
        description="Whether this gap has been addressed in a refinement iteration",
    )
    resolution_notes: Optional[str] = Field(
        default=None,
        description="Notes on how the gap was resolved",
    )
    created_at: datetime = Field(default_factory=datetime.utcnow)
|
|
907
|
+
|
|
908
|
+
|
|
909
|
+
class DeepResearchState(BaseModel):
    """Main state model for a deep research session.

    Manages the entire lifecycle of a multi-phase research workflow:
    - Tracks the current phase and iteration
    - Contains all sub-queries, sources, findings, and gaps
    - Provides helper methods for state manipulation
    - Handles phase advancement and refinement iteration logic

    The state is persisted to enable session resume capability.
    """

    # Short random session ID, e.g. "deepres-1a2b3c4d5e6f".
    id: str = Field(default_factory=lambda: f"deepres-{uuid4().hex[:12]}")
    original_query: str = Field(..., description="The original research query")
    research_brief: Optional[str] = Field(
        default=None,
        description="Expanded research plan generated in PLANNING phase",
    )
    phase: DeepResearchPhase = Field(
        default=DeepResearchPhase.PLANNING,
        description="Current workflow phase",
    )
    iteration: int = Field(
        default=1,
        description="Current refinement iteration (1-based)",
    )
    max_iterations: int = Field(
        default=3,
        description="Maximum refinement iterations before forced completion",
    )

    # Collections
    sub_queries: list[SubQuery] = Field(default_factory=list)
    sources: list[ResearchSource] = Field(default_factory=list)
    findings: list[ResearchFinding] = Field(default_factory=list)
    gaps: list[ResearchGap] = Field(default_factory=list)

    # Final output
    report: Optional[str] = Field(
        default=None,
        description="Final synthesized research report",
    )
    report_sections: dict[str, str] = Field(
        default_factory=dict,
        description="Named sections of the report for structured access",
    )

    # Execution tracking
    total_sources_examined: int = Field(default=0)
    total_tokens_used: int = Field(default=0)
    total_duration_ms: float = Field(default=0.0)

    # Per-phase metrics for audit
    phase_metrics: list[PhaseMetrics] = Field(
        default_factory=list,
        description="Metrics for each executed phase (timing, tokens, provider)",
    )
    # Search provider query counts (provider_name -> query_count)
    search_provider_stats: dict[str, int] = Field(
        default_factory=dict,
        description="Count of queries executed per search provider",
    )

    # Configuration
    source_types: list[SourceType] = Field(
        default_factory=lambda: [SourceType.WEB, SourceType.ACADEMIC],
    )
    max_sources_per_query: int = Field(default=5)
    max_sub_queries: int = Field(default=5)
    follow_links: bool = Field(
        default=True,
        description="Whether to follow URLs and extract full content",
    )
    research_mode: ResearchMode = Field(
        default=ResearchMode.GENERAL,
        description="Research mode for source prioritization",
    )

    # Timestamps
    # NOTE(review): datetime.utcnow() returns naive UTC and is deprecated in
    # Python 3.12+ — confirm persisted-state format before switching to aware datetimes.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    completed_at: Optional[datetime] = Field(default=None)

    # Provider tracking (per-phase LLM provider configuration)
    # Supports ProviderSpec format: "[cli]gemini:pro" or simple names: "gemini"
    # NOTE(review): no gathering_provider field — gathering presumably uses the
    # search providers tracked in search_provider_stats rather than an LLM.
    planning_provider: Optional[str] = Field(default=None)
    analysis_provider: Optional[str] = Field(default=None)
    synthesis_provider: Optional[str] = Field(default=None)
    refinement_provider: Optional[str] = Field(default=None)
    # Per-phase model overrides (from ProviderSpec parsing)
    planning_model: Optional[str] = Field(default=None)
    analysis_model: Optional[str] = Field(default=None)
    synthesis_model: Optional[str] = Field(default=None)
    refinement_model: Optional[str] = Field(default=None)

    system_prompt: Optional[str] = Field(default=None)
    metadata: dict[str, Any] = Field(default_factory=dict)
|
|
1006
|
+
|
|
1007
|
+
# =========================================================================
|
|
1008
|
+
# Collection Management Methods
|
|
1009
|
+
# =========================================================================
|
|
1010
|
+
|
|
1011
|
+
def add_sub_query(
    self,
    query: str,
    rationale: Optional[str] = None,
    priority: int = 1,
) -> SubQuery:
    """Create a SubQuery, register it on this state, and return it.

    Also refreshes the state's updated_at timestamp.

    Args:
        query: The focused sub-query text
        rationale: Why this sub-query was generated
        priority: Execution priority (1=highest)

    Returns:
        The created SubQuery instance
    """
    created = SubQuery(query=query, rationale=rationale, priority=priority)
    self.sub_queries.append(created)
    self.updated_at = datetime.utcnow()
    return created
|
|
1031
|
+
|
|
1032
|
+
def get_sub_query(self, sub_query_id: str) -> Optional[SubQuery]:
    """Return the sub-query with the given ID, or None when not found."""
    return next((sq for sq in self.sub_queries if sq.id == sub_query_id), None)
|
|
1038
|
+
|
|
1039
|
+
def get_source(self, source_id: str) -> Optional[ResearchSource]:
    """Return the source with the given ID, or None when not found."""
    return next((src for src in self.sources if src.id == source_id), None)
|
|
1045
|
+
|
|
1046
|
+
def get_gap(self, gap_id: str) -> Optional[ResearchGap]:
    """Return the gap with the given ID, or None when not found."""
    return next((g for g in self.gaps if g.id == gap_id), None)
|
|
1052
|
+
|
|
1053
|
+
def add_source(
    self,
    title: str,
    url: Optional[str] = None,
    source_type: SourceType = SourceType.WEB,
    snippet: Optional[str] = None,
    sub_query_id: Optional[str] = None,
    **kwargs: Any,
) -> ResearchSource:
    """Record a newly discovered source on this state.

    Appends the source, increments total_sources_examined, and
    refreshes updated_at.

    Args:
        title: Source title
        url: Source URL (optional)
        source_type: Type of source
        snippet: Brief excerpt
        sub_query_id: ID of sub-query that found this
        **kwargs: Additional ResearchSource fields passed through verbatim

    Returns:
        The created ResearchSource instance
    """
    discovered = ResearchSource(
        title=title,
        url=url,
        source_type=source_type,
        snippet=snippet,
        sub_query_id=sub_query_id,
        **kwargs,
    )
    self.sources.append(discovered)
    self.total_sources_examined += 1
    self.updated_at = datetime.utcnow()
    return discovered
|
|
1087
|
+
|
|
1088
|
+
def add_finding(
    self,
    content: str,
    confidence: ConfidenceLevel = ConfidenceLevel.MEDIUM,
    source_ids: Optional[list[str]] = None,
    sub_query_id: Optional[str] = None,
    category: Optional[str] = None,
) -> ResearchFinding:
    """Record an extracted research finding on this state.

    Appends the finding and refreshes updated_at.

    Args:
        content: The finding content
        confidence: Confidence level
        source_ids: Supporting source IDs
        sub_query_id: Originating sub-query ID
        category: Theme/category

    Returns:
        The created ResearchFinding instance
    """
    recorded = ResearchFinding(
        content=content,
        confidence=confidence,
        source_ids=source_ids or [],
        sub_query_id=sub_query_id,
        category=category,
    )
    self.findings.append(recorded)
    self.updated_at = datetime.utcnow()
    return recorded
|
|
1118
|
+
|
|
1119
|
+
def add_gap(
    self,
    description: str,
    suggested_queries: Optional[list[str]] = None,
    priority: int = 1,
) -> ResearchGap:
    """Record an identified research gap on this state.

    Appends the gap and refreshes updated_at.

    Args:
        description: What information is missing
        suggested_queries: Follow-up queries to fill the gap
        priority: Priority for follow-up (1=highest)

    Returns:
        The created ResearchGap instance
    """
    identified = ResearchGap(
        description=description,
        suggested_queries=suggested_queries or [],
        priority=priority,
    )
    self.gaps.append(identified)
    self.updated_at = datetime.utcnow()
    return identified
|
|
1143
|
+
|
|
1144
|
+
# =========================================================================
|
|
1145
|
+
# Query Helpers
|
|
1146
|
+
# =========================================================================
|
|
1147
|
+
|
|
1148
|
+
def pending_sub_queries(self) -> list[SubQuery]:
    """Return the sub-queries still waiting to be executed."""
    return [candidate for candidate in self.sub_queries if candidate.status == "pending"]
|
|
1151
|
+
|
|
1152
|
+
def completed_sub_queries(self) -> list[SubQuery]:
    """Return the sub-queries that finished successfully."""
    return [candidate for candidate in self.sub_queries if candidate.status == "completed"]
|
|
1155
|
+
|
|
1156
|
+
def unresolved_gaps(self) -> list[ResearchGap]:
    """Return the gaps not yet addressed by a refinement iteration."""
    return [gap for gap in self.gaps if not gap.resolved]
|
|
1159
|
+
|
|
1160
|
+
# =========================================================================
|
|
1161
|
+
# Phase Management
|
|
1162
|
+
# =========================================================================
|
|
1163
|
+
|
|
1164
|
+
def advance_phase(self) -> DeepResearchPhase:
    """Move to the next research phase in declaration order.

    The progression is PLANNING -> GATHERING -> ANALYSIS -> SYNTHESIS
    -> REFINEMENT; calling this while already at REFINEMENT leaves the
    phase (and updated_at) unchanged.

    Returns:
        The phase in effect after the (possible) advancement
    """
    ordered = list(DeepResearchPhase)
    next_index = ordered.index(self.phase) + 1
    if next_index < len(ordered):
        self.phase = ordered[next_index]
        self.updated_at = datetime.utcnow()
    return self.phase
|
|
1179
|
+
|
|
1180
|
+
def should_continue_refinement(self) -> bool:
    """Decide whether another refinement iteration is warranted.

    Refinement continues only while the iteration budget has not been
    exhausted (iteration < max_iterations) and at least one gap
    remains unresolved.

    Returns:
        True if refinement should continue, False otherwise
    """
    within_budget = self.iteration < self.max_iterations
    return within_budget and bool(self.unresolved_gaps())
|
|
1195
|
+
|
|
1196
|
+
def start_new_iteration(self) -> int:
    """Begin another refinement pass.

    Bumps the iteration counter and rewinds the phase to GATHERING so
    sources can be collected for the new sub-queries.

    Returns:
        The iteration number just started
    """
    self.iteration += 1
    self.phase = DeepResearchPhase.GATHERING
    self.updated_at = datetime.utcnow()
    return self.iteration
|
|
1209
|
+
|
|
1210
|
+
def mark_completed(self, report: Optional[str] = None) -> None:
    """Finalize the research session.

    Sets the phase to SYNTHESIS, stamps the completion and update
    timestamps, and stores the report when a non-empty one is supplied.

    Args:
        report: Optional final report content
    """
    self.phase = DeepResearchPhase.SYNTHESIS
    self.completed_at = datetime.utcnow()
    self.updated_at = datetime.utcnow()
    if report:
        self.report = report
|
|
1221
|
+
|
|
1222
|
+
def mark_failed(self, error: str) -> None:
    """Mark the research session as failed with an error message.

    This sets completed_at to indicate the session has ended, and stores
    the failure information in metadata for status reporting.

    Args:
        error: Description of why the research failed
    """
    self.completed_at = datetime.utcnow()
    self.updated_at = datetime.utcnow()
    # Failure is recorded in metadata rather than dedicated fields —
    # presumably to keep the model schema unchanged; status reporting
    # reads the "failed" / "failure_error" keys.
    self.metadata["failed"] = True
    self.metadata["failure_error"] = error
|