foundry-mcp 0.3.3__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +235 -5
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/discovery.py +6 -6
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +20 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/claude.py +6 -47
- foundry_mcp/core/providers/codex.py +6 -57
- foundry_mcp/core/providers/cursor_agent.py +3 -44
- foundry_mcp/core/providers/gemini.py +6 -57
- foundry_mcp/core/providers/opencode.py +35 -5
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +425 -0
- foundry_mcp/core/research/models.py +437 -0
- foundry_mcp/core/research/workflows/__init__.py +22 -0
- foundry_mcp/core/research/workflows/base.py +204 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +396 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +450 -0
- foundry_mcp/core/spec.py +2438 -236
- foundry_mcp/core/task.py +1064 -19
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +313 -42
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +38 -0
- foundry_mcp/tools/unified/__init__.py +4 -2
- foundry_mcp/tools/unified/authoring.py +2423 -267
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +235 -6
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +113 -1
- foundry_mcp/tools/unified/research.py +658 -0
- foundry_mcp/tools/unified/review.py +370 -16
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1163 -48
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/research/models.py (new file)
@@ -0,0 +1,437 @@
+"""Pydantic models for research workflows.
+
+These models define the data structures for conversation threading,
+multi-model consensus, hypothesis-driven investigation, and creative
+brainstorming workflows.
+"""
+
+from datetime import datetime
+from enum import Enum
+from typing import Any, Optional
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+
+
+# =============================================================================
+# Enums
+# =============================================================================
+
+
+class WorkflowType(str, Enum):
+    """Types of research workflows available."""
+
+    CHAT = "chat"
+    CONSENSUS = "consensus"
+    THINKDEEP = "thinkdeep"
+    IDEATE = "ideate"
+
+
+class ConfidenceLevel(str, Enum):
+    """Confidence levels for hypotheses in THINKDEEP workflow."""
+
+    SPECULATION = "speculation"
+    LOW = "low"
+    MEDIUM = "medium"
+    HIGH = "high"
+    CONFIRMED = "confirmed"
+
+
+class ConsensusStrategy(str, Enum):
+    """Strategies for synthesizing multi-model responses in CONSENSUS workflow."""
+
+    ALL_RESPONSES = "all_responses"  # Return all responses without synthesis
+    SYNTHESIZE = "synthesize"  # Use a model to synthesize responses
+    MAJORITY = "majority"  # Use majority vote for factual questions
+    FIRST_VALID = "first_valid"  # Return first successful response
+
+
+class ThreadStatus(str, Enum):
+    """Status of a conversation thread."""
+
+    ACTIVE = "active"
+    COMPLETED = "completed"
+    ARCHIVED = "archived"
+
+
+class IdeationPhase(str, Enum):
+    """Phases of the IDEATE workflow."""
+
+    DIVERGENT = "divergent"  # Generate diverse ideas
+    CONVERGENT = "convergent"  # Cluster and score ideas
+    SELECTION = "selection"  # Select clusters for elaboration
+    ELABORATION = "elaboration"  # Develop selected ideas
+
+
+# =============================================================================
+# Conversation Models (CHAT workflow)
+# =============================================================================
+
+
+class ConversationMessage(BaseModel):
+    """A single message in a conversation thread."""
+
+    id: str = Field(default_factory=lambda: f"msg-{uuid4().hex[:8]}")
+    role: str = Field(..., description="Message role: 'user' or 'assistant'")
+    content: str = Field(..., description="Message content")
+    timestamp: datetime = Field(default_factory=datetime.utcnow)
+    provider_id: Optional[str] = Field(
+        default=None, description="Provider that generated this message"
+    )
+    model_used: Optional[str] = Field(
+        default=None, description="Model that generated this message"
+    )
+    tokens_used: Optional[int] = Field(
+        default=None, description="Tokens consumed for this message"
+    )
+    metadata: dict[str, Any] = Field(
+        default_factory=dict, description="Additional message metadata"
+    )
+
+
+class ConversationThread(BaseModel):
+    """A conversation thread with message history."""
+
+    id: str = Field(default_factory=lambda: f"thread-{uuid4().hex[:12]}")
+    title: Optional[str] = Field(default=None, description="Optional thread title")
+    status: ThreadStatus = Field(default=ThreadStatus.ACTIVE)
+    messages: list[ConversationMessage] = Field(default_factory=list)
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    updated_at: datetime = Field(default_factory=datetime.utcnow)
+    provider_id: Optional[str] = Field(
+        default=None, description="Default provider for this thread"
+    )
+    system_prompt: Optional[str] = Field(
+        default=None, description="System prompt for this thread"
+    )
+    metadata: dict[str, Any] = Field(
+        default_factory=dict, description="Additional thread metadata"
+    )
+
+    def add_message(
+        self,
+        role: str,
+        content: str,
+        provider_id: Optional[str] = None,
+        model_used: Optional[str] = None,
+        tokens_used: Optional[int] = None,
+        **metadata: Any,
+    ) -> ConversationMessage:
+        """Add a message to the thread and update timestamp."""
+        message = ConversationMessage(
+            role=role,
+            content=content,
+            provider_id=provider_id,
+            model_used=model_used,
+            tokens_used=tokens_used,
+            metadata=metadata,
+        )
+        self.messages.append(message)
+        self.updated_at = datetime.utcnow()
+        return message
+
+    def get_context_messages(
+        self, max_messages: Optional[int] = None
+    ) -> list[ConversationMessage]:
+        """Get messages for context, optionally limited to recent N messages."""
+        if max_messages is None or max_messages >= len(self.messages):
+            return self.messages
+        return self.messages[-max_messages:]
+
+
+# =============================================================================
+# THINKDEEP Models (Hypothesis-driven investigation)
+# =============================================================================
+
+
+class Hypothesis(BaseModel):
+    """A hypothesis being investigated in THINKDEEP workflow."""
+
+    id: str = Field(default_factory=lambda: f"hyp-{uuid4().hex[:8]}")
+    statement: str = Field(..., description="The hypothesis statement")
+    confidence: ConfidenceLevel = Field(default=ConfidenceLevel.SPECULATION)
+    supporting_evidence: list[str] = Field(default_factory=list)
+    contradicting_evidence: list[str] = Field(default_factory=list)
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    updated_at: datetime = Field(default_factory=datetime.utcnow)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+    def add_evidence(self, evidence: str, supporting: bool = True) -> None:
+        """Add evidence for or against this hypothesis."""
+        if supporting:
+            self.supporting_evidence.append(evidence)
+        else:
+            self.contradicting_evidence.append(evidence)
+        self.updated_at = datetime.utcnow()
+
+    def update_confidence(self, new_confidence: ConfidenceLevel) -> None:
+        """Update the confidence level of this hypothesis."""
+        self.confidence = new_confidence
+        self.updated_at = datetime.utcnow()
+
+
+class InvestigationStep(BaseModel):
+    """A single step in a THINKDEEP investigation."""
+
+    id: str = Field(default_factory=lambda: f"step-{uuid4().hex[:8]}")
+    depth: int = Field(..., description="Depth level of this step (0-indexed)")
+    query: str = Field(..., description="The question or query for this step")
+    response: Optional[str] = Field(default=None, description="Model response")
+    hypotheses_generated: list[str] = Field(
+        default_factory=list, description="IDs of hypotheses generated in this step"
+    )
+    hypotheses_updated: list[str] = Field(
+        default_factory=list, description="IDs of hypotheses updated in this step"
+    )
+    timestamp: datetime = Field(default_factory=datetime.utcnow)
+    provider_id: Optional[str] = Field(default=None)
+    model_used: Optional[str] = Field(default=None)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+
+class ThinkDeepState(BaseModel):
+    """State for a THINKDEEP investigation session."""
+
+    id: str = Field(default_factory=lambda: f"investigation-{uuid4().hex[:12]}")
+    topic: str = Field(..., description="The topic being investigated")
+    current_depth: int = Field(default=0, description="Current investigation depth")
+    max_depth: int = Field(default=5, description="Maximum investigation depth")
+    hypotheses: list[Hypothesis] = Field(default_factory=list)
+    steps: list[InvestigationStep] = Field(default_factory=list)
+    converged: bool = Field(
+        default=False, description="Whether investigation has converged"
+    )
+    convergence_reason: Optional[str] = Field(
+        default=None, description="Reason for convergence if converged"
+    )
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    updated_at: datetime = Field(default_factory=datetime.utcnow)
+    system_prompt: Optional[str] = Field(default=None)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+    def add_hypothesis(self, statement: str, **kwargs: Any) -> Hypothesis:
+        """Create and add a new hypothesis."""
+        hypothesis = Hypothesis(statement=statement, **kwargs)
+        self.hypotheses.append(hypothesis)
+        self.updated_at = datetime.utcnow()
+        return hypothesis
+
+    def get_hypothesis(self, hypothesis_id: str) -> Optional[Hypothesis]:
+        """Get a hypothesis by ID."""
+        for h in self.hypotheses:
+            if h.id == hypothesis_id:
+                return h
+        return None
+
+    def add_step(self, query: str, depth: Optional[int] = None) -> InvestigationStep:
+        """Create and add a new investigation step."""
+        step = InvestigationStep(
+            depth=depth if depth is not None else self.current_depth, query=query
+        )
+        self.steps.append(step)
+        self.updated_at = datetime.utcnow()
+        return step
+
+    def check_convergence(self) -> bool:
+        """Check if investigation should converge based on criteria."""
+        # Converge if max depth reached
+        if self.current_depth >= self.max_depth:
+            self.converged = True
+            self.convergence_reason = "Maximum depth reached"
+            return True
+
+        # Converge if all hypotheses have high confidence
+        if self.hypotheses and all(
+            h.confidence in (ConfidenceLevel.HIGH, ConfidenceLevel.CONFIRMED)
+            for h in self.hypotheses
+        ):
+            self.converged = True
+            self.convergence_reason = "All hypotheses reached high confidence"
+            return True
+
+        return False
+
+
+# =============================================================================
+# IDEATE Models (Creative brainstorming)
+# =============================================================================
+
+
+class Idea(BaseModel):
+    """A single idea generated in IDEATE workflow."""
+
+    id: str = Field(default_factory=lambda: f"idea-{uuid4().hex[:8]}")
+    content: str = Field(..., description="The idea content")
+    perspective: Optional[str] = Field(
+        default=None, description="Perspective that generated this idea"
+    )
+    score: Optional[float] = Field(
+        default=None, description="Score from 0-1 based on criteria"
+    )
+    cluster_id: Optional[str] = Field(
+        default=None, description="ID of cluster this idea belongs to"
+    )
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    provider_id: Optional[str] = Field(default=None)
+    model_used: Optional[str] = Field(default=None)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+
+class IdeaCluster(BaseModel):
+    """A cluster of related ideas in IDEATE workflow."""
+
+    id: str = Field(default_factory=lambda: f"cluster-{uuid4().hex[:8]}")
+    name: str = Field(..., description="Cluster name/theme")
+    description: Optional[str] = Field(default=None, description="Cluster description")
+    idea_ids: list[str] = Field(default_factory=list, description="IDs of ideas in cluster")
+    average_score: Optional[float] = Field(default=None)
+    selected_for_elaboration: bool = Field(default=False)
+    elaboration: Optional[str] = Field(
+        default=None, description="Detailed elaboration if selected"
+    )
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+
+class IdeationState(BaseModel):
+    """State for an IDEATE brainstorming session."""
+
+    id: str = Field(default_factory=lambda: f"ideation-{uuid4().hex[:12]}")
+    topic: str = Field(..., description="The topic being brainstormed")
+    phase: IdeationPhase = Field(default=IdeationPhase.DIVERGENT)
+    perspectives: list[str] = Field(
+        default_factory=lambda: ["technical", "creative", "practical", "visionary"]
+    )
+    ideas: list[Idea] = Field(default_factory=list)
+    clusters: list[IdeaCluster] = Field(default_factory=list)
+    scoring_criteria: list[str] = Field(
+        default_factory=lambda: ["novelty", "feasibility", "impact"]
+    )
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    updated_at: datetime = Field(default_factory=datetime.utcnow)
+    system_prompt: Optional[str] = Field(default=None)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+    def add_idea(
+        self,
+        content: str,
+        perspective: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Idea:
+        """Add a new idea to the session."""
+        idea = Idea(content=content, perspective=perspective, **kwargs)
+        self.ideas.append(idea)
+        self.updated_at = datetime.utcnow()
+        return idea
+
+    def create_cluster(self, name: str, description: Optional[str] = None) -> IdeaCluster:
+        """Create a new idea cluster."""
+        cluster = IdeaCluster(name=name, description=description)
+        self.clusters.append(cluster)
+        self.updated_at = datetime.utcnow()
+        return cluster
+
+    def assign_idea_to_cluster(self, idea_id: str, cluster_id: str) -> bool:
+        """Assign an idea to a cluster."""
+        idea = next((i for i in self.ideas if i.id == idea_id), None)
+        cluster = next((c for c in self.clusters if c.id == cluster_id), None)
+
+        if idea and cluster:
+            idea.cluster_id = cluster_id
+            if idea_id not in cluster.idea_ids:
+                cluster.idea_ids.append(idea_id)
+            self.updated_at = datetime.utcnow()
+            return True
+        return False
+
+    def advance_phase(self) -> IdeationPhase:
+        """Advance to the next ideation phase."""
+        phase_order = list(IdeationPhase)
+        current_index = phase_order.index(self.phase)
+        if current_index < len(phase_order) - 1:
+            self.phase = phase_order[current_index + 1]
+            self.updated_at = datetime.utcnow()
+        return self.phase
+
+
+# =============================================================================
+# CONSENSUS Models (Multi-model parallel execution)
+# =============================================================================
+
+
+class ModelResponse(BaseModel):
+    """A response from a single model in CONSENSUS workflow."""
+
+    provider_id: str = Field(..., description="Provider that generated this response")
+    model_used: Optional[str] = Field(default=None)
+    content: str = Field(..., description="Response content")
+    success: bool = Field(default=True)
+    error_message: Optional[str] = Field(default=None)
+    tokens_used: Optional[int] = Field(default=None)
+    duration_ms: Optional[float] = Field(default=None)
+    timestamp: datetime = Field(default_factory=datetime.utcnow)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+
+class ConsensusConfig(BaseModel):
+    """Configuration for a CONSENSUS workflow execution."""
+
+    providers: list[str] = Field(
+        ..., description="List of provider IDs to consult", min_length=1
+    )
+    strategy: ConsensusStrategy = Field(default=ConsensusStrategy.SYNTHESIZE)
+    synthesis_provider: Optional[str] = Field(
+        default=None, description="Provider to use for synthesis (if strategy=synthesize)"
+    )
+    timeout_per_provider: float = Field(
+        default=30.0, description="Timeout in seconds per provider"
+    )
+    max_concurrent: int = Field(
+        default=3, description="Maximum concurrent provider calls"
+    )
+    require_all: bool = Field(
+        default=False, description="Require all providers to succeed"
+    )
+    min_responses: int = Field(
+        default=1, description="Minimum responses needed for success"
+    )
+
+
+class ConsensusState(BaseModel):
+    """State for a CONSENSUS workflow execution."""
+
+    id: str = Field(default_factory=lambda: f"consensus-{uuid4().hex[:12]}")
+    prompt: str = Field(..., description="The prompt sent to all providers")
+    config: ConsensusConfig = Field(..., description="Consensus configuration")
+    responses: list[ModelResponse] = Field(default_factory=list)
+    synthesis: Optional[str] = Field(
+        default=None, description="Synthesized response if strategy requires it"
+    )
+    completed: bool = Field(default=False)
+    created_at: datetime = Field(default_factory=datetime.utcnow)
+    completed_at: Optional[datetime] = Field(default=None)
+    system_prompt: Optional[str] = Field(default=None)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+
+    def add_response(self, response: ModelResponse) -> None:
+        """Add a model response to the consensus."""
+        self.responses.append(response)
+
+    def successful_responses(self) -> list[ModelResponse]:
+        """Get only successful responses."""
+        return [r for r in self.responses if r.success]
+
+    def failed_responses(self) -> list[ModelResponse]:
+        """Get failed responses."""
+        return [r for r in self.responses if not r.success]
+
+    def is_quorum_met(self) -> bool:
+        """Check if minimum response requirement is met."""
+        return len(self.successful_responses()) >= self.config.min_responses
+
+    def mark_completed(self, synthesis: Optional[str] = None) -> None:
+        """Mark the consensus as completed."""
+        self.completed = True
+        self.completed_at = datetime.utcnow()
+        if synthesis:
+            self.synthesis = synthesis
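
To make the data flow concrete, here is a brief usage sketch of the new models. It is illustrative only: the values, topics, and the assertion are invented, while the class and method names come from the diff above.

```python
from foundry_mcp.core.research.models import (
    ConfidenceLevel,
    ConversationThread,
    ThinkDeepState,
)

# CHAT: a thread accumulates messages and refreshes updated_at on each append.
thread = ConversationThread(title="API design review")
thread.add_message("user", "Should we version the REST endpoints?")
thread.add_message("assistant", "Yes, prefix routes with /v1.", provider_id="claude")
context = thread.get_context_messages(max_messages=10)  # most recent 10 messages

# THINKDEEP: hypotheses collect evidence until check_convergence() trips,
# either at max_depth or once every hypothesis is HIGH/CONFIRMED.
state = ThinkDeepState(topic="Why are writes slow?", max_depth=3)
hyp = state.add_hypothesis("Lock contention on the primary index")
hyp.add_evidence("p99 latency spikes line up with index rebuilds")
hyp.update_confidence(ConfidenceLevel.HIGH)
state.current_depth = 3
assert state.check_convergence()  # converges: maximum depth reached
```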

foundry_mcp/core/research/workflows/__init__.py (new file)
@@ -0,0 +1,22 @@
+"""Research workflow implementations.
+
+This package provides the workflow classes for multi-model orchestration:
+- ChatWorkflow: Single-model conversation with thread persistence
+- ConsensusWorkflow: Multi-model parallel consultation with synthesis
+- ThinkDeepWorkflow: Hypothesis-driven systematic investigation
+- IdeateWorkflow: Creative brainstorming with idea clustering
+"""
+
+from foundry_mcp.core.research.workflows.base import ResearchWorkflowBase
+from foundry_mcp.core.research.workflows.chat import ChatWorkflow
+from foundry_mcp.core.research.workflows.consensus import ConsensusWorkflow
+from foundry_mcp.core.research.workflows.thinkdeep import ThinkDeepWorkflow
+from foundry_mcp.core.research.workflows.ideate import IdeateWorkflow
+
+__all__ = [
+    "ResearchWorkflowBase",
+    "ChatWorkflow",
+    "ConsensusWorkflow",
+    "ThinkDeepWorkflow",
+    "IdeateWorkflow",
+]
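
Because of these re-exports, callers can import every workflow from the package root rather than from the individual submodules, e.g. (hypothetical usage):

```python
from foundry_mcp.core.research.workflows import ChatWorkflow, IdeateWorkflow
```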

foundry_mcp/core/research/workflows/base.py (new file)
@@ -0,0 +1,204 @@
+"""Base class for research workflows.
+
+Provides common infrastructure for provider integration, error handling,
+and response normalization across all research workflow types.
+"""
+
+import logging
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import Any, Optional
+
+from foundry_mcp.config import ResearchConfig
+from foundry_mcp.core.providers import (
+    ProviderContext,
+    ProviderHooks,
+    ProviderRequest,
+    ProviderResult,
+    ProviderStatus,
+)
+from foundry_mcp.core.providers.registry import available_providers, resolve_provider
+from foundry_mcp.core.research.memory import ResearchMemory
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class WorkflowResult:
+    """Result of a workflow execution.
+
+    Attributes:
+        success: Whether the workflow completed successfully
+        content: Main response content
+        provider_id: Provider that generated the response
+        model_used: Model that generated the response
+        tokens_used: Total tokens consumed
+        duration_ms: Execution duration in milliseconds
+        metadata: Additional workflow-specific data
+        error: Error message if success is False
+    """
+
+    success: bool
+    content: str
+    provider_id: Optional[str] = None
+    model_used: Optional[str] = None
+    tokens_used: Optional[int] = None
+    duration_ms: Optional[float] = None
+    metadata: dict[str, Any] = None
+    error: Optional[str] = None
+
+    def __post_init__(self) -> None:
+        if self.metadata is None:
+            self.metadata = {}
+
+
+class ResearchWorkflowBase(ABC):
+    """Base class for all research workflows.
+
+    Provides common functionality for provider resolution, request execution,
+    and memory management.
+    """
+
+    def __init__(
+        self,
+        config: ResearchConfig,
+        memory: Optional[ResearchMemory] = None,
+    ) -> None:
+        """Initialize workflow with configuration and memory.
+
+        Args:
+            config: Research configuration
+            memory: Optional memory instance (creates default if not provided)
+        """
+        self.config = config
+        self.memory = memory or ResearchMemory(
+            base_path=config.get_storage_path(),
+            ttl_hours=config.ttl_hours,
+        )
+        self._provider_cache: dict[str, ProviderContext] = {}
+
+    def _resolve_provider(
+        self,
+        provider_id: Optional[str] = None,
+        hooks: Optional[ProviderHooks] = None,
+    ) -> Optional[ProviderContext]:
+        """Resolve and cache a provider instance.
+
+        Args:
+            provider_id: Provider ID to resolve (uses config default if None)
+            hooks: Optional provider hooks
+
+        Returns:
+            ProviderContext instance or None if unavailable
+        """
+        provider_id = provider_id or self.config.default_provider
+
+        # Check cache first
+        if provider_id in self._provider_cache:
+            return self._provider_cache[provider_id]
+
+        # Check availability
+        available = available_providers()
+        if provider_id not in available:
+            logger.warning("Provider %s not available. Available: %s", provider_id, available)
+            return None
+
+        try:
+            provider = resolve_provider(provider_id, hooks=hooks or ProviderHooks())
+            self._provider_cache[provider_id] = provider
+            return provider
+        except Exception as exc:
+            logger.error("Failed to resolve provider %s: %s", provider_id, exc)
+            return None
+
+    def _execute_provider(
+        self,
+        prompt: str,
+        provider_id: Optional[str] = None,
+        system_prompt: Optional[str] = None,
+        model: Optional[str] = None,
+        timeout: Optional[float] = None,
+        temperature: Optional[float] = None,
+        max_tokens: Optional[int] = None,
+        hooks: Optional[ProviderHooks] = None,
+    ) -> WorkflowResult:
+        """Execute a single provider request.
+
+        Args:
+            prompt: User prompt
+            provider_id: Provider to use (uses config default if None)
+            system_prompt: Optional system prompt
+            model: Optional model override
+            timeout: Optional timeout in seconds
+            temperature: Optional temperature setting
+            max_tokens: Optional max tokens
+            hooks: Optional provider hooks
+
+        Returns:
+            WorkflowResult with response or error
+        """
+        provider = self._resolve_provider(provider_id, hooks)
+        if provider is None:
+            return WorkflowResult(
+                success=False,
+                content="",
+                error=f"Provider '{provider_id or self.config.default_provider}' is not available",
+            )
+
+        request = ProviderRequest(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            model=model,
+            timeout=timeout or 30.0,
+            temperature=temperature,
+            max_tokens=max_tokens,
+        )
+
+        try:
+            result: ProviderResult = provider.generate(request)
+
+            if result.status != ProviderStatus.SUCCESS:
+                return WorkflowResult(
+                    success=False,
+                    content=result.content or "",
+                    provider_id=result.provider_id,
+                    model_used=result.model_used,
+                    error=f"Provider returned status: {result.status.value}",
+                )
+
+            return WorkflowResult(
+                success=True,
+                content=result.content,
+                provider_id=result.provider_id,
+                model_used=result.model_used,
+                tokens_used=result.tokens.total_tokens if result.tokens else None,
+                duration_ms=result.duration_ms,
+            )
+
+        except Exception as exc:
+            logger.error("Provider execution failed: %s", exc)
+            return WorkflowResult(
+                success=False,
+                content="",
+                provider_id=provider_id,
+                error=str(exc),
+            )
+
+    def get_available_providers(self) -> list[str]:
+        """Get list of available provider IDs.
+
+        Returns:
+            List of available provider identifiers
+        """
+        return available_providers()
+
+    @abstractmethod
+    def execute(self, **kwargs: Any) -> WorkflowResult:
+        """Execute the workflow.
+
+        Subclasses must implement this method with their specific logic.
+
+        Returns:
+            WorkflowResult with response or error
+        """
+        ...
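
For orientation, a minimal sketch of a concrete subclass under this contract. The `EchoWorkflow` class and its prompts are invented for illustration; only `ResearchWorkflowBase`, `WorkflowResult`, and `_execute_provider` come from the diff above.

```python
from typing import Any, Optional

from foundry_mcp.core.research.workflows.base import (
    ResearchWorkflowBase,
    WorkflowResult,
)


class EchoWorkflow(ResearchWorkflowBase):
    """Hypothetical one-shot workflow: a single prompt, a single provider call."""

    def execute(
        self,
        prompt: str = "",
        provider_id: Optional[str] = None,
        **kwargs: Any,
    ) -> WorkflowResult:
        # _execute_provider resolves (and caches) the provider, issues the
        # request, and normalizes all three failure modes -- unavailable
        # provider, non-SUCCESS status, raised exception -- into a
        # WorkflowResult(success=False, error=...), so subclasses never
        # handle provider errors directly.
        return self._execute_provider(
            prompt=prompt,
            provider_id=provider_id,
            system_prompt="Answer in one sentence.",
        )
```

Instantiating it requires a `ResearchConfig` (and optionally a `ResearchMemory`), matching the base class initializer shown in the diff.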