foundry-mcp 0.3.3-py3-none-any.whl → 0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/commands/plan.py +10 -3
  3. foundry_mcp/cli/commands/review.py +19 -4
  4. foundry_mcp/cli/commands/specs.py +38 -208
  5. foundry_mcp/cli/output.py +3 -3
  6. foundry_mcp/config.py +235 -5
  7. foundry_mcp/core/ai_consultation.py +146 -9
  8. foundry_mcp/core/discovery.py +6 -6
  9. foundry_mcp/core/error_store.py +2 -2
  10. foundry_mcp/core/intake.py +933 -0
  11. foundry_mcp/core/llm_config.py +20 -2
  12. foundry_mcp/core/metrics_store.py +2 -2
  13. foundry_mcp/core/progress.py +70 -0
  14. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  15. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  16. foundry_mcp/core/prompts/plan_review.py +5 -1
  17. foundry_mcp/core/providers/claude.py +6 -47
  18. foundry_mcp/core/providers/codex.py +6 -57
  19. foundry_mcp/core/providers/cursor_agent.py +3 -44
  20. foundry_mcp/core/providers/gemini.py +6 -57
  21. foundry_mcp/core/providers/opencode.py +35 -5
  22. foundry_mcp/core/research/__init__.py +68 -0
  23. foundry_mcp/core/research/memory.py +425 -0
  24. foundry_mcp/core/research/models.py +437 -0
  25. foundry_mcp/core/research/workflows/__init__.py +22 -0
  26. foundry_mcp/core/research/workflows/base.py +204 -0
  27. foundry_mcp/core/research/workflows/chat.py +271 -0
  28. foundry_mcp/core/research/workflows/consensus.py +396 -0
  29. foundry_mcp/core/research/workflows/ideate.py +682 -0
  30. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  31. foundry_mcp/core/responses.py +450 -0
  32. foundry_mcp/core/spec.py +2438 -236
  33. foundry_mcp/core/task.py +1064 -19
  34. foundry_mcp/core/testing.py +512 -123
  35. foundry_mcp/core/validation.py +313 -42
  36. foundry_mcp/dashboard/components/charts.py +0 -57
  37. foundry_mcp/dashboard/launcher.py +11 -0
  38. foundry_mcp/dashboard/views/metrics.py +25 -35
  39. foundry_mcp/dashboard/views/overview.py +1 -65
  40. foundry_mcp/resources/specs.py +25 -25
  41. foundry_mcp/schemas/intake-schema.json +89 -0
  42. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  43. foundry_mcp/server.py +38 -0
  44. foundry_mcp/tools/unified/__init__.py +4 -2
  45. foundry_mcp/tools/unified/authoring.py +2423 -267
  46. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  47. foundry_mcp/tools/unified/environment.py +235 -6
  48. foundry_mcp/tools/unified/error.py +18 -1
  49. foundry_mcp/tools/unified/lifecycle.py +8 -0
  50. foundry_mcp/tools/unified/plan.py +113 -1
  51. foundry_mcp/tools/unified/research.py +658 -0
  52. foundry_mcp/tools/unified/review.py +370 -16
  53. foundry_mcp/tools/unified/spec.py +367 -0
  54. foundry_mcp/tools/unified/task.py +1163 -48
  55. foundry_mcp/tools/unified/test.py +69 -8
  56. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
  57. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
  58. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
  59. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
  60. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/research/workflows/chat.py
@@ -0,0 +1,271 @@
+ """CHAT workflow for single-model conversation with thread persistence.
+
+ Provides conversational interaction with context preservation across messages,
+ supporting thread creation, continuation, and message history management.
+ """
+
+ import logging
+ from typing import Any, Optional
+
+ from foundry_mcp.config import ResearchConfig
+ from foundry_mcp.core.research.memory import ResearchMemory
+ from foundry_mcp.core.research.models import (
+     ConversationMessage,
+     ConversationThread,
+     ThreadStatus,
+ )
+ from foundry_mcp.core.research.workflows.base import ResearchWorkflowBase, WorkflowResult
+
+ logger = logging.getLogger(__name__)
+
+
+ class ChatWorkflow(ResearchWorkflowBase):
+     """Single-model conversation workflow with thread persistence.
+
+     Features:
+     - Create new conversation threads
+     - Continue existing threads with full context
+     - Token-aware context window management
+     - Message persistence across invocations
+     """
+
+     def __init__(
+         self,
+         config: ResearchConfig,
+         memory: Optional[ResearchMemory] = None,
+     ) -> None:
+         """Initialize chat workflow.
+
+         Args:
+             config: Research configuration
+             memory: Optional memory instance
+         """
+         super().__init__(config, memory)
+
+     def execute(
+         self,
+         prompt: str,
+         thread_id: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         provider_id: Optional[str] = None,
+         model: Optional[str] = None,
+         temperature: Optional[float] = None,
+         max_tokens: Optional[int] = None,
+         title: Optional[str] = None,
+         **kwargs: Any,
+     ) -> WorkflowResult:
+         """Execute a chat turn.
+
+         Creates a new thread or continues an existing one, sends the prompt
+         to the provider, and persists the conversation.
+
+         Args:
+             prompt: User message
+             thread_id: Existing thread to continue (creates new if None)
+             system_prompt: System prompt (only used for new threads)
+             provider_id: Provider to use (uses config default if None)
+             model: Optional model override
+             temperature: Optional temperature setting
+             max_tokens: Optional max tokens
+             title: Optional title for new threads
+
+         Returns:
+             WorkflowResult with assistant response and thread metadata
+         """
+         # Get or create thread
+         thread = self._get_or_create_thread(
+             thread_id=thread_id,
+             system_prompt=system_prompt,
+             provider_id=provider_id,
+             title=title,
+         )
+
+         # Add user message
+         thread.add_message(role="user", content=prompt)
+
+         # Build context for provider
+         context = self._build_context(thread)
+
+         # Execute provider
+         result = self._execute_provider(
+             prompt=context,
+             provider_id=thread.provider_id or provider_id,
+             system_prompt=thread.system_prompt,
+             model=model,
+             temperature=temperature,
+             max_tokens=max_tokens,
+         )
+
+         if result.success:
+             # Add assistant message
+             thread.add_message(
+                 role="assistant",
+                 content=result.content,
+                 provider_id=result.provider_id,
+                 model_used=result.model_used,
+                 tokens_used=result.tokens_used,
+             )
+
+             # Persist thread
+             self.memory.save_thread(thread)
+
+             # Add thread info to result metadata
+             result.metadata["thread_id"] = thread.id
+             result.metadata["message_count"] = len(thread.messages)
+             result.metadata["thread_title"] = thread.title
+
+         return result
+
+     def _get_or_create_thread(
+         self,
+         thread_id: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         provider_id: Optional[str] = None,
+         title: Optional[str] = None,
+     ) -> ConversationThread:
+         """Get existing thread or create a new one.
+
+         Args:
+             thread_id: Existing thread ID to load
+             system_prompt: System prompt for new threads
+             provider_id: Provider ID for new threads
+             title: Title for new threads
+
+         Returns:
+             ConversationThread instance
+         """
+         if thread_id:
+             thread = self.memory.load_thread(thread_id)
+             if thread:
+                 return thread
+             logger.warning("Thread %s not found, creating new thread", thread_id)
+
+         # Create new thread
+         return ConversationThread(
+             title=title,
+             system_prompt=system_prompt,
+             provider_id=provider_id or self.config.default_provider,
+         )
+
+     def _build_context(self, thread: ConversationThread) -> str:
+         """Build conversation context for the provider.
+
+         Formats message history with token-aware truncation to fit
+         within context window limits.
+
+         Args:
+             thread: Conversation thread
+
+         Returns:
+             Formatted context string
+         """
+         # Get recent messages (respecting max_messages config)
+         messages = thread.get_context_messages(
+             max_messages=self.config.max_messages_per_thread
+         )
+
+         # Format messages for context
+         parts = []
+         for msg in messages:
+             role_label = "User" if msg.role == "user" else "Assistant"
+             parts.append(f"{role_label}: {msg.content}")
+
+         return "\n\n".join(parts)
+
+     def list_threads(
+         self,
+         status: Optional[ThreadStatus] = None,
+         limit: Optional[int] = 50,
+     ) -> list[dict[str, Any]]:
+         """List conversation threads.
+
+         Args:
+             status: Filter by thread status
+             limit: Maximum threads to return
+
+         Returns:
+             List of thread summaries
+         """
+         threads = self.memory.list_threads(status=status, limit=limit)
+
+         return [
+             {
+                 "id": t.id,
+                 "title": t.title,
+                 "status": t.status.value,
+                 "message_count": len(t.messages),
+                 "created_at": t.created_at.isoformat(),
+                 "updated_at": t.updated_at.isoformat(),
+                 "provider_id": t.provider_id,
+             }
+             for t in threads
+         ]
+
+     def get_thread(self, thread_id: str) -> Optional[dict[str, Any]]:
+         """Get full thread details including messages.
+
+         Args:
+             thread_id: Thread identifier
+
+         Returns:
+             Thread data with messages or None if not found
+         """
+         thread = self.memory.load_thread(thread_id)
+         if not thread:
+             return None
+
+         return {
+             "id": thread.id,
+             "title": thread.title,
+             "status": thread.status.value,
+             "system_prompt": thread.system_prompt,
+             "provider_id": thread.provider_id,
+             "created_at": thread.created_at.isoformat(),
+             "updated_at": thread.updated_at.isoformat(),
+             "messages": [
+                 {
+                     "id": m.id,
+                     "role": m.role,
+                     "content": m.content,
+                     "timestamp": m.timestamp.isoformat(),
+                     "provider_id": m.provider_id,
+                     "model_used": m.model_used,
+                     "tokens_used": m.tokens_used,
+                 }
+                 for m in thread.messages
+             ],
+             "metadata": thread.metadata,
+         }
+
+     def delete_thread(self, thread_id: str) -> bool:
+         """Delete a conversation thread.
+
+         Args:
+             thread_id: Thread identifier
+
+         Returns:
+             True if deleted, False if not found
+         """
+         return self.memory.delete_thread(thread_id)
+
+     def update_thread_status(
+         self,
+         thread_id: str,
+         status: ThreadStatus,
+     ) -> bool:
+         """Update thread status.
+
+         Args:
+             thread_id: Thread identifier
+             status: New status
+
+         Returns:
+             True if updated, False if not found
+         """
+         thread = self.memory.load_thread(thread_id)
+         if not thread:
+             return False
+
+         thread.status = status
+         self.memory.save_thread(thread)
+         return True
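
For orientation, here is a minimal usage sketch for the new chat workflow. It is illustrative rather than taken from the package: it assumes ResearchConfig can be constructed with defaults and that at least one provider is configured; the prompt text and thread title are invented.

from foundry_mcp.config import ResearchConfig
from foundry_mcp.core.research.workflows.chat import ChatWorkflow

# Assumption: ResearchConfig() has usable defaults for provider settings.
config = ResearchConfig()
workflow = ChatWorkflow(config)

# First turn: no thread_id, so a new thread is created and persisted.
result = workflow.execute(
    prompt="Summarize the tradeoffs of event sourcing.",
    title="architecture-notes",  # hypothetical title
)
thread_id = result.metadata["thread_id"]

# Second turn: passing thread_id continues the same thread, and
# _build_context replays the stored history to the provider.
follow_up = workflow.execute(
    prompt="How does that interact with CQRS?",
    thread_id=thread_id,
)
print(follow_up.content)

Note that system_prompt is honored only when a thread is created; continuations reuse the system prompt stored on the thread.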
foundry_mcp/core/research/workflows/consensus.py
@@ -0,0 +1,396 @@
+ """CONSENSUS workflow for multi-model parallel consultation with synthesis.
+
+ Provides parallel execution across multiple providers with configurable
+ synthesis strategies for combining responses.
+ """
+
+ import asyncio
+ import logging
+ from typing import Any, Optional
+
+ from foundry_mcp.config import ResearchConfig
+ from foundry_mcp.core.providers import ProviderHooks, ProviderRequest, ProviderStatus
+ from foundry_mcp.core.providers.registry import available_providers, resolve_provider
+ from foundry_mcp.core.research.memory import ResearchMemory
+ from foundry_mcp.core.research.models import (
+     ConsensusConfig,
+     ConsensusState,
+     ConsensusStrategy,
+     ModelResponse,
+ )
+ from foundry_mcp.core.research.workflows.base import ResearchWorkflowBase, WorkflowResult
+
+ logger = logging.getLogger(__name__)
+
+
+ class ConsensusWorkflow(ResearchWorkflowBase):
+     """Multi-model consensus workflow with synthesis strategies.
+
+     Features:
+     - Parallel execution across multiple providers
+     - Concurrency limiting with semaphore
+     - Multiple synthesis strategies (all_responses, synthesize, majority, first_valid)
+     - Partial failure handling (continue on some provider errors)
+     """
+
+     def __init__(
+         self,
+         config: ResearchConfig,
+         memory: Optional[ResearchMemory] = None,
+     ) -> None:
+         """Initialize consensus workflow.
+
+         Args:
+             config: Research configuration
+             memory: Optional memory instance
+         """
+         super().__init__(config, memory)
+
+     def execute(
+         self,
+         prompt: str,
+         providers: Optional[list[str]] = None,
+         strategy: ConsensusStrategy = ConsensusStrategy.SYNTHESIZE,
+         synthesis_provider: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         timeout_per_provider: float = 30.0,
+         max_concurrent: int = 3,
+         require_all: bool = False,
+         min_responses: int = 1,
+         **kwargs: Any,
+     ) -> WorkflowResult:
+         """Execute consensus across multiple providers.
+
+         Args:
+             prompt: User prompt to send to all providers
+             providers: List of provider IDs (uses config default if None)
+             strategy: Synthesis strategy for combining responses
+             synthesis_provider: Provider for synthesis (if strategy=synthesize)
+             system_prompt: Optional system prompt
+             timeout_per_provider: Timeout per provider in seconds
+             max_concurrent: Maximum concurrent provider calls
+             require_all: Require all providers to succeed
+             min_responses: Minimum responses needed for success
+
+         Returns:
+             WorkflowResult with synthesized or combined response
+         """
+         # Resolve providers
+         provider_ids = providers or self.config.consensus_providers
+         available = available_providers()
+         valid_providers = [p for p in provider_ids if p in available]
+
+         if not valid_providers:
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"No valid providers available. Requested: {provider_ids}, Available: {available}",
+             )
+
+         # Create consensus config and state
+         consensus_config = ConsensusConfig(
+             providers=valid_providers,
+             strategy=strategy,
+             synthesis_provider=synthesis_provider or self.config.default_provider,
+             timeout_per_provider=timeout_per_provider,
+             max_concurrent=max_concurrent,
+             require_all=require_all,
+             min_responses=min_responses,
+         )
+
+         state = ConsensusState(
+             prompt=prompt,
+             config=consensus_config,
+             system_prompt=system_prompt,
+         )
+
+         # Execute parallel requests
+         try:
+             responses = asyncio.run(
+                 self._execute_parallel(
+                     prompt=prompt,
+                     providers=valid_providers,
+                     system_prompt=system_prompt,
+                     timeout=timeout_per_provider,
+                     max_concurrent=max_concurrent,
+                 )
+             )
+         except Exception as exc:
+             logger.error("Parallel execution failed: %s", exc)
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"Parallel execution failed: {exc}",
+             )
+
+         # Add responses to state
+         for response in responses:
+             state.add_response(response)
+
+         # Check if we have enough responses
+         successful = state.successful_responses()
+         if len(successful) < min_responses:
+             failed_info = [
+                 f"{r.provider_id}: {r.error_message}"
+                 for r in state.failed_responses()
+             ]
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"Insufficient responses ({len(successful)}/{min_responses}). Failures: {failed_info}",
+                 metadata={
+                     "successful_count": len(successful),
+                     "failed_count": len(state.failed_responses()),
+                     "responses": [r.model_dump() for r in responses],
+                 },
+             )
+
+         if require_all and len(state.failed_responses()) > 0:
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"Not all providers succeeded (require_all=True). Failed: {[r.provider_id for r in state.failed_responses()]}",
+             )
+
+         # Apply synthesis strategy
+         result = self._apply_strategy(state)
+
+         # Persist state
+         state.mark_completed(synthesis=result.content if result.success else None)
+         self.memory.save_consensus(state)
+
+         # Add consensus metadata
+         result.metadata["consensus_id"] = state.id
+         result.metadata["providers_consulted"] = [r.provider_id for r in successful]
+         result.metadata["strategy"] = strategy.value
+         result.metadata["response_count"] = len(successful)
+
+         return result
+
+     async def _execute_parallel(
+         self,
+         prompt: str,
+         providers: list[str],
+         system_prompt: Optional[str],
+         timeout: float,
+         max_concurrent: int,
+     ) -> list[ModelResponse]:
+         """Execute requests to multiple providers in parallel.
+
+         Args:
+             prompt: User prompt
+             providers: Provider IDs to query
+             system_prompt: Optional system prompt
+             timeout: Timeout per provider
+             max_concurrent: Max concurrent requests
+
+         Returns:
+             List of ModelResponse objects
+         """
+         semaphore = asyncio.Semaphore(max_concurrent)
+
+         async def query_provider(provider_id: str) -> ModelResponse:
+             async with semaphore:
+                 return await self._query_single_provider(
+                     provider_id=provider_id,
+                     prompt=prompt,
+                     system_prompt=system_prompt,
+                     timeout=timeout,
+                 )
+
+         tasks = [query_provider(pid) for pid in providers]
+         responses = await asyncio.gather(*tasks, return_exceptions=True)
+
+         # Convert exceptions to failed responses
+         result = []
+         for i, response in enumerate(responses):
+             if isinstance(response, Exception):
+                 result.append(
+                     ModelResponse(
+                         provider_id=providers[i],
+                         content="",
+                         success=False,
+                         error_message=str(response),
+                     )
+                 )
+             else:
+                 result.append(response)
+
+         return result
+
+     async def _query_single_provider(
+         self,
+         provider_id: str,
+         prompt: str,
+         system_prompt: Optional[str],
+         timeout: float,
+     ) -> ModelResponse:
+         """Query a single provider asynchronously.
+
+         Args:
+             provider_id: Provider to query
+             prompt: User prompt
+             system_prompt: Optional system prompt
+             timeout: Request timeout
+
+         Returns:
+             ModelResponse with result or error
+         """
+         import time
+
+         start_time = time.perf_counter()
+
+         try:
+             provider = resolve_provider(provider_id, hooks=ProviderHooks())
+             request = ProviderRequest(
+                 prompt=prompt,
+                 system_prompt=system_prompt,
+                 timeout=timeout,
+             )
+
+             # Run synchronous generate in thread pool
+             loop = asyncio.get_event_loop()
+             result = await asyncio.wait_for(
+                 loop.run_in_executor(None, provider.generate, request),
+                 timeout=timeout,
+             )
+
+             duration_ms = (time.perf_counter() - start_time) * 1000
+
+             if result.status != ProviderStatus.SUCCESS:
+                 return ModelResponse(
+                     provider_id=provider_id,
+                     model_used=result.model_used,
+                     content=result.content or "",
+                     success=False,
+                     error_message=f"Provider returned status: {result.status.value}",
+                     duration_ms=duration_ms,
+                 )
+
+             return ModelResponse(
+                 provider_id=provider_id,
+                 model_used=result.model_used,
+                 content=result.content,
+                 success=True,
+                 tokens_used=result.tokens.total_tokens if result.tokens else None,
+                 duration_ms=duration_ms,
+             )
+
+         except asyncio.TimeoutError:
+             return ModelResponse(
+                 provider_id=provider_id,
+                 content="",
+                 success=False,
+                 error_message=f"Timeout after {timeout}s",
+                 duration_ms=timeout * 1000,
+             )
+         except Exception as exc:
+             duration_ms = (time.perf_counter() - start_time) * 1000
+             return ModelResponse(
+                 provider_id=provider_id,
+                 content="",
+                 success=False,
+                 error_message=str(exc),
+                 duration_ms=duration_ms,
+             )
+
+     def _apply_strategy(self, state: ConsensusState) -> WorkflowResult:
+         """Apply synthesis strategy to responses.
+
+         Args:
+             state: ConsensusState with collected responses
+
+         Returns:
+             WorkflowResult with synthesized content
+         """
+         successful = state.successful_responses()
+         strategy = state.config.strategy
+
+         if strategy == ConsensusStrategy.ALL_RESPONSES:
+             # Return all responses without synthesis
+             content_parts = []
+             for resp in successful:
+                 content_parts.append(f"### {resp.provider_id}\n\n{resp.content}")
+             return WorkflowResult(
+                 success=True,
+                 content="\n\n---\n\n".join(content_parts),
+                 metadata={"strategy": "all_responses"},
+             )
+
+         elif strategy == ConsensusStrategy.FIRST_VALID:
+             # Return first successful response
+             first = successful[0]
+             return WorkflowResult(
+                 success=True,
+                 content=first.content,
+                 provider_id=first.provider_id,
+                 model_used=first.model_used,
+                 tokens_used=first.tokens_used,
+                 metadata={"strategy": "first_valid"},
+             )
+
+         elif strategy == ConsensusStrategy.MAJORITY:
+             # For factual questions, try to find majority agreement.
+             # Simple heuristic: if responses are similar, use first; otherwise synthesize.
+             # A more sophisticated implementation would compare semantic similarity.
+             return self._synthesize_responses(state, successful)
+
+         elif strategy == ConsensusStrategy.SYNTHESIZE:
+             # Use a model to synthesize all responses
+             return self._synthesize_responses(state, successful)
+
+         else:
+             # Default to first valid
+             first = successful[0]
+             return WorkflowResult(
+                 success=True,
+                 content=first.content,
+                 provider_id=first.provider_id,
+             )
+
+     def _synthesize_responses(
+         self,
+         state: ConsensusState,
+         responses: list[ModelResponse],
+     ) -> WorkflowResult:
+         """Synthesize multiple responses using a model.
+
+         Args:
+             state: ConsensusState with original prompt
+             responses: Successful responses to synthesize
+
+         Returns:
+             WorkflowResult with synthesized content
+         """
+         # Build synthesis prompt
+         response_text = "\n\n---\n\n".join(
+             f"Response from {r.provider_id}:\n{r.content}"
+             for r in responses
+         )
+
+         synthesis_prompt = f"""You are synthesizing multiple AI responses to the same question.
+
+ Original question: {state.prompt}
+
+ {response_text}
+
+ Please synthesize these responses into a single, comprehensive answer that:
+ 1. Captures the key points from all responses
+ 2. Resolves any contradictions by noting different perspectives
+ 3. Provides a clear, well-structured response
+
+ Synthesized response:"""
+
+         # Execute synthesis
+         result = self._execute_provider(
+             prompt=synthesis_prompt,
+             provider_id=state.config.synthesis_provider,
+             system_prompt="You are a helpful assistant that synthesizes multiple AI responses into a coherent, comprehensive answer.",
+         )
+
+         if result.success:
+             result.metadata["strategy"] = "synthesize"
+             result.metadata["synthesis_provider"] = state.config.synthesis_provider
+             result.metadata["source_providers"] = [r.provider_id for r in responses]
+
+         return result
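
And a hedged sketch of driving the consensus workflow, again illustrative rather than from the package. The provider ids below are assumptions and must match whatever available_providers() reports in a given installation; because execute() wraps asyncio.run, it has to be called from synchronous code, not from inside a running event loop.

from foundry_mcp.config import ResearchConfig
from foundry_mcp.core.research.models import ConsensusStrategy
from foundry_mcp.core.research.workflows.consensus import ConsensusWorkflow

config = ResearchConfig()  # assumption: defaults suffice
workflow = ConsensusWorkflow(config)

result = workflow.execute(
    prompt="Is optimistic locking appropriate for this write pattern?",
    providers=["claude", "gemini", "codex"],  # hypothetical registered ids
    strategy=ConsensusStrategy.SYNTHESIZE,
    timeout_per_provider=60.0,
    min_responses=2,  # tolerate one provider failing or timing out
)

if result.success:
    print(result.metadata["providers_consulted"])
    print(result.content)
else:
    print(result.error)

With min_responses=2 and require_all=False, one provider can fail without sinking the run; the surviving responses are combined by the synthesis_provider (the config default unless overridden).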