foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/research/workflows/consensus.py (new file)
@@ -0,0 +1,539 @@
+ """CONSENSUS workflow for multi-model parallel consultation with synthesis.
+
+ Provides parallel execution across multiple providers with configurable
+ synthesis strategies for combining responses.
+ """
+
+ import asyncio
+ import logging
+ import time
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from typing import Any, Optional
+
+ from foundry_mcp.config import ResearchConfig
+ from foundry_mcp.core.llm_config import ProviderSpec
+ from foundry_mcp.core.providers import ProviderHooks, ProviderRequest, ProviderStatus
+ from foundry_mcp.core.providers.registry import available_providers, resolve_provider
+ from foundry_mcp.core.research.memory import ResearchMemory
+ from foundry_mcp.core.research.models import (
+     ConsensusConfig,
+     ConsensusState,
+     ConsensusStrategy,
+     ModelResponse,
+ )
+ from foundry_mcp.core.research.workflows.base import ResearchWorkflowBase, WorkflowResult
+
+ logger = logging.getLogger(__name__)
+
+
+ class ConsensusWorkflow(ResearchWorkflowBase):
+     """Multi-model consensus workflow with synthesis strategies.
+
+     Features:
+     - Parallel execution across multiple providers
+     - Concurrency limiting with semaphore
+     - Multiple synthesis strategies (all_responses, synthesize, majority, first_valid)
+     - Partial failure handling (continue on some provider errors)
+     """
+
+     def __init__(
+         self,
+         config: ResearchConfig,
+         memory: Optional[ResearchMemory] = None,
+     ) -> None:
+         """Initialize consensus workflow.
+
+         Args:
+             config: Research configuration
+             memory: Optional memory instance
+         """
+         super().__init__(config, memory)
+
+     def execute(
+         self,
+         prompt: str,
+         providers: Optional[list[str]] = None,
+         strategy: ConsensusStrategy = ConsensusStrategy.SYNTHESIZE,
+         synthesis_provider: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         timeout_per_provider: float = 30.0,
+         max_concurrent: int = 3,
+         require_all: bool = False,
+         min_responses: int = 1,
+         **kwargs: Any,
+     ) -> WorkflowResult:
+         """Execute consensus across multiple providers.
+
+         Args:
+             prompt: User prompt to send to all providers
+             providers: List of provider IDs (uses config default if None)
+             strategy: Synthesis strategy for combining responses
+             synthesis_provider: Provider for synthesis (if strategy=synthesize)
+             system_prompt: Optional system prompt
+             timeout_per_provider: Timeout per provider in seconds
+             max_concurrent: Maximum concurrent provider calls
+             require_all: Require all providers to succeed
+             min_responses: Minimum responses needed for success
+
+         Returns:
+             WorkflowResult with synthesized or combined response
+         """
+         # Resolve providers - parse specs and check availability
+         provider_specs = providers or self.config.consensus_providers
+         available = available_providers()
+
+         # Parse each provider spec and filter by availability
+         valid_specs: list[ProviderSpec] = []
+         for spec_str in provider_specs:
+             try:
+                 spec = ProviderSpec.parse_flexible(spec_str)
+                 if spec.provider in available:
+                     valid_specs.append(spec)
+                 else:
+                     logger.warning(
+                         "Provider %s (from spec '%s') not available",
+                         spec.provider,
+                         spec_str,
+                     )
+             except ValueError as exc:
+                 logger.warning("Invalid provider spec '%s': %s", spec_str, exc)
+
+         if not valid_specs:
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"No valid providers available. Requested: {provider_specs}, Available: {available}",
+             )
+
+         # Use full spec strings for tracking, but we'll parse again when resolving
+         valid_providers = [spec.raw or (f"{spec.provider}:{spec.model}" if spec.model else spec.provider) for spec in valid_specs]
+
+         # Create consensus config and state
+         consensus_config = ConsensusConfig(
+             providers=valid_providers,
+             strategy=strategy,
+             synthesis_provider=synthesis_provider or self.config.default_provider,
+             timeout_per_provider=timeout_per_provider,
+             max_concurrent=max_concurrent,
+             require_all=require_all,
+             min_responses=min_responses,
+         )
+
+         state = ConsensusState(
+             prompt=prompt,
+             config=consensus_config,
+             system_prompt=system_prompt,
+         )
+
+         # Execute parallel requests using ThreadPoolExecutor
+         # This avoids asyncio.run() conflicts with MCP server's event loop
+         try:
+             responses = self._execute_parallel_sync(
+                 prompt=prompt,
+                 providers=valid_providers,
+                 system_prompt=system_prompt,
+                 timeout=timeout_per_provider,
+                 max_concurrent=max_concurrent,
+             )
+         except Exception as exc:
+             logger.error("Parallel execution failed: %s", exc)
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"Parallel execution failed: {exc}",
+             )
+
+         # Add responses to state
+         for response in responses:
+             state.add_response(response)
+
+         # Check if we have enough responses
+         successful = state.successful_responses()
+         if len(successful) < min_responses:
+             failed_info = [
+                 f"{r.provider_id}: {r.error_message}"
+                 for r in state.failed_responses()
+             ]
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"Insufficient responses ({len(successful)}/{min_responses}). Failures: {failed_info}",
+                 metadata={
+                     "successful_count": len(successful),
+                     "failed_count": len(state.failed_responses()),
+                     "responses": [r.model_dump() for r in responses],
+                 },
+             )
+
+         if require_all and len(state.failed_responses()) > 0:
+             return WorkflowResult(
+                 success=False,
+                 content="",
+                 error=f"Not all providers succeeded (require_all=True). Failed: {[r.provider_id for r in state.failed_responses()]}",
+             )
+
+         # Apply synthesis strategy
+         result = self._apply_strategy(state)
+
+         # Persist state
+         state.mark_completed(synthesis=result.content if result.success else None)
+         self.memory.save_consensus(state)
+
+         # Add consensus metadata
+         result.metadata["consensus_id"] = state.id
+         result.metadata["providers_consulted"] = [r.provider_id for r in successful]
+         result.metadata["strategy"] = strategy.value
+         result.metadata["response_count"] = len(successful)
+
+         return result
+
+     def _execute_parallel_sync(
+         self,
+         prompt: str,
+         providers: list[str],
+         system_prompt: Optional[str],
+         timeout: float,
+         max_concurrent: int,
+     ) -> list[ModelResponse]:
+         """Execute requests to multiple providers in parallel using ThreadPoolExecutor.
+
+         This approach avoids asyncio.run() conflicts when called from within
+         an MCP server's event loop.
+
+         Args:
+             prompt: User prompt
+             providers: Provider IDs to query
+             system_prompt: Optional system prompt
+             timeout: Timeout per provider
+             max_concurrent: Max concurrent requests
+
+         Returns:
+             List of ModelResponse objects
+         """
+         responses: list[ModelResponse] = []
+
+         with ThreadPoolExecutor(max_workers=max_concurrent) as executor:
+             # Submit all provider queries
+             future_to_provider = {
+                 executor.submit(
+                     self._query_provider_sync,
+                     provider_id,
+                     prompt,
+                     system_prompt,
+                     timeout,
+                 ): provider_id
+                 for provider_id in providers
+             }
+
+             # Collect results as they complete
+             for future in as_completed(future_to_provider, timeout=timeout * len(providers)):
+                 provider_id = future_to_provider[future]
+                 try:
+                     response = future.result()
+                     responses.append(response)
+                 except Exception as exc:
+                     responses.append(
+                         ModelResponse(
+                             provider_id=provider_id,
+                             content="",
+                             success=False,
+                             error_message=str(exc),
+                         )
+                     )
+
+         return responses
+
+     def _query_provider_sync(
+         self,
+         provider_id: str,
+         prompt: str,
+         system_prompt: Optional[str],
+         timeout: float,
+     ) -> ModelResponse:
+         """Query a single provider synchronously.
+
+         Args:
+             provider_id: Provider ID or full spec (e.g., "[cli]codex:gpt-5.2")
+             prompt: User prompt
+             system_prompt: Optional system prompt
+             timeout: Request timeout
+
+         Returns:
+             ModelResponse with result or error
+         """
+         start_time = time.perf_counter()
+
+         try:
+             # Parse provider spec to extract base ID and model
+             spec = ProviderSpec.parse_flexible(provider_id)
+             provider = resolve_provider(spec.provider, hooks=ProviderHooks(), model=spec.model)
+             request = ProviderRequest(
+                 prompt=prompt,
+                 system_prompt=system_prompt,
+                 timeout=timeout,
+             )
+
+             result = provider.generate(request)
+             duration_ms = (time.perf_counter() - start_time) * 1000
+
+             if result.status != ProviderStatus.SUCCESS:
+                 return ModelResponse(
+                     provider_id=provider_id,
+                     model_used=result.model_used,
+                     content=result.content or "",
+                     success=False,
+                     error_message=f"Provider returned status: {result.status.value}",
+                     duration_ms=duration_ms,
+                 )
+
+             return ModelResponse(
+                 provider_id=provider_id,
+                 model_used=result.model_used,
+                 content=result.content,
+                 success=True,
+                 tokens_used=result.tokens.total_tokens if result.tokens else None,
+                 duration_ms=duration_ms,
+             )
+
+         except Exception as exc:
+             duration_ms = (time.perf_counter() - start_time) * 1000
+             return ModelResponse(
+                 provider_id=provider_id,
+                 content="",
+                 success=False,
+                 error_message=str(exc),
+                 duration_ms=duration_ms,
+             )
+
+     async def _execute_parallel(
+         self,
+         prompt: str,
+         providers: list[str],
+         system_prompt: Optional[str],
+         timeout: float,
+         max_concurrent: int,
+     ) -> list[ModelResponse]:
+         """Execute requests to multiple providers in parallel (async version).
+
+         Note: This async method is kept for potential future use but the sync
+         version (_execute_parallel_sync) is preferred to avoid event loop conflicts.
+
+         Args:
+             prompt: User prompt
+             providers: Provider IDs to query
+             system_prompt: Optional system prompt
+             timeout: Timeout per provider
+             max_concurrent: Max concurrent requests
+
+         Returns:
+             List of ModelResponse objects
+         """
+         semaphore = asyncio.Semaphore(max_concurrent)
+
+         async def query_provider(provider_id: str) -> ModelResponse:
+             async with semaphore:
+                 return await self._query_single_provider(
+                     provider_id=provider_id,
+                     prompt=prompt,
+                     system_prompt=system_prompt,
+                     timeout=timeout,
+                 )
+
+         tasks = [query_provider(pid) for pid in providers]
+         responses = await asyncio.gather(*tasks, return_exceptions=True)
+
+         # Convert exceptions to failed responses
+         result = []
+         for i, response in enumerate(responses):
+             if isinstance(response, Exception):
+                 result.append(
+                     ModelResponse(
+                         provider_id=providers[i],
+                         content="",
+                         success=False,
+                         error_message=str(response),
+                     )
+                 )
+             else:
+                 result.append(response)
+
+         return result
+
+     async def _query_single_provider(
+         self,
+         provider_id: str,
+         prompt: str,
+         system_prompt: Optional[str],
+         timeout: float,
+     ) -> ModelResponse:
+         """Query a single provider asynchronously.
+
+         Args:
+             provider_id: Provider ID or full spec (e.g., "[cli]codex:gpt-5.2")
+             prompt: User prompt
+             system_prompt: Optional system prompt
+             timeout: Request timeout
+
+         Returns:
+             ModelResponse with result or error
+         """
+         import time
+
+         start_time = time.perf_counter()
+
+         try:
+             # Parse provider spec to extract base ID and model
+             spec = ProviderSpec.parse_flexible(provider_id)
+             provider = resolve_provider(spec.provider, hooks=ProviderHooks(), model=spec.model)
+             request = ProviderRequest(
+                 prompt=prompt,
+                 system_prompt=system_prompt,
+                 timeout=timeout,
+             )
+
+             # Run synchronous generate in thread pool
+             loop = asyncio.get_event_loop()
+             result = await asyncio.wait_for(
+                 loop.run_in_executor(None, provider.generate, request),
+                 timeout=timeout,
+             )
+
+             duration_ms = (time.perf_counter() - start_time) * 1000
+
+             if result.status != ProviderStatus.SUCCESS:
+                 return ModelResponse(
+                     provider_id=provider_id,
+                     model_used=result.model_used,
+                     content=result.content or "",
+                     success=False,
+                     error_message=f"Provider returned status: {result.status.value}",
+                     duration_ms=duration_ms,
+                 )
+
+             return ModelResponse(
+                 provider_id=provider_id,
+                 model_used=result.model_used,
+                 content=result.content,
+                 success=True,
+                 tokens_used=result.tokens.total_tokens if result.tokens else None,
+                 duration_ms=duration_ms,
+             )
+
+         except asyncio.TimeoutError:
+             return ModelResponse(
+                 provider_id=provider_id,
+                 content="",
+                 success=False,
+                 error_message=f"Timeout after {timeout}s",
+                 duration_ms=timeout * 1000,
+             )
+         except Exception as exc:
+             duration_ms = (time.perf_counter() - start_time) * 1000
+             return ModelResponse(
+                 provider_id=provider_id,
+                 content="",
+                 success=False,
+                 error_message=str(exc),
+                 duration_ms=duration_ms,
+             )
+
+     def _apply_strategy(self, state: ConsensusState) -> WorkflowResult:
+         """Apply synthesis strategy to responses.
+
+         Args:
+             state: ConsensusState with collected responses
+
+         Returns:
+             WorkflowResult with synthesized content
+         """
+         successful = state.successful_responses()
+         strategy = state.config.strategy
+
+         if strategy == ConsensusStrategy.ALL_RESPONSES:
+             # Return all responses without synthesis
+             content_parts = []
+             for resp in successful:
+                 content_parts.append(f"### {resp.provider_id}\n\n{resp.content}")
+             return WorkflowResult(
+                 success=True,
+                 content="\n\n---\n\n".join(content_parts),
+                 metadata={"strategy": "all_responses"},
+             )
+
+         elif strategy == ConsensusStrategy.FIRST_VALID:
+             # Return first successful response
+             first = successful[0]
+             return WorkflowResult(
+                 success=True,
+                 content=first.content,
+                 provider_id=first.provider_id,
+                 model_used=first.model_used,
+                 tokens_used=first.tokens_used,
+                 metadata={"strategy": "first_valid"},
+             )
+
+         elif strategy == ConsensusStrategy.MAJORITY:
+             # For factual questions, try to find majority agreement
+             # Simple heuristic: if responses are similar, use first; otherwise synthesize
+             # A more sophisticated implementation would compare semantic similarity
+             return self._synthesize_responses(state, successful)
+
+         elif strategy == ConsensusStrategy.SYNTHESIZE:
+             # Use a model to synthesize all responses
+             return self._synthesize_responses(state, successful)
+
+         else:
+             # Default to first valid
+             first = successful[0]
+             return WorkflowResult(
+                 success=True,
+                 content=first.content,
+                 provider_id=first.provider_id,
+             )
+
+     def _synthesize_responses(
+         self,
+         state: ConsensusState,
+         responses: list[ModelResponse],
+     ) -> WorkflowResult:
+         """Synthesize multiple responses using a model.
+
+         Args:
+             state: ConsensusState with original prompt
+             responses: Successful responses to synthesize
+
+         Returns:
+             WorkflowResult with synthesized content
+         """
+         # Build synthesis prompt
+         response_text = "\n\n---\n\n".join(
+             f"Response from {r.provider_id}:\n{r.content}"
+             for r in responses
+         )
+
+         synthesis_prompt = f"""You are synthesizing multiple AI responses to the same question.
+
+ Original question: {state.prompt}
+
+ {response_text}
+
+ Please synthesize these responses into a single, comprehensive answer that:
+ 1. Captures the key points from all responses
+ 2. Resolves any contradictions by noting different perspectives
+ 3. Provides a clear, well-structured response
+
+ Synthesized response:"""
+
+         # Execute synthesis
+         result = self._execute_provider(
+             prompt=synthesis_prompt,
+             provider_id=state.config.synthesis_provider,
+             system_prompt="You are a helpful assistant that synthesizes multiple AI responses into a coherent, comprehensive answer.",
+         )
+
+         if result.success:
+             result.metadata["strategy"] = "synthesize"
+             result.metadata["synthesis_provider"] = state.config.synthesis_provider
+             result.metadata["source_providers"] = [r.provider_id for r in responses]
+
+         return result
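
For orientation, here is a minimal sketch of how the new consensus API might be driven, based only on the signatures visible in the diff above. The bare ResearchConfig() construction, the provider IDs, and the prompt are illustrative assumptions, not values taken from the package:

# Minimal usage sketch; exercises ConsensusWorkflow as declared in the diff above.
from foundry_mcp.config import ResearchConfig
from foundry_mcp.core.research.models import ConsensusStrategy
from foundry_mcp.core.research.workflows.consensus import ConsensusWorkflow

config = ResearchConfig()  # assumption: default construction is valid
workflow = ConsensusWorkflow(config)  # memory defaults to None

result = workflow.execute(
    prompt="Compare the trade-offs of REST versus gRPC for internal services.",
    providers=["codex", "gemini"],  # assumption: these provider IDs are available
    strategy=ConsensusStrategy.ALL_RESPONSES,  # returns each answer; no synthesis call
    timeout_per_provider=60.0,
    min_responses=1,
)

if result.success:
    print(result.metadata["providers_consulted"])
    print(result.content)
else:
    print(f"Consensus failed: {result.error}")

With the default strategy=ConsensusStrategy.SYNTHESIZE, the workflow would additionally consult a synthesis_provider (falling back to the config's default_provider) to merge the individual answers into one response.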