foundry-mcp 0.7.0__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. foundry_mcp/cli/__init__.py +0 -13
  2. foundry_mcp/cli/commands/session.py +1 -8
  3. foundry_mcp/cli/context.py +39 -0
  4. foundry_mcp/config.py +381 -7
  5. foundry_mcp/core/batch_operations.py +1196 -0
  6. foundry_mcp/core/discovery.py +1 -1
  7. foundry_mcp/core/llm_config.py +8 -0
  8. foundry_mcp/core/naming.py +25 -2
  9. foundry_mcp/core/prometheus.py +0 -13
  10. foundry_mcp/core/providers/__init__.py +12 -0
  11. foundry_mcp/core/providers/base.py +39 -0
  12. foundry_mcp/core/providers/claude.py +45 -1
  13. foundry_mcp/core/providers/codex.py +64 -3
  14. foundry_mcp/core/providers/cursor_agent.py +22 -3
  15. foundry_mcp/core/providers/detectors.py +34 -7
  16. foundry_mcp/core/providers/gemini.py +63 -1
  17. foundry_mcp/core/providers/opencode.py +95 -71
  18. foundry_mcp/core/providers/package-lock.json +4 -4
  19. foundry_mcp/core/providers/package.json +1 -1
  20. foundry_mcp/core/providers/validation.py +128 -0
  21. foundry_mcp/core/research/memory.py +103 -0
  22. foundry_mcp/core/research/models.py +783 -0
  23. foundry_mcp/core/research/providers/__init__.py +40 -0
  24. foundry_mcp/core/research/providers/base.py +242 -0
  25. foundry_mcp/core/research/providers/google.py +507 -0
  26. foundry_mcp/core/research/providers/perplexity.py +442 -0
  27. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  28. foundry_mcp/core/research/providers/tavily.py +383 -0
  29. foundry_mcp/core/research/workflows/__init__.py +5 -2
  30. foundry_mcp/core/research/workflows/base.py +106 -12
  31. foundry_mcp/core/research/workflows/consensus.py +160 -17
  32. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  33. foundry_mcp/core/responses.py +240 -0
  34. foundry_mcp/core/spec.py +1 -0
  35. foundry_mcp/core/task.py +141 -12
  36. foundry_mcp/core/validation.py +6 -1
  37. foundry_mcp/server.py +0 -52
  38. foundry_mcp/tools/unified/__init__.py +37 -18
  39. foundry_mcp/tools/unified/authoring.py +0 -33
  40. foundry_mcp/tools/unified/environment.py +202 -29
  41. foundry_mcp/tools/unified/plan.py +20 -1
  42. foundry_mcp/tools/unified/provider.py +0 -40
  43. foundry_mcp/tools/unified/research.py +644 -19
  44. foundry_mcp/tools/unified/review.py +5 -2
  45. foundry_mcp/tools/unified/review_helpers.py +16 -1
  46. foundry_mcp/tools/unified/server.py +9 -24
  47. foundry_mcp/tools/unified/task.py +528 -9
  48. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +2 -1
  49. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/RECORD +52 -46
  50. foundry_mcp/cli/flags.py +0 -266
  51. foundry_mcp/core/feature_flags.py +0 -592
  52. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  53. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  54. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
@@ -13,13 +13,13 @@ from typing import TYPE_CHECKING, Any, Optional
13
13
  from mcp.server.fastmcp import FastMCP
14
14
 
15
15
  from foundry_mcp.config import ServerConfig
16
- from foundry_mcp.core.feature_flags import get_flag_service
17
16
  from foundry_mcp.core.naming import canonical_tool
18
17
  from foundry_mcp.core.research.memory import ResearchMemory
19
18
  from foundry_mcp.core.research.models import ConsensusStrategy, ThreadStatus
20
19
  from foundry_mcp.core.research.workflows import (
21
20
  ChatWorkflow,
22
21
  ConsensusWorkflow,
22
+ DeepResearchWorkflow,
23
23
  IdeateWorkflow,
24
24
  ThinkDeepWorkflow,
25
25
  )
@@ -40,7 +40,6 @@ if TYPE_CHECKING:
40
40
 
41
41
  logger = logging.getLogger(__name__)
42
42
 
43
-
44
43
  # =============================================================================
45
44
  # Action Summaries
46
45
  # =============================================================================
@@ -50,9 +49,19 @@ _ACTION_SUMMARY = {
50
49
  "consensus": "Multi-model parallel consultation with synthesis",
51
50
  "thinkdeep": "Hypothesis-driven systematic investigation",
52
51
  "ideate": "Creative brainstorming with idea clustering",
52
+ "deep-research": "Multi-phase iterative deep research with query decomposition",
53
+ "deep-research-status": "Get status of deep research session",
54
+ "deep-research-report": "Get final report from deep research",
55
+ "deep-research-list": "List deep research sessions",
56
+ "deep-research-delete": "Delete a deep research session",
53
57
  "thread-list": "List conversation threads",
54
58
  "thread-get": "Get full thread details including messages",
55
59
  "thread-delete": "Delete a conversation thread",
60
+ # Spec-integrated research actions
61
+ "node-execute": "Execute research workflow linked to spec node",
62
+ "node-record": "Record research findings to spec node",
63
+ "node-status": "Get research node status and linked session info",
64
+ "node-findings": "Retrieve recorded findings from spec node",
56
65
  }
57
66
 
58
67
 
@@ -353,6 +362,235 @@ def _handle_ideate(
353
362
  )
354
363
 
355
364
 
365
+ def _handle_deep_research(
366
+ *,
367
+ query: Optional[str] = None,
368
+ research_id: Optional[str] = None,
369
+ deep_research_action: str = "start",
370
+ provider_id: Optional[str] = None,
371
+ system_prompt: Optional[str] = None,
372
+ max_iterations: int = 3,
373
+ max_sub_queries: int = 5,
374
+ max_sources_per_query: int = 5,
375
+ follow_links: bool = True,
376
+ timeout_per_operation: float = 120.0,
377
+ max_concurrent: int = 3,
378
+ task_timeout: Optional[float] = None,
379
+ **kwargs: Any,
380
+ ) -> dict:
381
+ """Handle deep-research action with background execution.
382
+
383
+ CRITICAL: This handler uses asyncio.create_task() via the workflow's
384
+ background mode to start research and return immediately with the
385
+ research_id. The workflow runs in the background and can be polled
386
+ via deep-research-status.
387
+
388
+ Supports:
389
+ - start: Begin new research, returns immediately with research_id
390
+ - continue: Resume paused research in background
391
+ - resume: Alias for continue (for backward compatibility)
392
+ """
393
+ # Normalize 'resume' to 'continue' for workflow compatibility
394
+ if deep_research_action == "resume":
395
+ deep_research_action = "continue"
396
+
397
+ # Validate based on action
398
+ if deep_research_action == "start" and not query:
399
+ return _validation_error(
400
+ "query",
401
+ "deep-research",
402
+ "Query is required to start deep research",
403
+ remediation="Provide a research query to investigate",
404
+ )
405
+
406
+ if deep_research_action in ("continue",) and not research_id:
407
+ return _validation_error(
408
+ "research_id",
409
+ "deep-research",
410
+ f"research_id is required for '{deep_research_action}' action",
411
+ remediation="Use deep-research-list to find existing research sessions",
412
+ )
413
+
414
+ config = _get_config()
415
+ workflow = DeepResearchWorkflow(config.research, _get_memory())
416
+
417
+ # Execute with background=True for non-blocking execution
418
+ # This uses asyncio.create_task() internally and returns immediately
419
+ result = workflow.execute(
420
+ query=query,
421
+ research_id=research_id,
422
+ action=deep_research_action,
423
+ provider_id=provider_id,
424
+ system_prompt=system_prompt,
425
+ max_iterations=max_iterations,
426
+ max_sub_queries=max_sub_queries,
427
+ max_sources_per_query=max_sources_per_query,
428
+ follow_links=follow_links,
429
+ timeout_per_operation=timeout_per_operation,
430
+ max_concurrent=max_concurrent,
431
+ background=True, # CRITICAL: Run in background, return immediately
432
+ task_timeout=task_timeout,
433
+ )
434
+
435
+ if result.success:
436
+ # For background execution, return started status with research_id
437
+ response_data = {
438
+ "research_id": result.metadata.get("research_id"),
439
+ "status": "started",
440
+ "message": "Deep research started in background. Use deep-research-status to poll progress.",
441
+ }
442
+
443
+ # Include additional metadata if available (for continue/resume)
444
+ if result.metadata.get("phase"):
445
+ response_data["phase"] = result.metadata.get("phase")
446
+ if result.metadata.get("iteration") is not None:
447
+ response_data["iteration"] = result.metadata.get("iteration")
448
+
449
+ return asdict(success_response(data=response_data))
450
+ else:
451
+ return asdict(
452
+ error_response(
453
+ result.error or "Deep research failed to start",
454
+ error_code=ErrorCode.INTERNAL_ERROR,
455
+ error_type=ErrorType.INTERNAL,
456
+ remediation="Check query or research_id validity and provider availability",
457
+ details={"action": deep_research_action},
458
+ )
459
+ )
460
+
461
+
462
+ def _handle_deep_research_status(
463
+ *,
464
+ research_id: Optional[str] = None,
465
+ **kwargs: Any,
466
+ ) -> dict:
467
+ """Handle deep-research-status action."""
468
+ if not research_id:
469
+ return _validation_error("research_id", "deep-research-status", "Required")
470
+
471
+ config = _get_config()
472
+ workflow = DeepResearchWorkflow(config.research, _get_memory())
473
+
474
+ result = workflow.execute(
475
+ research_id=research_id,
476
+ action="status",
477
+ )
478
+
479
+ if result.success:
480
+ return asdict(success_response(data=result.metadata))
481
+ else:
482
+ return asdict(
483
+ error_response(
484
+ result.error or "Failed to get status",
485
+ error_code=ErrorCode.NOT_FOUND,
486
+ error_type=ErrorType.NOT_FOUND,
487
+ remediation="Use deep-research-list to find valid research IDs",
488
+ )
489
+ )
490
+
491
+
492
+ def _handle_deep_research_report(
493
+ *,
494
+ research_id: Optional[str] = None,
495
+ **kwargs: Any,
496
+ ) -> dict:
497
+ """Handle deep-research-report action."""
498
+ if not research_id:
499
+ return _validation_error("research_id", "deep-research-report", "Required")
500
+
501
+ config = _get_config()
502
+ workflow = DeepResearchWorkflow(config.research, _get_memory())
503
+
504
+ result = workflow.execute(
505
+ research_id=research_id,
506
+ action="report",
507
+ )
508
+
509
+ if result.success:
510
+ return asdict(
511
+ success_response(
512
+ data={
513
+ "report": result.content,
514
+ **result.metadata,
515
+ }
516
+ )
517
+ )
518
+ else:
519
+ return asdict(
520
+ error_response(
521
+ result.error or "Failed to get report",
522
+ error_code=ErrorCode.NOT_FOUND,
523
+ error_type=ErrorType.NOT_FOUND,
524
+ remediation="Ensure research is complete or use deep-research-status to check",
525
+ )
526
+ )
527
+
528
+
529
+ def _handle_deep_research_list(
530
+ *,
531
+ limit: int = 50,
532
+ cursor: Optional[str] = None,
533
+ completed_only: bool = False,
534
+ **kwargs: Any,
535
+ ) -> dict:
536
+ """Handle deep-research-list action."""
537
+ config = _get_config()
538
+ workflow = DeepResearchWorkflow(config.research, _get_memory())
539
+
540
+ sessions = workflow.list_sessions(
541
+ limit=limit,
542
+ cursor=cursor,
543
+ completed_only=completed_only,
544
+ )
545
+
546
+ # Build response with pagination support
547
+ response_data: dict[str, Any] = {
548
+ "sessions": sessions,
549
+ "count": len(sessions),
550
+ }
551
+
552
+ # Include next cursor if there are more results
553
+ if sessions and len(sessions) == limit:
554
+ # Use last session's ID as cursor for next page
555
+ response_data["next_cursor"] = sessions[-1].get("id")
556
+
557
+ return asdict(success_response(data=response_data))
558
+
559
+
560
+ def _handle_deep_research_delete(
561
+ *,
562
+ research_id: Optional[str] = None,
563
+ **kwargs: Any,
564
+ ) -> dict:
565
+ """Handle deep-research-delete action."""
566
+ if not research_id:
567
+ return _validation_error("research_id", "deep-research-delete", "Required")
568
+
569
+ config = _get_config()
570
+ workflow = DeepResearchWorkflow(config.research, _get_memory())
571
+
572
+ deleted = workflow.delete_session(research_id)
573
+
574
+ if not deleted:
575
+ return asdict(
576
+ error_response(
577
+ f"Research session '{research_id}' not found",
578
+ error_code=ErrorCode.NOT_FOUND,
579
+ error_type=ErrorType.NOT_FOUND,
580
+ remediation="Use deep-research-list to find valid research IDs",
581
+ )
582
+ )
583
+
584
+ return asdict(
585
+ success_response(
586
+ data={
587
+ "deleted": True,
588
+ "research_id": research_id,
589
+ }
590
+ )
591
+ )
592
+
593
+
356
594
  def _handle_thread_list(
357
595
  *,
358
596
  status: Optional[str] = None,
@@ -445,6 +683,325 @@ def _handle_thread_delete(
445
683
  )
446
684
 
447
685
 
686
+ # =============================================================================
687
+ # Spec-Integrated Research Actions
688
+ # =============================================================================
689
+
690
+
691
+ def _load_research_node(
692
+ spec_id: str,
693
+ research_node_id: str,
694
+ workspace: Optional[str] = None,
695
+ ) -> tuple[Optional[dict], Optional[dict], Optional[str]]:
696
+ """Load spec and validate research node exists.
697
+
698
+ Returns:
699
+ (spec_data, node_data, error_message)
700
+ """
701
+ from foundry_mcp.core.spec import load_spec, find_specs_directory
702
+
703
+ specs_dir = find_specs_directory(workspace)
704
+ if specs_dir is None:
705
+ return None, None, "No specs directory found"
706
+
707
+ spec_data = load_spec(spec_id, specs_dir)
708
+ if spec_data is None:
709
+ return None, None, f"Specification '{spec_id}' not found"
710
+
711
+ hierarchy = spec_data.get("hierarchy", {})
712
+ node = hierarchy.get(research_node_id)
713
+ if node is None:
714
+ return None, None, f"Node '{research_node_id}' not found"
715
+
716
+ if node.get("type") != "research":
717
+ return None, None, f"Node '{research_node_id}' is not a research node (type: {node.get('type')})"
718
+
719
+ return spec_data, node, None
720
+
721
+
722
+ def _handle_node_execute(
723
+ *,
724
+ spec_id: Optional[str] = None,
725
+ research_node_id: Optional[str] = None,
726
+ workspace: Optional[str] = None,
727
+ prompt: Optional[str] = None,
728
+ **kwargs: Any,
729
+ ) -> dict:
730
+ """Execute research workflow linked to spec node.
731
+
732
+ Starts the research workflow configured in the node's metadata,
733
+ and stores the session_id back in the node for tracking.
734
+ """
735
+ from datetime import datetime, timezone
736
+ from foundry_mcp.core.spec import save_spec, find_specs_directory
737
+
738
+ if not spec_id:
739
+ return _validation_error("spec_id", "node-execute", "Required")
740
+ if not research_node_id:
741
+ return _validation_error("research_node_id", "node-execute", "Required")
742
+
743
+ spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
744
+ if error:
745
+ return asdict(
746
+ error_response(
747
+ error,
748
+ error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
749
+ error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
750
+ )
751
+ )
752
+
753
+ metadata = node.get("metadata", {})
754
+ research_type = metadata.get("research_type", "consensus")
755
+ query = prompt or metadata.get("query", "")
756
+
757
+ if not query:
758
+ return _validation_error("query", "node-execute", "No query found in node or prompt parameter")
759
+
760
+ # Execute the appropriate research workflow
761
+ config = _get_config()
762
+ session_id = None
763
+ result_data: dict[str, Any] = {
764
+ "spec_id": spec_id,
765
+ "research_node_id": research_node_id,
766
+ "research_type": research_type,
767
+ }
768
+
769
+ if research_type == "chat":
770
+ workflow = ChatWorkflow(config.research, _get_memory())
771
+ result = workflow.chat(prompt=query)
772
+ session_id = result.thread_id
773
+ result_data["thread_id"] = session_id
774
+ elif research_type == "consensus":
775
+ workflow = ConsensusWorkflow(config.research, _get_memory())
776
+ result = workflow.run(prompt=query)
777
+ session_id = result.session_id
778
+ result_data["consensus_id"] = session_id
779
+ result_data["strategy"] = result.strategy.value if result.strategy else None
780
+ elif research_type == "thinkdeep":
781
+ workflow = ThinkDeepWorkflow(config.research, _get_memory())
782
+ result = workflow.run(topic=query)
783
+ session_id = result.investigation_id
784
+ result_data["investigation_id"] = session_id
785
+ elif research_type == "ideate":
786
+ workflow = IdeateWorkflow(config.research, _get_memory())
787
+ result = workflow.run(topic=query)
788
+ session_id = result.ideation_id
789
+ result_data["ideation_id"] = session_id
790
+ elif research_type == "deep-research":
791
+ workflow = DeepResearchWorkflow(config.research, _get_memory())
792
+ result = workflow.start(query=query)
793
+ session_id = result.research_id
794
+ result_data["research_id"] = session_id
795
+ else:
796
+ return _validation_error("research_type", "node-execute", f"Unsupported: {research_type}")
797
+
798
+ # Update node metadata with session info
799
+ metadata["session_id"] = session_id
800
+ history = metadata.setdefault("research_history", [])
801
+ history.append({
802
+ "timestamp": datetime.now(timezone.utc).isoformat(),
803
+ "action": "started",
804
+ "workflow": research_type,
805
+ "session_id": session_id,
806
+ })
807
+ node["metadata"] = metadata
808
+ node["status"] = "in_progress"
809
+
810
+ # Save spec
811
+ specs_dir = find_specs_directory(workspace)
812
+ if specs_dir and not save_spec(spec_id, spec_data, specs_dir):
813
+ return asdict(
814
+ error_response(
815
+ "Failed to save specification",
816
+ error_code=ErrorCode.INTERNAL_ERROR,
817
+ error_type=ErrorType.INTERNAL,
818
+ )
819
+ )
820
+
821
+ result_data["session_id"] = session_id
822
+ result_data["status"] = "started"
823
+ return asdict(success_response(data=result_data))
824
+
825
+
826
+ def _handle_node_record(
827
+ *,
828
+ spec_id: Optional[str] = None,
829
+ research_node_id: Optional[str] = None,
830
+ workspace: Optional[str] = None,
831
+ result: Optional[str] = None,
832
+ summary: Optional[str] = None,
833
+ key_insights: Optional[list[str]] = None,
834
+ recommendations: Optional[list[str]] = None,
835
+ sources: Optional[list[str]] = None,
836
+ confidence: Optional[str] = None,
837
+ session_id: Optional[str] = None,
838
+ **kwargs: Any,
839
+ ) -> dict:
840
+ """Record research findings to spec node."""
841
+ from datetime import datetime, timezone
842
+ from foundry_mcp.core.spec import save_spec, find_specs_directory
843
+ from foundry_mcp.core.validation import VALID_RESEARCH_RESULTS
844
+
845
+ if not spec_id:
846
+ return _validation_error("spec_id", "node-record", "Required")
847
+ if not research_node_id:
848
+ return _validation_error("research_node_id", "node-record", "Required")
849
+ if not result:
850
+ return _validation_error("result", "node-record", "Required (completed, inconclusive, blocked, cancelled)")
851
+ if result not in VALID_RESEARCH_RESULTS:
852
+ return _validation_error("result", "node-record", f"Must be one of: {', '.join(sorted(VALID_RESEARCH_RESULTS))}")
853
+
854
+ spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
855
+ if error:
856
+ return asdict(
857
+ error_response(
858
+ error,
859
+ error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
860
+ error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
861
+ )
862
+ )
863
+
864
+ metadata = node.get("metadata", {})
865
+
866
+ # Store findings
867
+ metadata["findings"] = {
868
+ "summary": summary or "",
869
+ "key_insights": key_insights or [],
870
+ "recommendations": recommendations or [],
871
+ "sources": sources or [],
872
+ "confidence": confidence or "medium",
873
+ }
874
+
875
+ # Update session link if provided
876
+ if session_id:
877
+ metadata["session_id"] = session_id
878
+
879
+ # Add to history
880
+ history = metadata.setdefault("research_history", [])
881
+ history.append({
882
+ "timestamp": datetime.now(timezone.utc).isoformat(),
883
+ "action": "completed",
884
+ "result": result,
885
+ "session_id": session_id or metadata.get("session_id"),
886
+ })
887
+
888
+ node["metadata"] = metadata
889
+
890
+ # Update node status based on result
891
+ if result == "completed":
892
+ node["status"] = "completed"
893
+ elif result == "blocked":
894
+ node["status"] = "blocked"
895
+ else:
896
+ node["status"] = "pending" # inconclusive or cancelled
897
+
898
+ # Save spec
899
+ specs_dir = find_specs_directory(workspace)
900
+ if specs_dir and not save_spec(spec_id, spec_data, specs_dir):
901
+ return asdict(
902
+ error_response(
903
+ "Failed to save specification",
904
+ error_code=ErrorCode.INTERNAL_ERROR,
905
+ error_type=ErrorType.INTERNAL,
906
+ )
907
+ )
908
+
909
+ return asdict(
910
+ success_response(
911
+ data={
912
+ "spec_id": spec_id,
913
+ "research_node_id": research_node_id,
914
+ "result": result,
915
+ "status": node["status"],
916
+ "findings_recorded": True,
917
+ }
918
+ )
919
+ )
920
+
921
+
922
+ def _handle_node_status(
923
+ *,
924
+ spec_id: Optional[str] = None,
925
+ research_node_id: Optional[str] = None,
926
+ workspace: Optional[str] = None,
927
+ **kwargs: Any,
928
+ ) -> dict:
929
+ """Get research node status and linked session info."""
930
+ if not spec_id:
931
+ return _validation_error("spec_id", "node-status", "Required")
932
+ if not research_node_id:
933
+ return _validation_error("research_node_id", "node-status", "Required")
934
+
935
+ spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
936
+ if error:
937
+ return asdict(
938
+ error_response(
939
+ error,
940
+ error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
941
+ error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
942
+ )
943
+ )
944
+
945
+ metadata = node.get("metadata", {})
946
+
947
+ return asdict(
948
+ success_response(
949
+ data={
950
+ "spec_id": spec_id,
951
+ "research_node_id": research_node_id,
952
+ "title": node.get("title"),
953
+ "status": node.get("status"),
954
+ "research_type": metadata.get("research_type"),
955
+ "blocking_mode": metadata.get("blocking_mode"),
956
+ "session_id": metadata.get("session_id"),
957
+ "query": metadata.get("query"),
958
+ "has_findings": bool(metadata.get("findings", {}).get("summary")),
959
+ "history_count": len(metadata.get("research_history", [])),
960
+ }
961
+ )
962
+ )
963
+
964
+
965
+ def _handle_node_findings(
966
+ *,
967
+ spec_id: Optional[str] = None,
968
+ research_node_id: Optional[str] = None,
969
+ workspace: Optional[str] = None,
970
+ **kwargs: Any,
971
+ ) -> dict:
972
+ """Retrieve recorded findings from spec node."""
973
+ if not spec_id:
974
+ return _validation_error("spec_id", "node-findings", "Required")
975
+ if not research_node_id:
976
+ return _validation_error("research_node_id", "node-findings", "Required")
977
+
978
+ spec_data, node, error = _load_research_node(spec_id, research_node_id, workspace)
979
+ if error:
980
+ return asdict(
981
+ error_response(
982
+ error,
983
+ error_code=ErrorCode.NOT_FOUND if "not found" in error.lower() else ErrorCode.VALIDATION_ERROR,
984
+ error_type=ErrorType.NOT_FOUND if "not found" in error.lower() else ErrorType.VALIDATION,
985
+ )
986
+ )
987
+
988
+ metadata = node.get("metadata", {})
989
+ findings = metadata.get("findings", {})
990
+
991
+ return asdict(
992
+ success_response(
993
+ data={
994
+ "spec_id": spec_id,
995
+ "research_node_id": research_node_id,
996
+ "title": node.get("title"),
997
+ "status": node.get("status"),
998
+ "findings": findings,
999
+ "research_history": metadata.get("research_history", []),
1000
+ }
1001
+ )
1002
+ )
1003
+
1004
+
448
1005
  # =============================================================================
449
1006
  # Router Setup
450
1007
  # =============================================================================
@@ -472,6 +1029,31 @@ def _build_router() -> ActionRouter:
472
1029
  handler=_handle_ideate,
473
1030
  summary=_ACTION_SUMMARY["ideate"],
474
1031
  ),
1032
+ ActionDefinition(
1033
+ name="deep-research",
1034
+ handler=_handle_deep_research,
1035
+ summary=_ACTION_SUMMARY["deep-research"],
1036
+ ),
1037
+ ActionDefinition(
1038
+ name="deep-research-status",
1039
+ handler=_handle_deep_research_status,
1040
+ summary=_ACTION_SUMMARY["deep-research-status"],
1041
+ ),
1042
+ ActionDefinition(
1043
+ name="deep-research-report",
1044
+ handler=_handle_deep_research_report,
1045
+ summary=_ACTION_SUMMARY["deep-research-report"],
1046
+ ),
1047
+ ActionDefinition(
1048
+ name="deep-research-list",
1049
+ handler=_handle_deep_research_list,
1050
+ summary=_ACTION_SUMMARY["deep-research-list"],
1051
+ ),
1052
+ ActionDefinition(
1053
+ name="deep-research-delete",
1054
+ handler=_handle_deep_research_delete,
1055
+ summary=_ACTION_SUMMARY["deep-research-delete"],
1056
+ ),
475
1057
  ActionDefinition(
476
1058
  name="thread-list",
477
1059
  handler=_handle_thread_list,
@@ -487,6 +1069,27 @@ def _build_router() -> ActionRouter:
487
1069
  handler=_handle_thread_delete,
488
1070
  summary=_ACTION_SUMMARY["thread-delete"],
489
1071
  ),
1072
+ # Spec-integrated research actions
1073
+ ActionDefinition(
1074
+ name="node-execute",
1075
+ handler=_handle_node_execute,
1076
+ summary=_ACTION_SUMMARY["node-execute"],
1077
+ ),
1078
+ ActionDefinition(
1079
+ name="node-record",
1080
+ handler=_handle_node_record,
1081
+ summary=_ACTION_SUMMARY["node-record"],
1082
+ ),
1083
+ ActionDefinition(
1084
+ name="node-status",
1085
+ handler=_handle_node_status,
1086
+ summary=_ACTION_SUMMARY["node-status"],
1087
+ ),
1088
+ ActionDefinition(
1089
+ name="node-findings",
1090
+ handler=_handle_node_findings,
1091
+ summary=_ACTION_SUMMARY["node-findings"],
1092
+ ),
490
1093
  ]
491
1094
  return ActionRouter(tool_name="research", actions=definitions)
492
1095
 
@@ -538,6 +1141,7 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
538
1141
  thread_id: Optional[str] = None,
539
1142
  investigation_id: Optional[str] = None,
540
1143
  ideation_id: Optional[str] = None,
1144
+ research_id: Optional[str] = None,
541
1145
  topic: Optional[str] = None,
542
1146
  query: Optional[str] = None,
543
1147
  system_prompt: Optional[str] = None,
@@ -547,10 +1151,17 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
547
1151
  strategy: Optional[str] = None,
548
1152
  synthesis_provider: Optional[str] = None,
549
1153
  timeout_per_provider: float = 30.0,
1154
+ timeout_per_operation: float = 120.0,
550
1155
  max_concurrent: int = 3,
551
1156
  require_all: bool = False,
552
1157
  min_responses: int = 1,
553
1158
  max_depth: Optional[int] = None,
1159
+ max_iterations: int = 3,
1160
+ max_sub_queries: int = 5,
1161
+ max_sources_per_query: int = 5,
1162
+ follow_links: bool = True,
1163
+ deep_research_action: str = "start",
1164
+ task_timeout: Optional[float] = None,
554
1165
  ideate_action: str = "generate",
555
1166
  perspective: Optional[str] = None,
556
1167
  perspectives: Optional[list[str]] = None,
@@ -561,6 +1172,8 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
561
1172
  title: Optional[str] = None,
562
1173
  status: Optional[str] = None,
563
1174
  limit: int = 50,
1175
+ cursor: Optional[str] = None,
1176
+ completed_only: bool = False,
564
1177
  ) -> dict:
565
1178
  """Execute research workflows via the action router.
566
1179
 
@@ -569,30 +1182,42 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
569
1182
  - consensus: Multi-model parallel consultation with synthesis
570
1183
  - thinkdeep: Hypothesis-driven systematic investigation
571
1184
  - ideate: Creative brainstorming with idea clustering
572
- - route: Intelligent workflow selection based on prompt
1185
+ - deep-research: Multi-phase iterative deep research with query decomposition
1186
+ - deep-research-status: Get status of deep research session
1187
+ - deep-research-report: Get final report from deep research
1188
+ - deep-research-list: List deep research sessions
1189
+ - deep-research-delete: Delete a deep research session
573
1190
  - thread-list: List conversation threads
574
1191
  - thread-get: Get thread details including messages
575
1192
  - thread-delete: Delete a conversation thread
576
1193
 
577
1194
  Args:
578
1195
  action: The research action to execute
579
- prompt: User prompt/message (chat, consensus, route)
1196
+ prompt: User prompt/message (chat, consensus)
580
1197
  thread_id: Thread ID for continuing conversations (chat)
581
1198
  investigation_id: Investigation ID to continue (thinkdeep)
582
1199
  ideation_id: Ideation session ID to continue (ideate)
1200
+ research_id: Deep research session ID (deep-research-*)
583
1201
  topic: Topic for new investigation/ideation
584
- query: Follow-up query (thinkdeep)
1202
+ query: Research query (deep-research) or follow-up (thinkdeep)
585
1203
  system_prompt: System prompt for workflows
586
1204
  provider_id: Provider to use for single-model operations
587
1205
  model: Model override
588
1206
  providers: Provider list for consensus
589
1207
  strategy: Consensus strategy (all_responses, synthesize, majority, first_valid)
590
1208
  synthesis_provider: Provider for synthesis
591
- timeout_per_provider: Timeout per provider in seconds
592
- max_concurrent: Max concurrent provider calls
1209
+ timeout_per_provider: Timeout per provider in seconds (consensus)
1210
+ timeout_per_operation: Timeout per operation in seconds (deep-research)
1211
+ max_concurrent: Max concurrent provider/operation calls
593
1212
  require_all: Require all providers to succeed
594
1213
  min_responses: Minimum successful responses needed
595
1214
  max_depth: Maximum investigation depth (thinkdeep)
1215
+ max_iterations: Maximum refinement iterations (deep-research)
1216
+ max_sub_queries: Maximum sub-queries to generate (deep-research)
1217
+ max_sources_per_query: Maximum sources per sub-query (deep-research)
1218
+ follow_links: Whether to follow and extract links (deep-research)
1219
+ deep_research_action: Sub-action for deep-research (start, continue, resume)
1220
+ task_timeout: Overall timeout for background research task in seconds
596
1221
  ideate_action: Ideation sub-action (generate, cluster, score, select, elaborate)
597
1222
  perspective: Specific perspective for idea generation
598
1223
  perspectives: Custom perspectives list
@@ -603,28 +1228,19 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
603
1228
  title: Title for new threads
604
1229
  status: Filter threads by status
605
1230
  limit: Maximum items to return
1231
+ cursor: Pagination cursor for deep-research-list
1232
+ completed_only: Filter to completed sessions only (deep-research-list)
606
1233
 
607
1234
  Returns:
608
1235
  Response envelope with action results
609
1236
  """
610
- # Check feature flag
611
- flag_service = get_flag_service()
612
- if not flag_service.is_enabled("research_tools"):
613
- return asdict(
614
- error_response(
615
- "Research tools are not enabled",
616
- error_code=ErrorCode.FEATURE_DISABLED,
617
- error_type=ErrorType.UNAVAILABLE,
618
- remediation="Enable 'research_tools' feature flag in configuration",
619
- )
620
- )
621
-
622
1237
  return _dispatch_research_action(
623
1238
  action=action,
624
1239
  prompt=prompt,
625
1240
  thread_id=thread_id,
626
1241
  investigation_id=investigation_id,
627
1242
  ideation_id=ideation_id,
1243
+ research_id=research_id,
628
1244
  topic=topic,
629
1245
  query=query,
630
1246
  system_prompt=system_prompt,
@@ -634,10 +1250,17 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
634
1250
  strategy=strategy,
635
1251
  synthesis_provider=synthesis_provider,
636
1252
  timeout_per_provider=timeout_per_provider,
1253
+ timeout_per_operation=timeout_per_operation,
637
1254
  max_concurrent=max_concurrent,
638
1255
  require_all=require_all,
639
1256
  min_responses=min_responses,
640
1257
  max_depth=max_depth,
1258
+ max_iterations=max_iterations,
1259
+ max_sub_queries=max_sub_queries,
1260
+ max_sources_per_query=max_sources_per_query,
1261
+ follow_links=follow_links,
1262
+ deep_research_action=deep_research_action,
1263
+ task_timeout=task_timeout,
641
1264
  ideate_action=ideate_action,
642
1265
  perspective=perspective,
643
1266
  perspectives=perspectives,
@@ -648,6 +1271,8 @@ def register_unified_research_tool(mcp: FastMCP, config: ServerConfig) -> None:
648
1271
  title=title,
649
1272
  status=status,
650
1273
  limit=limit,
1274
+ cursor=cursor,
1275
+ completed_only=completed_only,
651
1276
  )
652
1277
 
653
1278
  logger.debug("Registered unified research tool")