foundry-mcp 0.7.0__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. foundry_mcp/cli/__init__.py +0 -13
  2. foundry_mcp/cli/commands/session.py +1 -8
  3. foundry_mcp/cli/context.py +39 -0
  4. foundry_mcp/config.py +381 -7
  5. foundry_mcp/core/batch_operations.py +1196 -0
  6. foundry_mcp/core/discovery.py +1 -1
  7. foundry_mcp/core/llm_config.py +8 -0
  8. foundry_mcp/core/naming.py +25 -2
  9. foundry_mcp/core/prometheus.py +0 -13
  10. foundry_mcp/core/providers/__init__.py +12 -0
  11. foundry_mcp/core/providers/base.py +39 -0
  12. foundry_mcp/core/providers/claude.py +45 -1
  13. foundry_mcp/core/providers/codex.py +64 -3
  14. foundry_mcp/core/providers/cursor_agent.py +22 -3
  15. foundry_mcp/core/providers/detectors.py +34 -7
  16. foundry_mcp/core/providers/gemini.py +63 -1
  17. foundry_mcp/core/providers/opencode.py +95 -71
  18. foundry_mcp/core/providers/package-lock.json +4 -4
  19. foundry_mcp/core/providers/package.json +1 -1
  20. foundry_mcp/core/providers/validation.py +128 -0
  21. foundry_mcp/core/research/memory.py +103 -0
  22. foundry_mcp/core/research/models.py +783 -0
  23. foundry_mcp/core/research/providers/__init__.py +40 -0
  24. foundry_mcp/core/research/providers/base.py +242 -0
  25. foundry_mcp/core/research/providers/google.py +507 -0
  26. foundry_mcp/core/research/providers/perplexity.py +442 -0
  27. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  28. foundry_mcp/core/research/providers/tavily.py +383 -0
  29. foundry_mcp/core/research/workflows/__init__.py +5 -2
  30. foundry_mcp/core/research/workflows/base.py +106 -12
  31. foundry_mcp/core/research/workflows/consensus.py +160 -17
  32. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  33. foundry_mcp/core/responses.py +240 -0
  34. foundry_mcp/core/spec.py +1 -0
  35. foundry_mcp/core/task.py +141 -12
  36. foundry_mcp/core/validation.py +6 -1
  37. foundry_mcp/server.py +0 -52
  38. foundry_mcp/tools/unified/__init__.py +37 -18
  39. foundry_mcp/tools/unified/authoring.py +0 -33
  40. foundry_mcp/tools/unified/environment.py +202 -29
  41. foundry_mcp/tools/unified/plan.py +20 -1
  42. foundry_mcp/tools/unified/provider.py +0 -40
  43. foundry_mcp/tools/unified/research.py +644 -19
  44. foundry_mcp/tools/unified/review.py +5 -2
  45. foundry_mcp/tools/unified/review_helpers.py +16 -1
  46. foundry_mcp/tools/unified/server.py +9 -24
  47. foundry_mcp/tools/unified/task.py +528 -9
  48. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +2 -1
  49. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/RECORD +52 -46
  50. foundry_mcp/cli/flags.py +0 -266
  51. foundry_mcp/core/feature_flags.py +0 -592
  52. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  53. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  54. {foundry_mcp-0.7.0.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/providers/opencode.py
@@ -19,7 +19,6 @@ from pathlib import Path
 from typing import Any, Dict, List, Optional, Protocol, Sequence
 
 from .base import (
-    ModelDescriptor,
     ProviderCapability,
     ProviderContext,
     ProviderExecutionError,
@@ -122,52 +121,10 @@ def _default_runner(
     )
 
 
-OPENCODE_MODELS: List[ModelDescriptor] = [
-    ModelDescriptor(
-        id="openai/gpt-5.2",
-        display_name="OpenAI GPT-5.2 (via OpenCode)",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.STREAMING,
-        },
-        routing_hints={
-            "configurable": True,
-            "source": "opencode config",
-            "note": "Accepts any model ID - validated by opencode CLI",
-        },
-    ),
-    ModelDescriptor(
-        id="openai/gpt-5.2-codex",
-        display_name="OpenAI GPT-5.2 Codex (via OpenCode)",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.STREAMING,
-        },
-        routing_hints={
-            "configurable": True,
-            "source": "opencode config",
-            "note": "Accepts any model ID - validated by opencode CLI",
-        },
-    ),
-    ModelDescriptor(
-        id="openai/gpt-5.1-codex-mini",
-        display_name="OpenAI GPT-5.1 Codex Mini (via OpenCode)",
-        capabilities={
-            ProviderCapability.TEXT,
-            ProviderCapability.STREAMING,
-        },
-        routing_hints={
-            "configurable": True,
-            "source": "opencode config",
-            "note": "Accepts any model ID - validated by opencode CLI",
-        },
-    ),
-]
-
 OPENCODE_METADATA = ProviderMetadata(
     provider_id="opencode",
     display_name="OpenCode AI SDK",
-    models=OPENCODE_MODELS,
+    models=[],  # Model validation delegated to CLI
     default_model="openai/gpt-5.1-codex-mini",
     capabilities={ProviderCapability.TEXT, ProviderCapability.STREAMING},
     security_flags={"writes_allowed": False, "read_only": True},
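
Aside (not part of the diff): with the hard-coded OPENCODE_MODELS catalog removed and `models=[]` in the metadata, model validation is deferred to the opencode CLI; the constructor hunk below keeps only an emptiness check on top of the fallback chain. A minimal standalone sketch of that chain, with a hypothetical helper name used purely for illustration:

    # Illustrative sketch only: explicit model wins, then the metadata default,
    # then the hard-coded fallback; blank values are rejected (the real code
    # raises ProviderExecutionError rather than ValueError).
    def pick_model(model: str | None, metadata_default: str | None) -> str:
        effective = model or metadata_default or "openai/gpt-5.1-codex-mini"
        if not effective.strip():
            raise ValueError("Model identifier cannot be empty")
        return effective

    assert pick_model(None, None) == "openai/gpt-5.1-codex-mini"
    assert pick_model("anthropic/claude-sonnet-4", None) == "anthropic/claude-sonnet-4"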
@@ -206,9 +163,16 @@ class OpenCodeProvider(ProviderContext):
         self._env = self._prepare_subprocess_env(env)
 
         self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
-        self._model = self._ensure_model(
-            model or metadata.default_model or self._first_model_id()
-        )
+
+        # Validate model - reject empty or whitespace-only strings
+        effective_model = model or metadata.default_model or "openai/gpt-5.1-codex-mini"
+        if not effective_model or not effective_model.strip():
+            raise ProviderExecutionError(
+                "Model identifier cannot be empty",
+                provider="opencode",
+            )
+        self._model = effective_model
+
         self._server_process: Optional[subprocess.Popen[bytes]] = None
         self._config_file_path: Optional[Path] = None
 
@@ -256,8 +220,51 @@ class OpenCodeProvider(ProviderContext):
         # Note: OPENCODE_API_KEY should be provided via environment or custom_env
         # We don't set a default value for security reasons
 
+        # Add global npm modules to NODE_PATH so wrapper can find @opencode-ai/sdk
+        # This allows the SDK to be installed globally rather than bundled
+        self._ensure_node_path(subprocess_env)
+
         return subprocess_env
 
+    def _ensure_node_path(self, env: Dict[str, str]) -> None:
+        """
+        Ensure NODE_PATH includes global npm modules and local node_modules.
+
+        This allows the wrapper script to import @opencode-ai/sdk whether it's
+        installed globally (npm install -g @opencode-ai/sdk) or locally in the
+        providers directory.
+        """
+        node_paths: List[str] = []
+
+        # Add existing NODE_PATH entries
+        if env.get("NODE_PATH"):
+            node_paths.extend(env["NODE_PATH"].split(os.pathsep))
+
+        # Add local node_modules (alongside wrapper script)
+        local_node_modules = self._wrapper_path.parent / "node_modules"
+        if local_node_modules.exists():
+            node_paths.append(str(local_node_modules))
+
+        # Detect and add global npm root
+        try:
+            result = subprocess.run(
+                ["npm", "root", "-g"],
+                capture_output=True,
+                text=True,
+                timeout=5,
+                check=False,
+            )
+            if result.returncode == 0 and result.stdout.strip():
+                global_root = result.stdout.strip()
+                if global_root not in node_paths:
+                    node_paths.append(global_root)
+        except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
+            # npm not available or timed out - skip global path
+            pass
+
+        if node_paths:
+            env["NODE_PATH"] = os.pathsep.join(node_paths)
+
     def _create_readonly_config(self) -> Path:
         """
         Create temporary opencode.json with read-only tool restrictions.
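
Aside (not part of the diff): `_ensure_node_path` assembles NODE_PATH from the existing entries, the local `node_modules` next to the wrapper script, and the global npm root. A rough, illustrative way to verify that the SDK resolves under an environment built the same way, assuming Node.js and npm are on PATH:

    import os
    import subprocess

    # Sketch: check that @opencode-ai/sdk is importable with NODE_PATH extended
    # by the global npm root, mirroring the helper's behavior.
    env = dict(os.environ)
    global_root = subprocess.run(
        ["npm", "root", "-g"], capture_output=True, text=True, check=False
    ).stdout.strip()
    if global_root:
        env["NODE_PATH"] = os.pathsep.join(filter(None, [env.get("NODE_PATH"), global_root]))

    check = subprocess.run(
        ["node", "-e", "require.resolve('@opencode-ai/sdk')"],
        env=env, capture_output=True, text=True, check=False,
    )
    print("sdk resolvable" if check.returncode == 0 else "sdk missing")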
@@ -297,27 +304,6 @@ class OpenCodeProvider(ProviderContext):
         finally:
             self._config_file_path = None
 
-    def _first_model_id(self) -> str:
-        if not self.metadata.models:
-            raise ProviderUnavailableError(
-                "OpenCode provider metadata is missing model descriptors.",
-                provider=self.metadata.provider_id,
-            )
-        return self.metadata.models[0].id
-
-    def _ensure_model(self, candidate: str) -> str:
-        # Validate that the model is not empty
-        if not candidate or not candidate.strip():
-            raise ProviderExecutionError(
-                "Model identifier cannot be empty",
-                provider=self.metadata.provider_id,
-            )
-
-        # For opencode, we accept any model ID and let opencode CLI validate it
-        # This avoids maintaining a hardcoded list that would become stale
-        # opencode CLI supports many models across providers (OpenAI, Anthropic, etc.)
-        return candidate
-
     def _is_port_open(self, port: int, host: str = "localhost") -> bool:
         """Check if a TCP port is open and accepting connections."""
         try:
@@ -437,10 +423,15 @@ class OpenCodeProvider(ProviderContext):
         return request.prompt
 
     def _resolve_model(self, request: ProviderRequest) -> str:
-        """Resolve model from request metadata or use default."""
+        """Resolve model from request or use default."""
+        # 1. Check request.model first (from ProviderRequest constructor)
+        if request.model:
+            return str(request.model)
+        # 2. Fallback to metadata override (legacy/alternative path)
         model_override = request.metadata.get("model") if request.metadata else None
         if model_override:
-            return self._ensure_model(str(model_override))
+            return str(model_override)
+        # 3. Fallback to instance default
         return self._model
 
     def _emit_stream_if_requested(self, content: str, *, stream: bool) -> None:
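
Aside (not part of the diff): the reworked `_resolve_model` establishes a three-step precedence: `request.model`, then a `metadata["model"]` override, then the instance default. A condensed standalone sketch of that ordering (function name hypothetical):

    # Sketch of the resolution order only; the real method lives on OpenCodeProvider.
    def resolve_model(request_model, metadata, instance_default):
        if request_model:
            return str(request_model)
        override = metadata.get("model") if metadata else None
        if override:
            return str(override)
        return instance_default

    assert resolve_model("openai/gpt-5.2", {"model": "x"}, "d") == "openai/gpt-5.2"
    assert resolve_model(None, {"model": "x"}, "d") == "x"
    assert resolve_model(None, None, "d") == "d"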
@@ -449,6 +440,30 @@ class OpenCodeProvider(ProviderContext):
             return
         self._emit_stream_chunk(StreamChunk(content=content, index=0))
 
+    def _extract_error_from_jsonl(self, stdout: str) -> Optional[str]:
+        """
+        Extract error message from OpenCode wrapper JSONL output.
+
+        The wrapper outputs errors as {"type":"error","code":"...","message":"..."}.
+        """
+        if not stdout:
+            return None
+
+        for line in stdout.strip().split("\n"):
+            if not line.strip():
+                continue
+            try:
+                event = json.loads(line)
+            except json.JSONDecodeError:
+                continue
+
+            if event.get("type") == "error":
+                msg = event.get("message", "")
+                if msg:
+                    return msg
+
+        return None
+
     def _execute(self, request: ProviderRequest) -> ProviderResult:
         """Execute generation request via OpenCode wrapper."""
         self._validate_request(request)
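
Aside (not part of the diff): the wrapper streams JSONL events and reports failures as `{"type": "error", ...}` objects, so the first error message can be recovered with a scan like the new helper's. A self-contained sketch with a made-up sample payload:

    import json

    # Sketch mirroring _extract_error_from_jsonl: return the first
    # {"type": "error", "message": ...} event's message, if any.
    def first_jsonl_error(stdout: str):
        for line in stdout.splitlines():
            if not line.strip():
                continue
            try:
                event = json.loads(line)
            except json.JSONDecodeError:
                continue
            if event.get("type") == "error" and event.get("message"):
                return event["message"]
        return None

    sample = '{"type":"chunk","content":"..."}\n{"type":"error","code":"auth","message":"missing API key"}'
    assert first_jsonl_error(sample) == "missing API key"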
@@ -497,8 +512,17 @@ class OpenCodeProvider(ProviderContext):
         if completed.returncode != 0:
             stderr = (completed.stderr or "").strip()
             logger.debug(f"OpenCode wrapper stderr: {stderr or 'no stderr'}")
+
+            # Extract error from JSONL stdout (wrapper outputs {"type":"error","message":"..."})
+            jsonl_error = self._extract_error_from_jsonl(completed.stdout)
+
+            error_msg = f"OpenCode wrapper exited with code {completed.returncode}"
+            if jsonl_error:
+                error_msg += f": {jsonl_error[:500]}"
+            elif stderr:
+                error_msg += f": {stderr[:500]}"
             raise ProviderExecutionError(
-                f"OpenCode wrapper exited with code {completed.returncode}",
+                error_msg,
                 provider=self.metadata.provider_id,
             )
 
foundry_mcp/core/providers/package-lock.json
@@ -9,16 +9,16 @@
       "version": "0.1.0",
       "license": "MIT",
       "dependencies": {
-        "@opencode-ai/sdk": "^1.0.0"
+        "@opencode-ai/sdk": "^1.0.218"
       },
       "engines": {
         "node": ">=18.0.0"
       }
     },
     "node_modules/@opencode-ai/sdk": {
-      "version": "1.0.164",
-      "resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.0.164.tgz",
-      "integrity": "sha512-TG+bpgL3O4tU/vCOT0THaSL5wJoXc15ErS79NLrEzFj1Igq1a9Mhef3oYZae0zZOI/ZTl/VNswguUeqkBm41pg=="
+      "version": "1.0.218",
+      "resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.0.218.tgz",
+      "integrity": "sha512-c6ss6UPAMskSVQUecuhNvPLFngyVh2Os9o0kpVjoqJJ16HXhzjVSk5axgh3ueQrfP5aZfg5o6l6srmjuCTPNnQ=="
     }
   }
 }
foundry_mcp/core/providers/package.json
@@ -17,7 +17,7 @@
   "author": "Foundry MCP",
   "license": "MIT",
   "dependencies": {
-    "@opencode-ai/sdk": "^1.0.0"
+    "@opencode-ai/sdk": "^1.0.218"
   },
   "engines": {
     "node": ">=18.0.0"
foundry_mcp/core/providers/validation.py
@@ -696,6 +696,129 @@ def with_validation_and_resilience(
     return decorator
 
 
+# ---------------------------------------------------------------------------
+# Context Window Error Detection
+# ---------------------------------------------------------------------------
+
+# Common error patterns indicating context window/token limit exceeded
+CONTEXT_WINDOW_ERROR_PATTERNS: Set[str] = {
+    # OpenAI patterns
+    "context_length_exceeded",
+    "maximum context length",
+    "max_tokens",
+    "token limit",
+    "tokens exceeds",
+    "prompt is too long",
+    "input too long",
+    # Anthropic patterns
+    "prompt is too large",
+    "context window",
+    "exceeds the maximum",
+    "too many tokens",
+    # Google/Gemini patterns
+    "max input tokens",
+    "input token limit",
+    "content is too long",
+    "request payload size exceeds",
+    # Generic patterns
+    "length exceeded",
+    "limit exceeded",
+    "too long for model",
+    "input exceeds",
+    "context limit",
+}
+
+
+def is_context_window_error(error: Exception) -> bool:
+    """Check if an exception indicates a context window/token limit error.
+
+    Examines the error message for common patterns indicating the prompt
+    exceeded the model's context window or token limit.
+
+    Args:
+        error: Exception to check
+
+    Returns:
+        True if the error appears to be a context window error
+    """
+    error_str = str(error).lower()
+
+    for pattern in CONTEXT_WINDOW_ERROR_PATTERNS:
+        if pattern in error_str:
+            return True
+
+    return False
+
+
+def extract_token_counts(error_str: str) -> tuple[Optional[int], Optional[int]]:
+    """Extract token counts from error message if present.
+
+    Attempts to parse prompt_tokens and max_tokens from common error formats.
+
+    Args:
+        error_str: Error message string
+
+    Returns:
+        Tuple of (prompt_tokens, max_tokens), either may be None if not found
+    """
+    import re
+
+    prompt_tokens = None
+    max_tokens = None
+
+    # Pattern: "X tokens exceeds Y limit" or "X exceeds Y"
+    match = re.search(r"(\d{1,7})\s*tokens?\s*exceeds?\s*(?:the\s*)?(\d{1,7})", error_str.lower())
+    if match:
+        prompt_tokens = int(match.group(1))
+        max_tokens = int(match.group(2))
+        return prompt_tokens, max_tokens
+
+    # Pattern: "maximum context length is X tokens" with "Y tokens" input
+    max_match = re.search(r"maximum\s+(?:context\s+)?length\s+(?:is\s+)?(\d{1,7})", error_str.lower())
+    if max_match:
+        max_tokens = int(max_match.group(1))
+
+    # Pattern: "requested X tokens" or "contains X tokens"
+    prompt_match = re.search(r"(?:requested|contains|have|with)\s+(\d{1,7})\s*tokens?", error_str.lower())
+    if prompt_match:
+        prompt_tokens = int(prompt_match.group(1))
+
+    return prompt_tokens, max_tokens
+
+
+def create_context_window_guidance(
+    prompt_tokens: Optional[int] = None,
+    max_tokens: Optional[int] = None,
+    provider_id: Optional[str] = None,
+) -> str:
+    """Generate actionable guidance for resolving context window errors.
+
+    Args:
+        prompt_tokens: Number of tokens in the prompt (if known)
+        max_tokens: Maximum tokens allowed (if known)
+        provider_id: Provider that raised the error
+
+    Returns:
+        Human-readable guidance string
+    """
+    parts = ["Context window limit exceeded."]
+
+    if prompt_tokens and max_tokens:
+        overflow = prompt_tokens - max_tokens
+        parts.append(f"Prompt ({prompt_tokens:,} tokens) exceeds limit ({max_tokens:,} tokens) by {overflow:,} tokens.")
+    elif prompt_tokens:
+        parts.append(f"Prompt contains approximately {prompt_tokens:,} tokens.")
+    elif max_tokens:
+        parts.append(f"Maximum context window is {max_tokens:,} tokens.")
+
+    parts.append("To resolve: (1) Reduce input size by excluding large content, "
+                 "(2) Summarize or truncate long sections, "
+                 "(3) Use a model with larger context window, "
+                 "(4) Process content in smaller batches.")
+
+    return " ".join(parts)
+
+
 __all__ = [
     # Validation
     "ValidationError",
@@ -726,4 +849,9 @@ __all__ = [
     "reset_rate_limiters",
     # Execution wrapper
     "with_validation_and_resilience",
+    # Context window detection
+    "CONTEXT_WINDOW_ERROR_PATTERNS",
+    "is_context_window_error",
+    "extract_token_counts",
+    "create_context_window_guidance",
 ]
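
Aside (not part of the diff): taken together, the new helpers let a caller classify a provider failure and turn it into actionable guidance. A hedged usage sketch, assuming the functions are exported from `foundry_mcp.core.providers.validation` as the `__all__` hunk above suggests:

    from foundry_mcp.core.providers.validation import (  # assumed import path
        create_context_window_guidance,
        extract_token_counts,
        is_context_window_error,
    )

    try:
        # Simulated provider error message for illustration only.
        raise RuntimeError("prompt: 131072 tokens exceeds 128000 limit")
    except RuntimeError as exc:
        if is_context_window_error(exc):
            prompt_tokens, max_tokens = extract_token_counts(str(exc))
            print(create_context_window_guidance(prompt_tokens, max_tokens, provider_id="opencode"))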
foundry_mcp/core/research/memory.py
@@ -15,6 +15,7 @@ from filelock import FileLock
 from foundry_mcp.core.research.models import (
     ConsensusState,
     ConversationThread,
+    DeepResearchState,
     IdeationState,
     ThinkDeepState,
     ThreadStatus,
@@ -222,6 +223,11 @@ class ResearchMemory:
             model_class=ConsensusState,
             ttl_hours=ttl_hours,
         )
+        self._deep_research = FileStorageBackend(
+            storage_path=base_path / "deep_research",
+            model_class=DeepResearchState,
+            ttl_hours=ttl_hours,
+        )
 
     # =========================================================================
     # Thread operations (CHAT workflow)
@@ -394,6 +400,65 @@ class ResearchMemory:
 
         return states
 
+    # =========================================================================
+    # Deep research operations (DEEP_RESEARCH workflow)
+    # =========================================================================
+
+    def save_deep_research(self, deep_research: DeepResearchState) -> None:
+        """Save a deep research state."""
+        self._deep_research.save(deep_research.id, deep_research)
+
+    def load_deep_research(self, deep_research_id: str) -> Optional[DeepResearchState]:
+        """Load a deep research state by ID."""
+        return self._deep_research.load(deep_research_id)
+
+    def delete_deep_research(self, deep_research_id: str) -> bool:
+        """Delete a deep research state."""
+        return self._deep_research.delete(deep_research_id)
+
+    def list_deep_research(
+        self,
+        limit: Optional[int] = None,
+        cursor: Optional[str] = None,
+        completed_only: bool = False,
+    ) -> list[DeepResearchState]:
+        """List deep research states.
+
+        Args:
+            limit: Maximum number of states to return
+            cursor: Pagination cursor (research_id to start after)
+            completed_only: Filter to only completed research
+
+        Returns:
+            List of deep research states
+        """
+        states = []
+        for dr_id in self._deep_research.list_ids():
+            dr = self._deep_research.load(dr_id)
+            if dr is not None:
+                if completed_only and dr.completed_at is None:
+                    continue
+                states.append(dr)
+
+        # Sort by updated_at descending
+        states.sort(key=lambda s: s.updated_at, reverse=True)
+
+        # Apply cursor-based pagination (skip until after cursor ID)
+        if cursor is not None:
+            cursor_found = False
+            filtered_states = []
+            for state in states:
+                if cursor_found:
+                    filtered_states.append(state)
+                elif state.id == cursor:
+                    cursor_found = True
+            states = filtered_states
+
+        if limit is not None:
+            states = states[:limit]
+
+        return states
+
     # =========================================================================
     # Maintenance operations
     # =========================================================================
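
Aside (not part of the diff): the cursor in `list_deep_research` is the ID of the last item from the previous page, so callers can walk results in stable `updated_at` order. A short, illustrative pagination sketch, assuming `memory` is an initialized `ResearchMemory`:

    # Sketch: page through completed deep-research states, 20 at a time.
    cursor = None
    while True:
        page = memory.list_deep_research(limit=20, cursor=cursor, completed_only=True)
        if not page:
            break
        for state in page:
            print(state.id, state.updated_at)
        cursor = page[-1].id  # next call resumes after this ID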
@@ -409,6 +474,7 @@ class ResearchMemory:
             "investigations": self._investigations.cleanup_expired(),
             "ideations": self._ideations.cleanup_expired(),
             "consensus": self._consensus.cleanup_expired(),
+            "deep_research": self._deep_research.cleanup_expired(),
         }
 
     def get_storage_stats(self) -> dict[str, int]:
@@ -422,4 +488,41 @@ class ResearchMemory:
             "investigations": len(self._investigations.list_ids()),
             "ideations": len(self._ideations.list_ids()),
             "consensus": len(self._consensus.list_ids()),
+            "deep_research": len(self._deep_research.list_ids()),
         }
+
+    # =========================================================================
+    # Universal session lookup
+    # =========================================================================
+
+    def load_session_by_id(
+        self, session_id: str
+    ) -> Optional[
+        ConversationThread
+        | ThinkDeepState
+        | IdeationState
+        | ConsensusState
+        | DeepResearchState
+    ]:
+        """Load any research session by its ID prefix.
+
+        Determines the session type from the ID prefix and loads from
+        the appropriate storage backend.
+
+        Args:
+            session_id: Session ID with type prefix (e.g., "thread-xxx", "consensus-xxx")
+
+        Returns:
+            The session state object, or None if not found
+        """
+        if session_id.startswith("thread-"):
+            return self.load_thread(session_id)
+        elif session_id.startswith("investigation-"):
+            return self.load_investigation(session_id)
+        elif session_id.startswith("ideation-"):
+            return self.load_ideation(session_id)
+        elif session_id.startswith("consensus-"):
+            return self.load_consensus(session_id)
+        elif session_id.startswith("deepres-"):
+            return self.load_deep_research(session_id)
+        return None
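
Aside (not part of the diff): because each session type carries a distinct ID prefix, a caller can hand any ID to the new lookup and branch on the returned type. A minimal sketch, assuming an initialized `ResearchMemory` named `memory` and a hypothetical session ID:

    # Sketch: resolve any research session ID without knowing its workflow.
    session = memory.load_session_by_id("consensus-1a2b3c")  # hypothetical ID
    if session is None:
        print("no such session")
    else:
        print(type(session).__name__, getattr(session, "updated_at", None))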