foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
@@ -14,10 +14,7 @@ import os
14
14
  import subprocess
15
15
  from typing import Any, Dict, List, Optional, Protocol, Sequence
16
16
 
17
- logger = logging.getLogger(__name__)
18
-
19
17
  from .base import (
20
- ModelDescriptor,
21
18
  ProviderCapability,
22
19
  ProviderContext,
23
20
  ProviderExecutionError,
@@ -34,6 +31,8 @@ from .base import (
34
31
  from .detectors import detect_provider_availability
35
32
  from .registry import register_provider
36
33
 
34
+ logger = logging.getLogger(__name__)
35
+
37
36
  DEFAULT_BINARY = "claude"
38
37
  DEFAULT_TIMEOUT_SECONDS = 360
39
38
  AVAILABILITY_OVERRIDE_ENV = "CLAUDE_CLI_AVAILABLE_OVERRIDE"
@@ -181,34 +180,11 @@ def _default_runner(
181
180
  )
182
181
 
183
182
 
184
- CLAUDE_MODELS: List[ModelDescriptor] = [
185
- ModelDescriptor(
186
- id="sonnet",
187
- display_name="Sonnet 4.5",
188
- capabilities={
189
- ProviderCapability.TEXT,
190
- ProviderCapability.STREAMING,
191
- ProviderCapability.VISION,
192
- ProviderCapability.THINKING,
193
- },
194
- routing_hints={"tier": "default", "description": "Smartest model for daily use"},
195
- ),
196
- ModelDescriptor(
197
- id="haiku",
198
- display_name="Haiku 4.5",
199
- capabilities={
200
- ProviderCapability.TEXT,
201
- ProviderCapability.STREAMING,
202
- },
203
- routing_hints={"tier": "fast", "description": "Fastest model for simple tasks"},
204
- ),
205
- ]
206
-
207
183
  CLAUDE_METADATA = ProviderMetadata(
208
184
  provider_id="claude",
209
185
  display_name="Anthropic Claude CLI",
210
- models=CLAUDE_MODELS,
211
- default_model="sonnet",
186
+ models=[], # Model validation delegated to CLI
187
+ default_model="opus",
212
188
  capabilities={
213
189
  ProviderCapability.TEXT,
214
190
  ProviderCapability.STREAMING,
@@ -239,24 +215,7 @@ class ClaudeProvider(ProviderContext):
239
215
  self._binary = binary or os.environ.get(CUSTOM_BINARY_ENV, DEFAULT_BINARY)
240
216
  self._env = env
241
217
  self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
242
- self._model = self._ensure_model(model or metadata.default_model or self._first_model_id())
243
-
244
- def _first_model_id(self) -> str:
245
- if not self.metadata.models:
246
- raise ProviderUnavailableError(
247
- "Claude provider metadata is missing model descriptors.",
248
- provider=self.metadata.provider_id,
249
- )
250
- return self.metadata.models[0].id
251
-
252
- def _ensure_model(self, candidate: str) -> str:
253
- available = {descriptor.id for descriptor in self.metadata.models}
254
- if candidate not in available:
255
- raise ProviderExecutionError(
256
- f"Unsupported Claude model '{candidate}'. Available: {', '.join(sorted(available))}",
257
- provider=self.metadata.provider_id,
258
- )
259
- return candidate
218
+ self._model = model or metadata.default_model or "opus"
260
219
 
261
220
  def _validate_request(self, request: ProviderRequest) -> None:
262
221
  """Validate and normalize request, ignoring unsupported parameters."""
@@ -355,9 +314,14 @@ class ClaudeProvider(ProviderContext):
355
314
  )
356
315
 
357
316
  def _resolve_model(self, request: ProviderRequest) -> str:
317
+ # 1. Check request.model first (from ProviderRequest constructor)
318
+ if request.model:
319
+ return str(request.model)
320
+ # 2. Fallback to metadata override (legacy/alternative path)
358
321
  model_override = request.metadata.get("model") if request.metadata else None
359
322
  if model_override:
360
- return self._ensure_model(str(model_override))
323
+ return str(model_override)
324
+ # 3. Fallback to instance default
361
325
  return self._model
362
326
 
363
327
  def _emit_stream_if_requested(self, content: str, *, stream: bool) -> None:
@@ -365,6 +329,36 @@ class ClaudeProvider(ProviderContext):
365
329
  return
366
330
  self._emit_stream_chunk(StreamChunk(content=content, index=0))
367
331
 
332
+ def _extract_error_from_json(self, stdout: str) -> Optional[str]:
333
+ """
334
+ Extract error message from Claude CLI JSON output.
335
+
336
+ Claude CLI outputs errors as JSON with is_error: true and error in 'result' field.
337
+ Example: {"type":"result","is_error":true,"result":"API Error: 404 {...}"}
338
+ """
339
+ if not stdout:
340
+ return None
341
+
342
+ try:
343
+ payload = json.loads(stdout.strip())
344
+ except json.JSONDecodeError:
345
+ return None
346
+
347
+ # Check for error indicator
348
+ if payload.get("is_error"):
349
+ result = payload.get("result", "")
350
+ if result:
351
+ return str(result)
352
+
353
+ # Also check for explicit error field
354
+ error = payload.get("error")
355
+ if error:
356
+ if isinstance(error, dict):
357
+ return error.get("message") or str(error)
358
+ return str(error)
359
+
360
+ return None
361
+
368
362
  def _execute(self, request: ProviderRequest) -> ProviderResult:
369
363
  self._validate_request(request)
370
364
  model = self._resolve_model(request)
@@ -375,8 +369,17 @@ class ClaudeProvider(ProviderContext):
375
369
  if completed.returncode != 0:
376
370
  stderr = (completed.stderr or "").strip()
377
371
  logger.debug(f"Claude CLI stderr: {stderr or 'no stderr'}")
372
+
373
+ # Extract error from JSON stdout (Claude outputs errors there with is_error: true)
374
+ json_error = self._extract_error_from_json(completed.stdout)
375
+
376
+ error_msg = f"Claude CLI exited with code {completed.returncode}"
377
+ if json_error:
378
+ error_msg += f": {json_error[:500]}"
379
+ elif stderr:
380
+ error_msg += f": {stderr[:500]}"
378
381
  raise ProviderExecutionError(
379
- f"Claude CLI exited with code {completed.returncode}",
382
+ error_msg,
380
383
  provider=self.metadata.provider_id,
381
384
  )
382
385
 
@@ -15,10 +15,7 @@ import os
15
15
  import subprocess
16
16
  from typing import Any, Dict, List, Optional, Protocol, Sequence, Tuple
17
17
 
18
- logger = logging.getLogger(__name__)
19
-
20
18
  from .base import (
21
- ModelDescriptor,
22
19
  ProviderCapability,
23
20
  ProviderContext,
24
21
  ProviderExecutionError,
@@ -35,6 +32,8 @@ from .base import (
35
32
  from .detectors import detect_provider_availability
36
33
  from .registry import register_provider
37
34
 
35
+ logger = logging.getLogger(__name__)
36
+
38
37
  DEFAULT_BINARY = "codex"
39
38
  DEFAULT_TIMEOUT_SECONDS = 360
40
39
  AVAILABILITY_OVERRIDE_ENV = "CODEX_CLI_AVAILABLE_OVERRIDE"
@@ -228,44 +227,11 @@ def _default_runner(
228
227
  )
229
228
 
230
229
 
231
- CODEX_MODELS: List[ModelDescriptor] = [
232
- ModelDescriptor(
233
- id="gpt-5.1-codex",
234
- display_name="GPT-5.1 Codex",
235
- capabilities={
236
- ProviderCapability.TEXT,
237
- ProviderCapability.STREAMING,
238
- ProviderCapability.FUNCTION_CALLING,
239
- },
240
- routing_hints={"tier": "primary", "optimized_for": "codex"},
241
- ),
242
- ModelDescriptor(
243
- id="gpt-5.1-codex-mini",
244
- display_name="GPT-5.1 Codex Mini",
245
- capabilities={
246
- ProviderCapability.TEXT,
247
- ProviderCapability.STREAMING,
248
- ProviderCapability.FUNCTION_CALLING,
249
- },
250
- routing_hints={"tier": "fast", "optimized_for": "codex"},
251
- ),
252
- ModelDescriptor(
253
- id="gpt-5.1",
254
- display_name="GPT-5.1",
255
- capabilities={
256
- ProviderCapability.TEXT,
257
- ProviderCapability.STREAMING,
258
- ProviderCapability.FUNCTION_CALLING,
259
- },
260
- routing_hints={"tier": "general"},
261
- ),
262
- ]
263
-
264
230
  CODEX_METADATA = ProviderMetadata(
265
231
  provider_id="codex",
266
232
  display_name="OpenAI Codex CLI",
267
- models=CODEX_MODELS,
268
- default_model="gpt-5.1-codex",
233
+ models=[], # Model validation delegated to CLI
234
+ default_model="gpt-5.2",
269
235
  capabilities={ProviderCapability.TEXT, ProviderCapability.STREAMING, ProviderCapability.FUNCTION_CALLING},
270
236
  security_flags={"writes_allowed": False, "read_only": True, "sandbox": "read-only"},
271
237
  extra={
@@ -301,7 +267,7 @@ class CodexProvider(ProviderContext):
301
267
  self._binary = binary or os.environ.get(CUSTOM_BINARY_ENV, DEFAULT_BINARY)
302
268
  self._env = self._prepare_subprocess_env(env)
303
269
  self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
304
- self._model = self._ensure_model(model or metadata.default_model or self._first_model_id())
270
+ self._model = model or metadata.default_model or "gpt-5.2"
305
271
 
306
272
  def _prepare_subprocess_env(self, custom_env: Optional[Dict[str, str]]) -> Dict[str, str]:
307
273
  """
@@ -323,23 +289,6 @@ class CodexProvider(ProviderContext):
323
289
 
324
290
  return subprocess_env
325
291
 
326
- def _first_model_id(self) -> str:
327
- if not self.metadata.models:
328
- raise ProviderUnavailableError(
329
- "Codex provider metadata is missing model descriptors.",
330
- provider=self.metadata.provider_id,
331
- )
332
- return self.metadata.models[0].id
333
-
334
- def _ensure_model(self, candidate: str) -> str:
335
- available = {descriptor.id for descriptor in self.metadata.models}
336
- if candidate not in available:
337
- raise ProviderExecutionError(
338
- f"Unsupported Codex model '{candidate}'. Available: {', '.join(sorted(available))}",
339
- provider=self.metadata.provider_id,
340
- )
341
- return candidate
342
-
343
292
  def _validate_request(self, request: ProviderRequest) -> None:
344
293
  """Validate and normalize request, ignoring unsupported parameters."""
345
294
  unsupported: List[str] = []
@@ -383,7 +332,8 @@ class CodexProvider(ProviderContext):
383
332
 
384
333
  def _build_command(self, model: str, prompt: str, attachments: List[str]) -> List[str]:
385
334
  # Note: codex CLI requires --json flag for JSONL output (non-interactive mode)
386
- command = [self._binary, "exec", "--sandbox", "read-only", "--json"]
335
+ # --skip-git-repo-check allows running outside trusted git directories
336
+ command = [self._binary, "exec", "--sandbox", "read-only", "--skip-git-repo-check", "--json"]
387
337
  if model:
388
338
  command.extend(["-m", model])
389
339
  for path in attachments:
@@ -529,10 +479,61 @@ class CodexProvider(ProviderContext):
529
479
 
530
480
  return final_content, usage, metadata, reported_model
531
481
 
482
+ def _extract_error_from_jsonl(self, stdout: str) -> Optional[str]:
483
+ """
484
+ Extract error message from Codex JSONL output.
485
+
486
+ Codex CLI outputs errors as JSONL events to stdout, not stderr.
487
+ Look for {"type":"error"} or {"type":"turn.failed"} events.
488
+ """
489
+ if not stdout:
490
+ return None
491
+
492
+ errors: List[str] = []
493
+ for line in stdout.strip().splitlines():
494
+ if not line.strip():
495
+ continue
496
+ try:
497
+ event = json.loads(line)
498
+ except json.JSONDecodeError:
499
+ continue
500
+
501
+ event_type = str(event.get("type", "")).lower()
502
+
503
+ # Extract from {"type":"error","message":"..."}
504
+ if event_type == "error":
505
+ msg = event.get("message", "")
506
+ # Skip reconnection messages, get the final error
507
+ if msg and not msg.startswith("Reconnecting"):
508
+ errors.append(msg)
509
+
510
+ # Extract from {"type":"turn.failed","error":{"message":"..."}}
511
+ elif event_type == "turn.failed":
512
+ error_obj = event.get("error", {})
513
+ if isinstance(error_obj, dict):
514
+ msg = error_obj.get("message", "")
515
+ if msg:
516
+ errors.append(msg)
517
+
518
+ # Return the last (most specific) error, or join if multiple
519
+ if errors:
520
+ # Deduplicate while preserving order
521
+ seen = set()
522
+ unique_errors = []
523
+ for e in errors:
524
+ if e not in seen:
525
+ seen.add(e)
526
+ unique_errors.append(e)
527
+ return "; ".join(unique_errors)
528
+ return None
529
+
532
530
  def _execute(self, request: ProviderRequest) -> ProviderResult:
533
531
  self._validate_request(request)
534
- model = self._ensure_model(
535
- str(request.metadata.get("model")) if request.metadata and "model" in request.metadata else self._model
532
+ # Resolve model: request.model takes precedence, then metadata, then instance default
533
+ model = (
534
+ request.model
535
+ or (str(request.metadata.get("model")) if request.metadata and "model" in request.metadata else None)
536
+ or self._model
536
537
  )
537
538
  prompt = self._build_prompt(request)
538
539
  attachments = self._normalize_attachment_paths(request)
@@ -543,8 +544,17 @@ class CodexProvider(ProviderContext):
543
544
  if completed.returncode != 0:
544
545
  stderr = (completed.stderr or "").strip()
545
546
  logger.debug(f"Codex CLI stderr: {stderr or 'no stderr'}")
547
+
548
+ # Extract error message from JSONL stdout (Codex outputs errors there, not stderr)
549
+ jsonl_error = self._extract_error_from_jsonl(completed.stdout)
550
+
551
+ error_msg = f"Codex CLI exited with code {completed.returncode}"
552
+ if jsonl_error:
553
+ error_msg += f": {jsonl_error[:500]}"
554
+ elif stderr:
555
+ error_msg += f": {stderr[:500]}"
546
556
  raise ProviderExecutionError(
547
- f"Codex CLI exited with code {completed.returncode}",
557
+ error_msg,
548
558
  provider=self.metadata.provider_id,
549
559
  )
550
560
 
@@ -20,7 +20,6 @@ from typing import Any, Dict, List, Optional, Protocol, Sequence, Tuple
20
20
  logger = logging.getLogger(__name__)
21
21
 
22
22
  from .base import (
23
- ModelDescriptor,
24
23
  ProviderCapability,
25
24
  ProviderContext,
26
25
  ProviderExecutionError,
@@ -188,33 +187,10 @@ def _default_runner(
188
187
  )
189
188
 
190
189
 
191
- CURSOR_MODELS: List[ModelDescriptor] = [
192
- ModelDescriptor(
193
- id="composer-1",
194
- display_name="Composer-1",
195
- capabilities={
196
- ProviderCapability.TEXT,
197
- ProviderCapability.FUNCTION_CALLING,
198
- ProviderCapability.STREAMING,
199
- },
200
- routing_hints={"tier": "default"},
201
- ),
202
- ModelDescriptor(
203
- id="gpt-5.1-codex",
204
- display_name="GPT-5.1 Codex",
205
- capabilities={
206
- ProviderCapability.TEXT,
207
- ProviderCapability.FUNCTION_CALLING,
208
- ProviderCapability.STREAMING,
209
- },
210
- routing_hints={"tier": "codex"},
211
- ),
212
- ]
213
-
214
190
  CURSOR_METADATA = ProviderMetadata(
215
191
  provider_id="cursor-agent",
216
192
  display_name="Cursor Agent CLI",
217
- models=CURSOR_MODELS,
193
+ models=[], # Model validation delegated to CLI
218
194
  default_model="composer-1",
219
195
  capabilities={ProviderCapability.TEXT, ProviderCapability.FUNCTION_CALLING, ProviderCapability.STREAMING},
220
196
  security_flags={"writes_allowed": False, "read_only": True},
@@ -246,7 +222,7 @@ class CursorAgentProvider(ProviderContext):
246
222
  self._binary = binary or os.environ.get(CUSTOM_BINARY_ENV, DEFAULT_BINARY)
247
223
  self._env = env
248
224
  self._timeout = timeout or DEFAULT_TIMEOUT_SECONDS
249
- self._model = self._ensure_model(model or metadata.default_model or self._first_model_id())
225
+ self._model = model or metadata.default_model or "composer-1"
250
226
  self._config_backup_path: Optional[Path] = None
251
227
  self._original_config_existed: bool = False
252
228
  self._cleanup_done: bool = False
@@ -255,23 +231,6 @@ class CursorAgentProvider(ProviderContext):
255
231
  """Clean up temporary config directory on provider destruction."""
256
232
  self._cleanup_config_file()
257
233
 
258
- def _first_model_id(self) -> str:
259
- if not self.metadata.models:
260
- raise ProviderUnavailableError(
261
- "Cursor Agent metadata is missing model descriptors.",
262
- provider=self.metadata.provider_id,
263
- )
264
- return self.metadata.models[0].id
265
-
266
- def _ensure_model(self, candidate: str) -> str:
267
- available = {descriptor.id for descriptor in self.metadata.models}
268
- if candidate not in available:
269
- raise ProviderExecutionError(
270
- f"Unsupported Cursor Agent model '{candidate}'. Available: {', '.join(sorted(available))}",
271
- provider=self.metadata.provider_id,
272
- )
273
- return candidate
274
-
275
234
  def _create_readonly_config(self) -> Path:
276
235
  """
277
236
  Backup and replace ~/.cursor/cli-config.json with read-only permissions.
@@ -478,16 +437,32 @@ class CursorAgentProvider(ProviderContext):
478
437
  return retry_process, False
479
438
 
480
439
  stderr_text = (retry_process.stderr or stderr_text).strip()
440
+ # Cursor Agent outputs errors to stdout as plain text, not stderr
441
+ stdout_text = (retry_process.stdout or "").strip()
481
442
  logger.debug(f"Cursor Agent CLI stderr (retry): {stderr_text or 'no stderr'}")
443
+ error_msg = f"Cursor Agent CLI exited with code {retry_process.returncode}"
444
+ if stdout_text and not stdout_text.startswith("{"):
445
+ # Plain text error in stdout (not JSON response)
446
+ error_msg += f": {stdout_text[:500]}"
447
+ elif stderr_text:
448
+ error_msg += f": {stderr_text[:500]}"
482
449
  raise ProviderExecutionError(
483
- f"Cursor Agent CLI exited with code {retry_process.returncode}",
450
+ error_msg,
484
451
  provider=self.metadata.provider_id,
485
452
  )
486
453
 
487
454
  stderr_text = (completed.stderr or "").strip()
455
+ # Cursor Agent outputs errors to stdout as plain text, not stderr
456
+ stdout_text = (completed.stdout or "").strip()
488
457
  logger.debug(f"Cursor Agent CLI stderr: {stderr_text or 'no stderr'}")
458
+ error_msg = f"Cursor Agent CLI exited with code {completed.returncode}"
459
+ if stdout_text and not stdout_text.startswith("{"):
460
+ # Plain text error in stdout (not JSON response)
461
+ error_msg += f": {stdout_text[:500]}"
462
+ elif stderr_text:
463
+ error_msg += f": {stderr_text[:500]}"
489
464
  raise ProviderExecutionError(
490
- f"Cursor Agent CLI exited with code {completed.returncode}",
465
+ error_msg,
491
466
  provider=self.metadata.provider_id,
492
467
  )
493
468
 
@@ -533,8 +508,11 @@ class CursorAgentProvider(ProviderContext):
533
508
  provider=self.metadata.provider_id,
534
509
  )
535
510
 
536
- model = self._ensure_model(
537
- str(request.metadata.get("model")) if request.metadata and "model" in request.metadata else self._model
511
+ # Resolve model: request.model takes precedence, then metadata, then instance default
512
+ model = (
513
+ request.model
514
+ or (str(request.metadata.get("model")) if request.metadata and "model" in request.metadata else None)
515
+ or self._model
538
516
  )
539
517
 
540
518
  # Backup and replace HOME config with read-only version
@@ -31,11 +31,23 @@ import logging
31
31
  import os
32
32
  import shutil
33
33
  import subprocess
34
+ import time
34
35
  from dataclasses import dataclass, field
35
- from typing import Dict, Iterable, Optional, Sequence
36
+ from typing import Dict, Iterable, Optional, Sequence, Tuple
36
37
 
37
38
  logger = logging.getLogger(__name__)
38
39
 
40
+ # Cache for provider availability: {provider_id: (is_available, timestamp)}
41
+ _AVAILABILITY_CACHE: Dict[str, Tuple[bool, float]] = {}
42
+
43
+ def _get_cache_ttl() -> float:
44
+ """Get cache TTL from config or default to 3600s."""
45
+ try:
46
+ from foundry_mcp.config import get_config
47
+ return float(get_config().providers.get("availability_cache_ttl", 3600))
48
+ except Exception:
49
+ return 3600.0
50
+
39
51
  # Environment variable for test mode (bypasses real CLI probes)
40
52
  _TEST_MODE_ENV = "FOUNDRY_PROVIDER_TEST_MODE"
41
53
 
@@ -173,13 +185,14 @@ class ProviderDetector:
173
185
 
174
186
  def is_available(self, *, use_probe: bool = True) -> bool:
175
187
  """
176
- Check whether this provider is available.
188
+ Check whether this provider is available (with caching).
177
189
 
178
190
  Resolution order:
179
- 1. Check override_env (if set, returns its boolean value)
191
+ 1. Check override_env (if set, returns its boolean value - takes precedence)
180
192
  2. In test mode, return False (no real CLI available)
181
- 3. Resolve binary via PATH
182
- 4. Optionally run health probe
193
+ 3. Check cache (if valid)
194
+ 4. Resolve binary via PATH
195
+ 5. Optionally run health probe
183
196
 
184
197
  Args:
185
198
  use_probe: When True, run health probe after finding binary.
@@ -188,7 +201,7 @@ class ProviderDetector:
188
201
  Returns:
189
202
  True if provider is available, False otherwise
190
203
  """
191
- # Check environment override first
204
+ # Check environment override first (takes precedence over cache)
192
205
  if self.override_env:
193
206
  override = _coerce_bool(os.environ.get(self.override_env))
194
207
  if override is not None:
@@ -207,6 +220,14 @@ class ProviderDetector:
207
220
  )
208
221
  return False
209
222
 
223
+ # Check cache (only for non-overridden, non-test-mode cases)
224
+ cache_key = f"{self.provider_id}:{use_probe}"
225
+ cached = _AVAILABILITY_CACHE.get(cache_key)
226
+ if cached is not None:
227
+ is_avail, cached_time = cached
228
+ if time.time() - cached_time < _get_cache_ttl():
229
+ return is_avail
230
+
210
231
  # Resolve binary path
211
232
  executable = self.resolve_binary()
212
233
  if not executable:
@@ -214,14 +235,18 @@ class ProviderDetector:
214
235
  "Provider '%s' unavailable (binary not found in PATH)",
215
236
  self.provider_id,
216
237
  )
238
+ _AVAILABILITY_CACHE[cache_key] = (False, time.time())
217
239
  return False
218
240
 
219
241
  # Skip probe if not requested
220
242
  if not use_probe:
243
+ _AVAILABILITY_CACHE[cache_key] = (True, time.time())
221
244
  return True
222
245
 
223
246
  # Run health probe
224
- return self._run_probe(executable)
247
+ result = self._run_probe(executable)
248
+ _AVAILABILITY_CACHE[cache_key] = (result, time.time())
249
+ return result
225
250
 
226
251
  def get_unavailability_reason(self, *, use_probe: bool = True) -> Optional[str]:
227
252
  """
@@ -468,7 +493,9 @@ def reset_detectors() -> None:
468
493
  Reset detectors to the default set.
469
494
 
470
495
  Primarily used by tests to restore a clean state.
496
+ Also clears the availability cache to ensure fresh detection.
471
497
  """
498
+ _AVAILABILITY_CACHE.clear()
472
499
  _reset_default_detectors()
473
500
 
474
501