coding-cli-runtime 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,749 @@
1
+ """Shared provider capability metadata for wrappers and helper tools."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import logging
7
+ import os
8
+ from collections.abc import Callable
9
+ from dataclasses import dataclass, field
10
+ from functools import lru_cache
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ from .json_io import load_packaged_json_object
15
+ from .reasoning import (
16
+ CLAUDE_ADAPTIVE_THINKING_MODELS,
17
+ CLAUDE_DEFAULT_THINKING_TOKENS,
18
+ CLAUDE_EFFORT_CHOICES,
19
+ )
20
+
21
# Module-level logger; used for override-file and cache-load diagnostics below.
_logger = logging.getLogger(__name__)

# Name of the packaged JSON resource holding the Copilot reasoning baseline.
COPILOT_BASELINE_RESOURCE = "copilot_reasoning_baseline.json"
# Environment variable that overrides the user config directory (_config_dir).
_CONFIG_DIR_ENV = "CODING_CLI_RUNTIME_CONFIG_DIR"
# Location of the Codex CLI's auto-refreshed local model catalog cache.
_CODEX_CACHE_PATH = Path("~/.codex/models_cache.json").expanduser()
26
+
27
+
28
@dataclass(frozen=True)
class ChoiceSpec:
    """One selectable value for a choice control, plus optional UI metadata."""

    value: str
    label: str | None = None
    description: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-friendly dict, omitting unset optional fields."""
        result: dict[str, Any] = {"value": self.value}
        for key, attr in (("label", self.label), ("description", self.description)):
            if attr is not None:
                result[key] = attr
        if self.metadata:
            result["metadata"] = self.metadata
        return result
44
+
45
+
46
@dataclass(frozen=True)
class ControlSpec:
    """Declarative description of one tunable provider or model control."""

    name: str
    kind: str
    label: str
    description: str | None = None
    choices: tuple[ChoiceSpec, ...] = ()
    default: str | int | float | bool | None = None
    required: bool = False
    editable: bool = True
    advanced: bool = False
    metadata: dict[str, Any] = field(default_factory=dict)

    def choice_values(self) -> tuple[str, ...]:
        """Return the bare values of all declared choices, in order."""
        return tuple(c.value for c in self.choices)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-friendly dict, omitting unset optional fields."""
        result: dict[str, Any] = {
            "name": self.name,
            "kind": self.kind,
            "label": self.label,
            "editable": self.editable,
            "advanced": self.advanced,
            "required": self.required,
        }
        if self.description is not None:
            result["description"] = self.description
        if self.default is not None:
            result["default"] = self.default
        if self.choices:
            result["choices"] = [c.to_dict() for c in self.choices]
        if self.metadata:
            result["metadata"] = self.metadata
        return result
80
+
81
+
82
@dataclass(frozen=True)
class ModelSpec:
    """Capability description of a single model: controls plus metadata."""

    name: str
    description: str | None = None
    controls: tuple[ControlSpec, ...] = ()
    metadata: dict[str, Any] = field(default_factory=dict)

    def control(self, name: str) -> ControlSpec | None:
        """Find a control by exact name; None when absent."""
        for candidate in self.controls:
            if candidate.name == name:
                return candidate
        return None

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-friendly dict, omitting unset optional fields."""
        result: dict[str, Any] = {"name": self.name}
        if self.description is not None:
            result["description"] = self.description
        if self.controls:
            result["controls"] = [c.to_dict() for c in self.controls]
        if self.metadata:
            result["metadata"] = self.metadata
        return result
101
+
102
+
103
@dataclass(frozen=True)
class ProviderSpec:
    """Full capability description of one CLI provider: models, controls, notes."""

    provider_id: str
    label: str
    model_source: str
    default_model: str | None
    default_concurrency: int
    models: tuple[ModelSpec, ...]
    controls: tuple[ControlSpec, ...] = ()
    notes: tuple[str, ...] = ()
    allow_custom_model_override: bool = False
    metadata: dict[str, Any] = field(default_factory=dict)

    @property
    def model_names(self) -> tuple[str, ...]:
        """Names of every model, in catalog order."""
        return tuple(spec.name for spec in self.models)

    def model(self, name: str) -> ModelSpec | None:
        """Find a model spec by exact name; None when absent."""
        for spec in self.models:
            if spec.name == name:
                return spec
        return None

    def control(self, name: str) -> ControlSpec | None:
        """Find a provider-level control by exact name; None when absent."""
        for spec in self.controls:
            if spec.name == name:
                return spec
        return None

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-friendly dict (camelCase keys for consumers)."""
        result: dict[str, Any] = {
            "providerId": self.provider_id,
            "label": self.label,
            "modelSource": self.model_source,
            "defaultConcurrency": self.default_concurrency,
            "models": [spec.to_dict() for spec in self.models],
            "allowCustomModelOverride": self.allow_custom_model_override,
        }
        if self.default_model is not None:
            result["defaultModel"] = self.default_model
        if self.controls:
            result["controls"] = [spec.to_dict() for spec in self.controls]
        if self.notes:
            result["notes"] = list(self.notes)
        if self.metadata:
            result["metadata"] = self.metadata
        return result
144
+
145
+
146
def _choice_specs(*values: str) -> tuple[ChoiceSpec, ...]:
    """Wrap each bare string in a value-only ChoiceSpec, preserving order."""
    specs: list[ChoiceSpec] = []
    for item in values:
        specs.append(ChoiceSpec(value=item))
    return tuple(specs)
148
+
149
+
150
def _codex_model_spec(
    *,
    name: str,
    description: str,
    default_reasoning: str,
    reasoning_levels: tuple[str, ...],
    visibility: str,
    priority: int,
) -> ModelSpec:
    """Build a Codex ModelSpec exposing a single reasoning-effort choice control."""
    reasoning_control = ControlSpec(
        name="model_reasoning",
        kind="choice",
        label="Reasoning effort",
        required=False,
        default=default_reasoning,
        choices=_choice_specs(*reasoning_levels),
    )
    # catalogPriority / catalogVisibility mirror the Codex CLI catalog fields.
    catalog_metadata = {
        "catalogPriority": priority,
        "catalogVisibility": visibility,
    }
    return ModelSpec(
        name=name,
        description=description,
        controls=(reasoning_control,),
        metadata=catalog_metadata,
    )
177
+
178
+
179
# Mirrors the local Codex CLI model catalog (`~/.codex/models_cache.json`) as of
# codex-cli 0.111.0 / catalog fetch 2026-03-08. `gpt-5.3-codex-spark` is
# intentionally excluded because the CLI marks it `supported_in_api=false`.
# Used as the hardcoded fallback when neither a user override file nor the
# live cache (_load_codex_live_catalog) is available.
CODEX_MODEL_SPECS = (
    _codex_model_spec(
        name="gpt-5.3-codex",
        description="Latest frontier agentic coding model.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high", "xhigh"),
        visibility="list",
        priority=0,
    ),
    _codex_model_spec(
        name="gpt-5.4",
        description="Latest frontier agentic coding model.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high", "xhigh"),
        visibility="list",
        priority=0,
    ),
    _codex_model_spec(
        name="gpt-5.2-codex",
        description="Frontier agentic coding model.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high", "xhigh"),
        visibility="list",
        priority=3,
    ),
    _codex_model_spec(
        name="gpt-5.1-codex-max",
        description="Codex-optimized flagship for deep and fast reasoning.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high", "xhigh"),
        visibility="list",
        priority=4,
    ),
    _codex_model_spec(
        name="gpt-5.1-codex",
        description="Optimized for codex.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high"),
        visibility="hide",
        priority=5,
    ),
    _codex_model_spec(
        name="gpt-5.2",
        description=(
            "Latest frontier model with improvements across knowledge, reasoning and coding"
        ),
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high", "xhigh"),
        visibility="list",
        priority=6,
    ),
    _codex_model_spec(
        name="gpt-5.1",
        description="Broad world knowledge with strong general reasoning.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high"),
        visibility="hide",
        priority=7,
    ),
    _codex_model_spec(
        name="gpt-5-codex",
        description="Optimized for codex.",
        default_reasoning="medium",
        reasoning_levels=("low", "medium", "high"),
        visibility="hide",
        priority=10,
    ),
    _codex_model_spec(
        name="gpt-5",
        description="Broad world knowledge with strong general reasoning.",
        default_reasoning="medium",
        # Only this entry offers the "minimal" level.
        reasoning_levels=("minimal", "low", "medium", "high"),
        visibility="hide",
        priority=11,
    ),
    _codex_model_spec(
        name="gpt-5.1-codex-mini",
        description="Optimized for codex. Cheaper, faster, but less capable.",
        default_reasoning="medium",
        reasoning_levels=("medium", "high"),
        visibility="list",
        priority=12,
    ),
    _codex_model_spec(
        name="gpt-5-codex-mini",
        description="Optimized for codex. Cheaper, faster, but less capable.",
        default_reasoning="medium",
        reasoning_levels=("medium", "high"),
        visibility="hide",
        priority=13,
    ),
)
274
+
275
# Output formats map to the file suffix used for captured output.
CLAUDE_OUTPUT_FORMAT_CHOICES = (
    ChoiceSpec(value="text", metadata={"suffix": ".txt"}),
    ChoiceSpec(value="json", metadata={"suffix": ".json"}),
    ChoiceSpec(value="stream-json", metadata={"suffix": ".jsonl"}),
)
CLAUDE_PERMISSION_MODE_CHOICES = tuple(
    ChoiceSpec(value=value) for value in ("acceptEdits", "bypassPermissions", "default", "plan")
)
CLAUDE_REASONING_MODE_CHOICES = tuple(
    ChoiceSpec(value=value) for value in ("script_default", "effort", "thinking_tokens", "disabled")
)
# One spec per known Claude model. The "effort" control is marked supported
# only for models whose name matches CLAUDE_ADAPTIVE_THINKING_MODELS (a
# startswith prefix check); "thinking_tokens" is always present with the
# shared default.
CLAUDE_MODEL_SPECS = tuple(
    ModelSpec(
        name=model_name,
        controls=(
            ControlSpec(
                name="effort",
                kind="choice",
                label="Adaptive thinking effort",
                choices=tuple(ChoiceSpec(value=value) for value in CLAUDE_EFFORT_CHOICES),
                metadata={"supported": model_name.startswith(CLAUDE_ADAPTIVE_THINKING_MODELS)},
            ),
            ControlSpec(
                name="thinking_tokens",
                kind="int",
                label="Thinking tokens",
                default=CLAUDE_DEFAULT_THINKING_TOKENS,
            ),
        ),
        metadata={
            "adaptiveThinking": model_name.startswith(CLAUDE_ADAPTIVE_THINKING_MODELS),
            "defaultThinkingTokens": CLAUDE_DEFAULT_THINKING_TOKENS,
        },
    )
    for model_name in (
        "claude-haiku-4-5-20251001",
        "claude-sonnet-4-5-20250929",
        "claude-sonnet-4-6",
        "claude-opus-4-1-20250805",
        "claude-opus-4-6",
    )
)
317
+
318
# Gemini models carry no per-model controls; the wrapper exposes no reasoning
# knob for this provider (see the provider notes in list_provider_specs).
GEMINI_MODEL_SPECS = tuple(
    ModelSpec(name=model_name)
    for model_name in (
        "gemini-3-flash-preview",
        "gemini-3-pro-preview",
        "gemini-3.1-pro-preview",
    )
)
326
+
327
+
328
def _load_copilot_model_specs(
    resource_name: str = COPILOT_BASELINE_RESOURCE,
) -> tuple[ModelSpec, ...]:
    """Build Copilot ModelSpecs from the packaged reasoning-baseline JSON.

    Each entry's reasoning schema/value/default is recorded in the spec
    metadata; entries with schema ``reasoning_effort`` and a non-empty
    supported-efforts list additionally get a ``model_reasoning`` control.

    Raises:
        ValueError: if the payload's "models" entry is not a JSON object.
    """
    payload = load_packaged_json_object(resource_name)
    model_payload = payload.get("models")
    if not isinstance(model_payload, dict):
        raise ValueError(f"Invalid Copilot baseline model payload: {resource_name}")
    models: list[ModelSpec] = []
    for model_name, raw in model_payload.items():
        if not isinstance(raw, dict):
            continue
        schema = str(raw.get("schema", "")).strip() or "none"
        value = raw.get("value")
        default_reasoning = raw.get("default_reasoning")
        metadata: dict[str, Any] = {"reasoningSchema": schema}
        controls: tuple[ControlSpec, ...] = ()
        if value is not None:
            metadata["reasoningValue"] = str(value)
        if default_reasoning is not None:
            metadata["defaultReasoning"] = str(default_reasoning)
        # Fix: reset per iteration. Previously `supported_reasoning` was only
        # assigned inside the isinstance guard, so a model with a missing or
        # non-list `supported_reasoning_efforts` either raised NameError (first
        # iteration) or silently reused the previous model's levels.
        supported_reasoning: tuple[str, ...] = ()
        supported_reasoning_raw = raw.get("supported_reasoning_efforts")
        if isinstance(supported_reasoning_raw, (list, tuple)):
            supported_reasoning = tuple(
                str(item).strip().lower() for item in supported_reasoning_raw if str(item).strip()
            )
        if schema == "reasoning_effort" and supported_reasoning:
            # Prefer the explicit default; fall back to the baseline value.
            control_default = None
            if default_reasoning is not None:
                control_default = str(default_reasoning)
            elif value is not None:
                control_default = str(value)
            controls = (
                ControlSpec(
                    name="model_reasoning",
                    kind="choice",
                    label="Reasoning effort",
                    description="Passed through Copilot CLI via --reasoning-effort.",
                    required=False,
                    default=control_default,
                    choices=_choice_specs(*supported_reasoning),
                ),
            )
        models.append(ModelSpec(name=model_name, controls=controls, metadata=metadata))
    return tuple(models)
372
+
373
+
374
def _config_dir() -> Path:
    """Return the user config directory for provider overrides.

    Honors the directory named by the CODING_CLI_RUNTIME_CONFIG_DIR
    environment variable when set; otherwise ~/.config/coding-cli-runtime.
    """
    configured = os.getenv(_CONFIG_DIR_ENV)
    base = configured if configured else "~/.config/coding-cli-runtime"
    return Path(base).expanduser()
380
+
381
+
382
def _parse_model_specs_from_raw(models_raw: list[Any]) -> tuple[ModelSpec, ...]:
    """Parse a list of model entries (strings or dicts) into ModelSpec tuples.

    Bare strings become name-only specs; dicts may carry description,
    controls, and metadata. Malformed entries are skipped silently.
    """

    def _parse_choices(raw_choices: Any) -> tuple[ChoiceSpec, ...]:
        # Accept both bare strings and {"value": ...} objects.
        parsed: list[ChoiceSpec] = []
        for entry in raw_choices:
            if isinstance(entry, dict):
                if "value" in entry:
                    parsed.append(ChoiceSpec(value=str(entry["value"])))
            elif isinstance(entry, str):
                parsed.append(ChoiceSpec(value=str(entry)))
        return tuple(parsed)

    specs: list[ModelSpec] = []
    for entry in models_raw:
        if isinstance(entry, str):
            specs.append(ModelSpec(name=entry))
            continue
        if not isinstance(entry, dict):
            continue
        name = entry.get("name")
        if not isinstance(name, str):
            continue
        parsed_controls: list[ControlSpec] = []
        for ctrl in entry.get("controls", []):
            if not isinstance(ctrl, dict) or "name" not in ctrl:
                continue
            parsed_controls.append(
                ControlSpec(
                    name=str(ctrl["name"]),
                    kind=str(ctrl.get("kind", "choice")),
                    label=str(ctrl.get("label", ctrl["name"])),
                    description=ctrl.get("description"),
                    choices=_parse_choices(ctrl.get("choices", [])),
                    default=ctrl.get("default"),
                    required=bool(ctrl.get("required", False)),
                )
            )
        raw_metadata = entry.get("metadata", {})
        if not isinstance(raw_metadata, dict):
            raw_metadata = {}
        specs.append(
            ModelSpec(
                name=name,
                description=entry.get("description"),
                controls=tuple(parsed_controls),
                metadata=raw_metadata,
            )
        )
    return tuple(specs)
426
+
427
+
428
def _load_user_provider_override(
    provider_id: str,
) -> tuple[tuple[ModelSpec, ...], str | None] | None:
    """Load user override from config dir. Returns (models, default_model) or None.

    Any unreadable or malformed override file is logged and treated as
    absent, so a broken override never disables the provider entirely.
    """
    override_path = _config_dir() / "providers" / f"{provider_id}.json"
    if not override_path.is_file():
        return None
    try:
        data = json.loads(override_path.read_text(encoding="utf-8"))
        if not isinstance(data, dict):
            _logger.warning("Ignoring invalid override file (not an object): %s", override_path)
            return None
        raw_models = data.get("models")
        if not isinstance(raw_models, list) or not raw_models:
            _logger.warning("Ignoring override file (missing/empty models list): %s", override_path)
            return None
        parsed = _parse_model_specs_from_raw(raw_models)
        if not parsed:
            return None
        # Non-string default_model values are discarded rather than rejected.
        default = data.get("default_model")
        if not isinstance(default, str):
            default = None
        _logger.debug("Loaded %d model(s) from user override: %s", len(parsed), override_path)
        return parsed, default
    except Exception:
        _logger.warning("Failed to read provider override: %s", override_path, exc_info=True)
        return None
455
+
456
+
457
def _load_codex_live_catalog() -> tuple[ModelSpec, ...] | None:
    """Parse ``~/.codex/models_cache.json`` into ModelSpec tuples.

    Returns *None* on any failure so the caller falls back to the hardcoded
    catalog. The Codex CLI auto-refreshes this file; the format may change
    without notice, so every access is wrapped defensively.
    """
    if not _CODEX_CACHE_PATH.is_file():
        return None
    try:
        payload = json.loads(_CODEX_CACHE_PATH.read_text(encoding="utf-8"))
        if not isinstance(payload, dict):
            return None
        entries = payload.get("models")
        if not isinstance(entries, list):
            return None
        parsed: list[ModelSpec] = []
        for entry in entries:
            if not isinstance(entry, dict):
                continue
            # Prefer the canonical slug; fall back to the display name.
            slug = entry.get("slug") or entry.get("display_name")
            if not isinstance(slug, str):
                continue
            # Skip models the CLI marks as not usable via the API.
            if not entry.get("supported_in_api", True):
                continue
            description = entry.get("description", "")
            default_level = str(entry.get("default_reasoning_level", "medium"))
            levels = tuple(
                str(item["effort"])
                for item in entry.get("supported_reasoning_levels", [])
                if isinstance(item, dict) and "effort" in item
            )
            reasoning_controls: tuple[ControlSpec, ...] = ()
            if levels:
                reasoning_controls = (
                    ControlSpec(
                        name="model_reasoning",
                        kind="choice",
                        label="Reasoning effort",
                        required=False,
                        default=default_level,
                        choices=_choice_specs(*levels),
                    ),
                )
            parsed.append(
                ModelSpec(
                    name=slug,
                    description=description if isinstance(description, str) else "",
                    controls=reasoning_controls,
                    metadata={
                        "catalogPriority": entry.get("priority", 0),
                        "catalogVisibility": entry.get("visibility", "list"),
                    },
                )
            )
        if not parsed:
            return None
        _logger.debug("Loaded %d model(s) from Codex CLI cache: %s", len(parsed), _CODEX_CACHE_PATH)
        return tuple(parsed)
    except Exception:
        _logger.debug("Failed to read Codex models cache", exc_info=True)
        return None
517
+
518
+
519
def _resolve_models(
    provider_id: str,
    *,
    live_loader: Callable[[], tuple[ModelSpec, ...] | None] | None = None,
    hardcoded: tuple[ModelSpec, ...],
) -> tuple[tuple[ModelSpec, ...], str | None]:
    """Return (models, override_default_model) using: user file > live > hardcoded."""
    # A user override wins outright, including its default-model choice.
    user_override = _load_user_provider_override(provider_id)
    if user_override is not None:
        return user_override
    # A live catalog (if a loader is supplied and succeeds) beats the
    # hardcoded fallback but never carries a default-model override.
    if live_loader is not None:
        live_models = live_loader()
        if live_models is not None:
            return live_models, None
    return hardcoded, None
534
+
535
+
536
@lru_cache(maxsize=1)
def list_provider_specs() -> tuple[ProviderSpec, ...]:
    """Build (once, cached) the full ProviderSpec catalog for all providers.

    Model lists come from, in priority order: a user override file, a live
    catalog where available (Codex only), then the hardcoded constants.
    A non-None per-provider default from an override file flips that
    provider's model_source to "override".
    """
    copilot_models = _load_copilot_model_specs()

    claude_models, claude_default = _resolve_models("claude", hardcoded=CLAUDE_MODEL_SPECS)
    gemini_models, gemini_default = _resolve_models("gemini", hardcoded=GEMINI_MODEL_SPECS)
    codex_models, codex_default = _resolve_models(
        "codex", live_loader=_load_codex_live_catalog, hardcoded=CODEX_MODEL_SPECS
    )
    # Copilot bypasses _resolve_models: its baseline comes from the packaged
    # JSON resource, with only the user override file layered on top.
    copilot_override = _load_user_provider_override("copilot")
    if copilot_override is not None:
        copilot_models, copilot_default = copilot_override
    else:
        copilot_default = None

    return (
        ProviderSpec(
            provider_id="claude",
            label="Claude",
            model_source="override" if claude_default is not None else "code",
            default_model=claude_default or "claude-sonnet-4-6",
            default_concurrency=3,
            models=claude_models,
            controls=(
                ControlSpec(
                    name="reasoning_mode",
                    kind="choice",
                    label="Reasoning mode",
                    default="script_default",
                    choices=CLAUDE_REASONING_MODE_CHOICES,
                ),
                ControlSpec(
                    name="output_format",
                    kind="choice",
                    label="Output format",
                    default="text",
                    choices=CLAUDE_OUTPUT_FORMAT_CHOICES,
                ),
                ControlSpec(
                    name="permission_mode",
                    kind="choice",
                    label="Permission mode",
                    default="bypassPermissions",
                    choices=CLAUDE_PERMISSION_MODE_CHOICES,
                ),
                ControlSpec(
                    name="skip_permissions",
                    kind="bool",
                    label="Skip permissions",
                    default=True,
                ),
            ),
            notes=(
                "Adaptive thinking models support --effort.",
                "Script defaults still apply --thinking-tokens 8192"
                " when no reasoning flag is given.",
            ),
        ),
        ProviderSpec(
            provider_id="gemini",
            label="Gemini",
            model_source="override" if gemini_default is not None else "code",
            default_model=gemini_default or "gemini-3-pro-preview",
            default_concurrency=3,
            models=gemini_models,
            controls=(
                ControlSpec(
                    name="auto_approve",
                    kind="bool",
                    label="Auto approve",
                    default=True,
                ),
                ControlSpec(
                    name="gif_fallback_retry",
                    kind="bool",
                    label="GIF fallback retry",
                    default=True,
                    advanced=True,
                ),
            ),
            notes=("This wrapper does not expose a reasoning control.",),
        ),
        ProviderSpec(
            provider_id="codex",
            label="Codex",
            # Identity check against CODEX_MODEL_SPECS distinguishes the live
            # CLI cache from the hardcoded fallback (_resolve_models returns
            # the hardcoded tuple unchanged when no live catalog loaded).
            model_source=(
                "override"
                if codex_default is not None
                else "codex_cli_cache"
                if codex_models is not CODEX_MODEL_SPECS
                else "code"
            ),
            default_model=codex_default or "gpt-5.3-codex",
            default_concurrency=6,
            models=codex_models,
            controls=(),
            notes=(
                "Reasoning effort is model-specific and defaults"
                " to the selected model's catalog default.",
            ),
        ),
        ProviderSpec(
            provider_id="copilot",
            label="Copilot",
            model_source="override" if copilot_default is not None else "baseline_catalog",
            default_model=copilot_default or "claude-sonnet-4.5",
            default_concurrency=2,
            models=copilot_models,
            controls=(
                ControlSpec(name="allow_all", kind="bool", label="Allow all", default=True),
                ControlSpec(name="ask_user", kind="bool", label="Ask user", default=False),
                ControlSpec(
                    name="use_custom_instructions",
                    kind="bool",
                    label="Use custom instructions",
                    default=False,
                ),
                ControlSpec(
                    name="stream",
                    kind="choice",
                    label="Stream output",
                    default="on",
                    choices=(ChoiceSpec(value="on"), ChoiceSpec(value="off")),
                ),
                ControlSpec(
                    name="force_implementation",
                    kind="bool",
                    label="Force implementation",
                    default=True,
                ),
            ),
            notes=(
                "Copilot model options come from the checked-in reasoning baseline.",
                "Reasoning metadata is informational only until the wrapper"
                " exposes user-facing overrides.",
            ),
            allow_custom_model_override=True,
        ),
    )
675
+
676
+
677
@lru_cache(maxsize=1)
def provider_specs_by_id() -> dict[str, ProviderSpec]:
    """Cached mapping of provider_id -> ProviderSpec."""
    mapping: dict[str, ProviderSpec] = {}
    for spec in list_provider_specs():
        mapping[spec.provider_id] = spec
    return mapping
680
+
681
+
682
def get_provider_spec(provider_id: str) -> ProviderSpec:
    """Look up a ProviderSpec by id.

    Raises:
        KeyError: with an explanatory message when the id is unknown.
    """
    try:
        return provider_specs_by_id()[provider_id]
    except KeyError as missing:
        raise KeyError(f"Unknown provider spec: {provider_id}") from missing
687
+
688
+
689
def serialize_provider_specs() -> list[dict[str, Any]]:
    """JSON-ready dict form of every provider spec, in catalog order."""
    serialized: list[dict[str, Any]] = []
    for spec in list_provider_specs():
        serialized.append(spec.to_dict())
    return serialized
691
+
692
+
693
def get_codex_supported_models() -> dict[str, dict[str, Any]]:
    """Map Codex model name -> {description, reasoning[, default_reasoning]}."""
    catalog: dict[str, dict[str, Any]] = {}
    for spec in get_provider_spec("codex").models:
        reasoning_control = spec.control("model_reasoning")
        info: dict[str, Any] = {
            "description": spec.description or "",
            "reasoning": reasoning_control.choice_values() if reasoning_control else (),
        }
        # default_reasoning is only emitted when the control declares one.
        if reasoning_control is not None and reasoning_control.default is not None:
            info["default_reasoning"] = reasoning_control.default
        catalog[spec.name] = info
    return catalog
706
+
707
+
708
def get_claude_model_candidates() -> tuple[str, ...]:
    """Model names currently exposed for the Claude provider."""
    provider = get_provider_spec("claude")
    return provider.model_names
710
+
711
+
712
def get_claude_default_model() -> str:
    """Default Claude model, falling back to the hardcoded flagship name."""
    provider = get_provider_spec("claude")
    return provider.default_model or "claude-sonnet-4-6"
714
+
715
+
716
def get_claude_effort_levels() -> tuple[str, ...]:
    """Effort choices read off the reference Claude model spec.

    Falls back to the shared CLAUDE_EFFORT_CHOICES when the reference
    model or its effort control is absent (e.g. under a user override).
    """
    reference = get_provider_spec("claude").model("claude-sonnet-4-6")
    if reference is None:
        return CLAUDE_EFFORT_CHOICES
    effort_control = reference.control("effort")
    if effort_control is None:
        return CLAUDE_EFFORT_CHOICES
    return effort_control.choice_values()
722
+
723
+
724
def get_claude_permission_modes() -> tuple[str, ...]:
    """Permission-mode choice values, or empty when the control is absent."""
    mode_control = get_provider_spec("claude").control("permission_mode")
    if mode_control is None:
        return ()
    return mode_control.choice_values()
727
+
728
+
729
def get_claude_output_suffixes() -> dict[str, str]:
    """Map output-format value -> file suffix (empty dict if control absent)."""
    fmt_control = get_provider_spec("claude").control("output_format")
    if fmt_control is None:
        return {}
    suffixes: dict[str, str] = {}
    for choice in fmt_control.choices:
        suffixes[choice.value] = str(choice.metadata.get("suffix", ""))
    return suffixes
734
+
735
+
736
def get_gemini_model_options() -> tuple[str, ...]:
    """Model names currently exposed for the Gemini provider."""
    provider = get_provider_spec("gemini")
    return provider.model_names
738
+
739
+
740
def get_gemini_default_model() -> str:
    """Default Gemini model, falling back to the hardcoded preview name."""
    provider = get_provider_spec("gemini")
    return provider.default_model or "gemini-3-pro-preview"
742
+
743
+
744
def get_copilot_default_model() -> str:
    """Default Copilot model, falling back to the hardcoded baseline name."""
    provider = get_provider_spec("copilot")
    return provider.default_model or "claude-sonnet-4.5"
746
+
747
+
748
def get_copilot_model_catalog() -> tuple[str, ...]:
    """Model names currently exposed for the Copilot provider."""
    provider = get_provider_spec("copilot")
    return provider.model_names