@dexto/tui 1.6.11 → 1.6.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/dist/InkCLIRefactored.cjs +39 -29
  2. package/dist/InkCLIRefactored.d.ts.map +1 -1
  3. package/dist/InkCLIRefactored.js +39 -29
  4. package/dist/components/Footer.cjs +9 -2
  5. package/dist/components/Footer.d.ts +3 -2
  6. package/dist/components/Footer.d.ts.map +1 -1
  7. package/dist/components/Footer.js +17 -3
  8. package/dist/components/modes/AlternateBufferCLI.cjs +2 -1
  9. package/dist/components/modes/AlternateBufferCLI.d.ts.map +1 -1
  10. package/dist/components/modes/AlternateBufferCLI.js +2 -1
  11. package/dist/components/modes/StaticCLI.cjs +2 -1
  12. package/dist/components/modes/StaticCLI.d.ts.map +1 -1
  13. package/dist/components/modes/StaticCLI.js +2 -1
  14. package/dist/components/overlays/ChatGPTUsageCapOverlay.cjs +90 -0
  15. package/dist/components/overlays/ChatGPTUsageCapOverlay.d.ts +19 -0
  16. package/dist/components/overlays/ChatGPTUsageCapOverlay.d.ts.map +1 -0
  17. package/dist/components/overlays/ChatGPTUsageCapOverlay.js +70 -0
  18. package/dist/components/overlays/ModelSelectorRefactored.cjs +263 -38
  19. package/dist/components/overlays/ModelSelectorRefactored.d.ts.map +1 -1
  20. package/dist/components/overlays/ModelSelectorRefactored.js +267 -38
  21. package/dist/components/overlays/ReasoningOverlay.cjs +1 -1
  22. package/dist/components/overlays/ReasoningOverlay.js +1 -1
  23. package/dist/containers/OverlayContainer.cjs +104 -13
  24. package/dist/containers/OverlayContainer.d.ts.map +1 -1
  25. package/dist/containers/OverlayContainer.js +104 -13
  26. package/dist/hooks/useAgentEvents.cjs +33 -2
  27. package/dist/hooks/useAgentEvents.d.ts.map +1 -1
  28. package/dist/hooks/useAgentEvents.js +35 -3
  29. package/dist/hooks/useCLIState.cjs +1 -0
  30. package/dist/hooks/useCLIState.d.ts.map +1 -1
  31. package/dist/hooks/useCLIState.js +1 -0
  32. package/dist/interactive-commands/exit-stats.cjs +16 -0
  33. package/dist/interactive-commands/exit-stats.d.ts +4 -0
  34. package/dist/interactive-commands/exit-stats.d.ts.map +1 -1
  35. package/dist/interactive-commands/exit-stats.js +15 -0
  36. package/dist/interactive-commands/general-commands.cjs +13 -2
  37. package/dist/interactive-commands/general-commands.d.ts.map +1 -1
  38. package/dist/interactive-commands/general-commands.js +14 -3
  39. package/dist/interactive-commands/general-commands.test.cjs +152 -0
  40. package/dist/interactive-commands/general-commands.test.d.ts +2 -0
  41. package/dist/interactive-commands/general-commands.test.d.ts.map +1 -0
  42. package/dist/interactive-commands/general-commands.test.js +151 -0
  43. package/dist/services/processStream.test.cjs +1 -0
  44. package/dist/services/processStream.test.js +1 -0
  45. package/dist/state/initialState.cjs +1 -0
  46. package/dist/state/initialState.d.ts.map +1 -1
  47. package/dist/state/initialState.js +1 -0
  48. package/dist/state/types.d.ts +4 -2
  49. package/dist/state/types.d.ts.map +1 -1
  50. package/dist/utils/chatgpt-rate-limit.cjs +72 -0
  51. package/dist/utils/chatgpt-rate-limit.d.ts +11 -0
  52. package/dist/utils/chatgpt-rate-limit.d.ts.map +1 -0
  53. package/dist/utils/chatgpt-rate-limit.js +49 -0
  54. package/dist/utils/chatgpt-rate-limit.test.cjs +46 -0
  55. package/dist/utils/chatgpt-rate-limit.test.d.ts +2 -0
  56. package/dist/utils/chatgpt-rate-limit.test.d.ts.map +1 -0
  57. package/dist/utils/chatgpt-rate-limit.test.js +49 -0
  58. package/dist/utils/llm-provider-display.cjs +11 -1
  59. package/dist/utils/llm-provider-display.d.ts +2 -2
  60. package/dist/utils/llm-provider-display.d.ts.map +1 -1
  61. package/dist/utils/llm-provider-display.js +11 -1
  62. package/dist/utils/llm-provider-display.test.cjs +15 -0
  63. package/dist/utils/llm-provider-display.test.d.ts +2 -0
  64. package/dist/utils/llm-provider-display.test.d.ts.map +1 -0
  65. package/dist/utils/llm-provider-display.test.js +14 -0
  66. package/package.json +4 -4
@@ -11,12 +11,16 @@ import {
11
11
  import { Box, Text } from "ink";
12
12
  import { useTerminalSize } from "../../hooks/useTerminalSize.js";
13
13
  import {
14
+ CodexAppServerClient,
15
+ createCodexBaseURL,
14
16
  listOllamaModels,
15
17
  DEFAULT_OLLAMA_URL,
16
18
  getLocalModelById,
17
19
  getCuratedModelRefsForProviders,
20
+ getModelDisplayName,
18
21
  getOpenRouterModelCacheInfo,
19
22
  getReasoningProfile,
23
+ parseCodexBaseURL,
20
24
  refreshOpenRouterModelCache
21
25
  } from "@dexto/core";
22
26
  import {
@@ -48,9 +52,102 @@ const MODEL_SELECTOR_TABS = [
48
52
  ];
49
53
  const PROVIDER_COLLATOR = new Intl.Collator("en", { sensitivity: "base" });
50
54
  const PROVIDER_TOKEN_PATTERN = /[^a-z0-9]/g;
55
// Canonical base URL for ChatGPT-authenticated Codex requests; used as the
// identity base URL for ChatGPT Codex model entries in the model picker.
const CODEX_CHATGPT_BASE_URL = createCodexBaseURL("chatgpt");
// Per-step timeout for ChatGPT Codex model-listing calls (5e3 ms = 5 s).
const CHATGPT_CODEX_MODEL_LIST_TIMEOUT_MS = 5e3;
51
57
function normalizeProviderToken(value) {
  // Case-fold and strip every non-alphanumeric character so provider
  // identifiers compare equal regardless of punctuation, spacing, or casing.
  const lowered = value.toLowerCase();
  return lowered.replace(/[^a-z0-9]/g, "");
}
60
function isChatGPTCodexBaseURL(baseURL) {
  // A base URL counts as "ChatGPT Codex" when it parses as a Codex URL whose
  // auth mode is the ChatGPT subscription flow.
  const parsed = parseCodexBaseURL(baseURL);
  return parsed?.authMode === "chatgpt";
}
63
function isChatGPTCodexConfig(provider, baseURL) {
  // Only the openai-compatible provider can be backed by a ChatGPT Codex URL.
  if (provider !== "openai-compatible") {
    return false;
  }
  return isChatGPTCodexBaseURL(baseURL);
}
66
function canonicalizeModelBaseURL(baseURL) {
  // Collapse any recognized Codex base-URL alias to the canonical URL for its
  // auth mode; non-Codex URLs (and undefined) pass through unchanged.
  const codex = parseCodexBaseURL(baseURL);
  if (codex) {
    return createCodexBaseURL(codex.authMode);
  }
  return baseURL;
}
70
function toCanonicalModelPickerKey(input) {
  // Build a picker key whose base URL has been canonicalized, so aliased
  // Codex URLs produce the same key. A falsy base URL is omitted entirely.
  const canonicalBaseURL = canonicalizeModelBaseURL(input.baseURL);
  const ref = {
    provider: input.provider,
    model: input.model
  };
  if (canonicalBaseURL) {
    ref.baseURL = canonicalBaseURL;
  }
  return toModelPickerKey(ref);
}
78
function createModelIdentity(input) {
  // Produce a minimal identity record; baseURL is included only when truthy
  // so identities without one stay comparable to records that omit the key.
  const identity = {
    provider: input.provider,
    name: input.name
  };
  if (input.baseURL) {
    identity.baseURL = input.baseURL;
  }
  return identity;
}
85
/**
 * Await `promise`, rejecting with `options.errorMessage` if it has not
 * settled within `options.timeoutMs`. `options.onTimeout` (optional) fires
 * just before the timeout rejection. The timer is always cleared once the
 * underlying promise settles, so `onTimeout` never fires after success.
 */
async function raceWithTimeout(promise, options) {
  let timer;
  const timeout = new Promise((_resolve, reject) => {
    timer = setTimeout(() => {
      options.onTimeout?.();
      reject(new Error(options.errorMessage));
    }, options.timeoutMs);
  });
  try {
    // Promise.race observes both promises, so a late rejection of either
    // side cannot become an unhandled rejection.
    return await Promise.race([promise, timeout]);
  } finally {
    clearTimeout(timer);
  }
}
103
/**
 * Decide whether a model-list candidate refers to the same model as a
 * configured (default/current) LLM entry.
 *
 * Matching rules: provider and model name must be identical; base URLs match
 * when byte-equal (absent treated as ""), or when both parse as Codex URLs
 * with the same auth mode (so aliased Codex URLs compare equal).
 */
function matchesConfiguredModel(candidate, configured) {
  if (candidate.provider !== configured.provider || candidate.name !== configured.model) {
    return false;
  }
  const candidateBaseURL = candidate.baseURL ?? "";
  const configuredBaseURL = configured.baseURL ?? "";
  if (candidateBaseURL === configuredBaseURL) {
    return true;
  }
  const candidateCodex = parseCodexBaseURL(candidate.baseURL);
  const configuredCodex = parseCodexBaseURL(configured.baseURL);
  // Bug fix: when NEITHER side parses as a Codex URL, the old comparison
  // `candidateCodex?.authMode === configuredCodex?.authMode` evaluated
  // `undefined === undefined` -> true, so two *different* non-Codex base URLs
  // were treated as the same model. Require both sides to be Codex URLs
  // before comparing auth modes.
  if (!candidateCodex || !configuredCodex) {
    return false;
  }
  return candidateCodex.authMode === configuredCodex.authMode;
}
116
/**
 * Best-effort listing of ChatGPT Codex models. Every step (client start,
 * account read, model list) is bounded by CHATGPT_CODEX_MODEL_LIST_TIMEOUT_MS.
 * Any failure or timeout is logged at debug level and yields an empty list,
 * so the model picker never blocks on Codex availability.
 */
async function loadChatGPTCodexModels(agent) {
  const timeoutMs = CHATGPT_CODEX_MODEL_LIST_TIMEOUT_MS;
  // Shared wrapper: race a step against the per-step timeout budget.
  const timed = (promise, errorMessage, onTimeout) =>
    raceWithTimeout(promise, {
      timeoutMs,
      errorMessage,
      ...(onTimeout ? { onTimeout } : {})
    });
  let client = null;
  try {
    const createPromise = CodexAppServerClient.create({
      requestTimeoutMs: timeoutMs
    });
    client = await timed(
      createPromise,
      `Timed out starting ChatGPT Codex model listing after ${timeoutMs}ms`,
      () => {
        // If the client finishes starting after we gave up, close it in the
        // background so it does not leak.
        void createPromise
          .then((lateClient) => lateClient.close().catch(() => void 0))
          .catch(() => void 0);
      }
    );
    const account = await timed(
      client.readAccount(false),
      `Timed out reading ChatGPT Codex account state after ${timeoutMs}ms`
    );
    // Codex model listing is only meaningful for ChatGPT accounts.
    if (account.account?.type !== "chatgpt") {
      return [];
    }
    return await timed(
      client.listModels(),
      `Timed out listing ChatGPT Codex models after ${timeoutMs}ms`
    );
  } catch (error) {
    agent.logger.debug(
      `ChatGPT Codex model list unavailable: ${error instanceof Error ? error.message : String(error)}`
    );
    return [];
  } finally {
    if (client) {
      await client.close().catch(() => void 0);
    }
  }
}
54
151
function toReleaseDateLookupKey(provider, modelName) {
  // Composite lookup key: provider verbatim plus the case-folded model name,
  // so release-date lookups are insensitive to model-name casing.
  const normalizedName = modelName.toLowerCase();
  return `${provider}::${normalizedName}`;
}
@@ -167,7 +264,11 @@ function compareModelOptionsForDisplay(left, right) {
167
264
  return 0;
168
265
  }
169
266
function toModelIdentityKey(model) {
  // Identity keys go through the canonicalizing picker-key helper so that
  // aliased Codex base URLs collapse to one key; a falsy baseURL is omitted.
  const ref = {
    provider: model.provider,
    model: model.name
  };
  if (model.baseURL) {
    ref.baseURL = model.baseURL;
  }
  return toCanonicalModelPickerKey(ref);
}
172
273
  function normalizeLineText(value) {
173
274
  return stripUnsafeCharacters(value).replace(/\r?\n/g, " ").replace(/\s+/g, " ").trim();
@@ -324,12 +425,20 @@ const ModelSelector = forwardRef(function ModelSelector2({
324
425
  `OpenRouter catalog refresh skipped: ${error instanceof Error ? error.message : String(error)}`
325
426
  );
326
427
  }
327
- const [allModels, providers, currentConfig, loadedCustomModels, preferences] = await Promise.all([
428
+ const [
429
+ allModels,
430
+ providers,
431
+ currentConfig,
432
+ loadedCustomModels,
433
+ preferences,
434
+ codexModels
435
+ ] = await Promise.all([
328
436
  Promise.resolve(agent.getSupportedModels()),
329
437
  Promise.resolve(agent.getSupportedProviders()),
330
438
  Promise.resolve(agent.getCurrentLLMConfig()),
331
439
  loadCustomModels(),
332
- loadGlobalPreferences().catch(() => null)
440
+ loadGlobalPreferences().catch(() => null),
441
+ loadChatGPTCodexModels(agent)
333
442
  ]);
334
443
  const pickerState = await loadModelPickerState().catch(() => null);
335
444
  const modelList = [];
@@ -338,6 +447,51 @@ const ModelSelector = forwardRef(function ModelSelector2({
338
447
  const defaultBaseURL = preferences?.llm.baseURL;
339
448
  const defaultReasoningVariant = preferences?.llm.reasoning?.variant;
340
449
  const resolveReleaseDate = createReleaseDateResolver({ allModels, providers });
450
+ const defaultConfig = defaultProvider && defaultModel ? {
451
+ provider: defaultProvider,
452
+ model: defaultModel,
453
+ ...defaultBaseURL ? { baseURL: defaultBaseURL } : {}
454
+ } : null;
455
+ const currentModelConfig = {
456
+ provider: currentConfig.provider,
457
+ model: currentConfig.model,
458
+ ...currentConfig.baseURL ? { baseURL: currentConfig.baseURL } : {}
459
+ };
460
+ const getMatchState = (candidate) => ({
461
+ isDefault: defaultConfig ? matchesConfiguredModel(candidate, defaultConfig) : false,
462
+ isCurrent: matchesConfiguredModel(candidate, currentModelConfig)
463
+ });
464
+ const addChatGPTCodexModel = (input) => {
465
+ const existing = modelList.find(
466
+ (candidate) => candidate.provider === "openai-compatible" && candidate.name === input.model && candidate.baseURL === CODEX_CHATGPT_BASE_URL
467
+ );
468
+ if (existing) {
469
+ existing.isDefault = existing.isDefault || input.isDefault;
470
+ existing.isCurrent = existing.isCurrent || input.isCurrent;
471
+ if (existing.displayName === void 0 && input.displayName !== void 0) {
472
+ existing.displayName = input.displayName;
473
+ }
474
+ if (existing.releaseDate === void 0 && input.releaseDate !== void 0) {
475
+ existing.releaseDate = input.releaseDate;
476
+ }
477
+ if (existing.reasoningVariant === void 0 && input.reasoningVariant !== void 0) {
478
+ existing.reasoningVariant = input.reasoningVariant;
479
+ }
480
+ return;
481
+ }
482
+ modelList.push({
483
+ provider: "openai-compatible",
484
+ name: input.model,
485
+ displayName: input.displayName,
486
+ maxInputTokens: 128e3,
487
+ isDefault: input.isDefault,
488
+ isCurrent: input.isCurrent,
489
+ isCustom: false,
490
+ baseURL: CODEX_CHATGPT_BASE_URL,
491
+ ...input.releaseDate !== void 0 ? { releaseDate: input.releaseDate } : {},
492
+ ...input.reasoningVariant !== void 0 ? { reasoningVariant: input.reasoningVariant } : {}
493
+ });
494
+ };
341
495
  let ollamaModels = [];
342
496
  let localModels = [];
343
497
  try {
@@ -352,13 +506,19 @@ const ModelSelector = forwardRef(function ModelSelector2({
352
506
  }
353
507
  for (const custom of loadedCustomModels) {
354
508
  const customProvider = custom.provider;
509
+ const candidate = createModelIdentity({
510
+ provider: customProvider,
511
+ name: custom.name,
512
+ baseURL: custom.baseURL
513
+ });
514
+ const { isDefault, isCurrent } = getMatchState(candidate);
355
515
  const modelOption = {
356
516
  provider: customProvider,
357
517
  name: custom.name,
358
518
  displayName: custom.displayName || custom.name,
359
519
  maxInputTokens: custom.maxInputTokens ?? 128e3,
360
- isDefault: customProvider === defaultProvider && custom.name === defaultModel,
361
- isCurrent: currentConfig.provider === customProvider && currentConfig.model === custom.name,
520
+ isDefault,
521
+ isCurrent,
362
522
  isCustom: true
363
523
  };
364
524
  if (custom.baseURL) {
@@ -389,32 +549,95 @@ const ModelSelector = forwardRef(function ModelSelector2({
389
549
  model.releaseDate
390
550
  );
391
551
  const originalProvider = "originalProvider" in model ? model.originalProvider : void 0;
552
+ const candidate = createModelIdentity({
553
+ provider,
554
+ name: model.name
555
+ });
556
+ const { isDefault, isCurrent } = getMatchState(candidate);
392
557
  modelList.push({
393
558
  provider,
394
559
  name: model.name,
395
560
  displayName: model.displayName,
396
561
  maxInputTokens: model.maxInputTokens,
397
- isDefault: provider === defaultProvider && model.name === defaultModel,
398
- isCurrent: provider === currentConfig.provider && model.name === currentConfig.model,
562
+ isDefault,
563
+ isCurrent,
399
564
  isCustom: false,
400
565
  ...releaseDate !== void 0 ? { releaseDate } : {},
401
566
  ...model.status !== void 0 ? { status: model.status } : {},
402
- ...defaultReasoningVariant && provider === defaultProvider && model.name === defaultModel ? { reasoningVariant: defaultReasoningVariant } : {},
403
- ...defaultBaseURL && provider === defaultProvider && model.name === defaultModel ? { baseURL: defaultBaseURL } : {},
567
+ ...defaultReasoningVariant && isDefault ? { reasoningVariant: defaultReasoningVariant } : {},
404
568
  // Store original provider for display purposes
405
569
  ...originalProvider && { originalProvider }
406
570
  });
407
571
  }
408
572
  }
573
+ for (const codexModel of codexModels) {
574
+ const candidate = {
575
+ provider: "openai-compatible",
576
+ name: codexModel.model,
577
+ baseURL: CODEX_CHATGPT_BASE_URL
578
+ };
579
+ const isDefault = defaultConfig ? matchesConfiguredModel(candidate, defaultConfig) : false;
580
+ const releaseDate = resolveReleaseDate("openai", codexModel.model) ?? resolveReleaseDate("openrouter", codexModel.model);
581
+ addChatGPTCodexModel({
582
+ model: codexModel.model,
583
+ displayName: codexModel.displayName,
584
+ isDefault,
585
+ isCurrent: matchesConfiguredModel(candidate, currentModelConfig),
586
+ ...releaseDate !== void 0 ? { releaseDate } : {},
587
+ ...isDefault && defaultReasoningVariant ? { reasoningVariant: defaultReasoningVariant } : {}
588
+ });
589
+ }
590
+ const addMissingConfiguredCodexModel = (configured) => {
591
+ if (!configured || !isChatGPTCodexConfig(configured.provider, configured.baseURL)) {
592
+ return;
593
+ }
594
+ const releaseDate = resolveReleaseDate("openai", configured.model) ?? resolveReleaseDate("openrouter", configured.model);
595
+ addChatGPTCodexModel({
596
+ model: configured.model,
597
+ displayName: getModelDisplayName(configured.model, "openai"),
598
+ isDefault: defaultConfig ? matchesConfiguredModel(
599
+ {
600
+ provider: "openai-compatible",
601
+ name: configured.model,
602
+ baseURL: CODEX_CHATGPT_BASE_URL
603
+ },
604
+ defaultConfig
605
+ ) : false,
606
+ isCurrent: matchesConfiguredModel(
607
+ {
608
+ provider: "openai-compatible",
609
+ name: configured.model,
610
+ baseURL: CODEX_CHATGPT_BASE_URL
611
+ },
612
+ currentModelConfig
613
+ ),
614
+ ...releaseDate !== void 0 ? { releaseDate } : {},
615
+ ...defaultConfig && defaultReasoningVariant && matchesConfiguredModel(
616
+ {
617
+ provider: "openai-compatible",
618
+ name: configured.model,
619
+ baseURL: CODEX_CHATGPT_BASE_URL
620
+ },
621
+ defaultConfig
622
+ ) ? { reasoningVariant: defaultReasoningVariant } : {}
623
+ });
624
+ };
625
+ addMissingConfiguredCodexModel(defaultConfig);
626
+ addMissingConfiguredCodexModel(currentModelConfig);
409
627
  for (const ollamaModel of ollamaModels) {
628
+ const candidate = createModelIdentity({
629
+ provider: "ollama",
630
+ name: ollamaModel.name
631
+ });
632
+ const { isDefault, isCurrent } = getMatchState(candidate);
410
633
  modelList.push({
411
634
  provider: "ollama",
412
635
  name: ollamaModel.name,
413
636
  displayName: ollamaModel.name,
414
637
  maxInputTokens: 128e3,
415
638
  // Default, actual varies by model
416
- isDefault: defaultProvider === "ollama" && defaultModel === ollamaModel.name,
417
- isCurrent: currentConfig.provider === "ollama" && currentConfig.model === ollamaModel.name,
639
+ isDefault,
640
+ isCurrent,
418
641
  isCustom: false
419
642
  });
420
643
  }
@@ -422,13 +645,18 @@ const ModelSelector = forwardRef(function ModelSelector2({
422
645
  const modelInfo = getLocalModelById(localModel.id);
423
646
  const displayName = modelInfo?.name || localModel.id;
424
647
  const maxInputTokens = modelInfo?.contextLength || 128e3;
648
+ const candidate = createModelIdentity({
649
+ provider: "local",
650
+ name: localModel.id
651
+ });
652
+ const { isDefault, isCurrent } = getMatchState(candidate);
425
653
  modelList.push({
426
654
  provider: "local",
427
655
  name: localModel.id,
428
656
  displayName,
429
657
  maxInputTokens,
430
- isDefault: defaultProvider === "local" && defaultModel === localModel.id,
431
- isCurrent: currentConfig.provider === "local" && currentConfig.model === localModel.id,
658
+ isDefault,
659
+ isCurrent,
432
660
  isCustom: false
433
661
  });
434
662
  }
@@ -443,17 +671,22 @@ const ModelSelector = forwardRef(function ModelSelector2({
443
671
  model.name,
444
672
  model.releaseDate
445
673
  );
674
+ const candidate = createModelIdentity({
675
+ provider: "vertex",
676
+ name: model.name
677
+ });
678
+ const { isDefault, isCurrent } = getMatchState(candidate);
446
679
  modelList.push({
447
680
  provider: "vertex",
448
681
  name: model.name,
449
682
  displayName: model.displayName,
450
683
  maxInputTokens: model.maxInputTokens,
451
- isDefault: defaultProvider === "vertex" && defaultModel === model.name,
452
- isCurrent: currentConfig.provider === "vertex" && currentConfig.model === model.name,
684
+ isDefault,
685
+ isCurrent,
453
686
  isCustom: false,
454
687
  ...releaseDate !== void 0 ? { releaseDate } : {},
455
688
  ...model.status !== void 0 ? { status: model.status } : {},
456
- ...defaultReasoningVariant && defaultProvider === "vertex" && defaultModel === model.name ? { reasoningVariant: defaultReasoningVariant } : {}
689
+ ...defaultReasoningVariant && isDefault ? { reasoningVariant: defaultReasoningVariant } : {}
457
690
  });
458
691
  }
459
692
  }
@@ -541,12 +774,7 @@ const ModelSelector = forwardRef(function ModelSelector2({
541
774
  }, [isVisible, agent, refreshVersion]);
542
775
  const favoriteKeySet = useMemo(
543
776
  () => new Set(
544
- (modelPickerState?.favorites ?? []).map(
545
- (entry) => toModelPickerKey({
546
- provider: entry.provider,
547
- model: entry.model
548
- })
549
- )
777
+ (modelPickerState?.favorites ?? []).map((entry) => toCanonicalModelPickerKey(entry))
550
778
  ),
551
779
  [modelPickerState]
552
780
  );
@@ -559,7 +787,8 @@ const ModelSelector = forwardRef(function ModelSelector2({
559
787
  const name = model.name.toLowerCase().replace(/[\s-]+/g, "");
560
788
  const displayName = (model.displayName || "").toLowerCase().replace(/[\s-]+/g, "");
561
789
  const provider = model.provider.toLowerCase().replace(/[\s-]+/g, "");
562
- return name.includes(query) || displayName.includes(query) || provider.includes(query);
790
+ const providerDisplay = getLLMProviderDisplayName(model.provider, model.baseURL).toLowerCase().replace(/[\s-]+/g, "");
791
+ return name.includes(query) || displayName.includes(query) || provider.includes(query) || providerDisplay.includes(query);
563
792
  },
564
793
  [searchQuery]
565
794
  );
@@ -568,10 +797,7 @@ const ModelSelector = forwardRef(function ModelSelector2({
568
797
  const hasSearchQuery = searchQuery.trim().length > 0;
569
798
  const allCandidates = [...models].sort(compareModelOptionsForDisplay);
570
799
  const modelsByKey = new Map(
571
- allCandidates.map((model) => [
572
- toModelPickerKey({ provider: model.provider, model: model.name }),
573
- model
574
- ])
800
+ allCandidates.map((model) => [toModelIdentityKey(model), model])
575
801
  );
576
802
  const toUniqueMatchingModels = (candidates, limit) => {
577
803
  const deduped = [];
@@ -580,10 +806,7 @@ const ModelSelector = forwardRef(function ModelSelector2({
580
806
  if (!candidate || !matchesSearch(candidate)) {
581
807
  continue;
582
808
  }
583
- const key = toModelPickerKey({
584
- provider: candidate.provider,
585
- model: candidate.name
586
- });
809
+ const key = toModelIdentityKey(candidate);
587
810
  if (seen.has(key)) {
588
811
  continue;
589
812
  }
@@ -601,12 +824,12 @@ const ModelSelector = forwardRef(function ModelSelector2({
601
824
  const featuredCandidates = getCuratedModelRefsForProviders({
602
825
  providers: providersInModels,
603
826
  max: FEATURED_SECTION_LIMIT
604
- }).map((ref2) => modelsByKey.get(toModelPickerKey(ref2)));
827
+ }).map((ref2) => modelsByKey.get(toCanonicalModelPickerKey(ref2)));
605
828
  const recentsFromState = (modelPickerState?.recents ?? []).map(
606
- (entry) => modelsByKey.get(toModelPickerKey({ provider: entry.provider, model: entry.model }))
829
+ (entry) => modelsByKey.get(toCanonicalModelPickerKey(entry))
607
830
  );
608
831
  const favoritesFromState = (modelPickerState?.favorites ?? []).map(
609
- (entry) => modelsByKey.get(toModelPickerKey({ provider: entry.provider, model: entry.model }))
832
+ (entry) => modelsByKey.get(toCanonicalModelPickerKey(entry))
610
833
  );
611
834
  const customCandidates = allCandidates.filter((model) => model.isCustom);
612
835
  const tabModels = hasSearchQuery ? toUniqueMatchingModels(allCandidates) : activeTab === "all-models" ? toUniqueMatchingModels(allCandidates) : activeTab === "featured" ? toUniqueMatchingModels(featuredCandidates) : activeTab === "recents" ? toUniqueMatchingModels(recentsFromState) : activeTab === "favorites" ? toUniqueMatchingModels(favoritesFromState) : toUniqueMatchingModels(customCandidates);
@@ -659,9 +882,11 @@ const ModelSelector = forwardRef(function ModelSelector2({
659
882
  const handleToggleFavoriteModel = useCallback(
660
883
  async (model) => {
661
884
  try {
885
+ const baseURL = canonicalizeModelBaseURL(model.baseURL);
662
886
  await toggleFavoriteModel({
663
887
  provider: model.provider,
664
- model: model.name
888
+ model: model.name,
889
+ ...baseURL ? { baseURL } : {}
665
890
  });
666
891
  const nextState = await loadModelPickerState();
667
892
  setModelPickerState(nextState);
@@ -1149,12 +1374,16 @@ const ModelSelector = forwardRef(function ModelSelector2({
1149
1374
  }
1150
1375
  const actualIndex = modelStartIndex + scrollOffset + rowIndex;
1151
1376
  const isSelected = actualIndex === selectedIndex;
1152
- const providerDisplay = getLLMProviderDisplayName(item.provider);
1377
+ const providerDisplay = getLLMProviderDisplayName(
1378
+ item.provider,
1379
+ item.baseURL
1380
+ );
1153
1381
  const name = item.displayName || item.name;
1154
1382
  const isFavorite = favoriteKeySet.has(
1155
- toModelPickerKey({
1383
+ toCanonicalModelPickerKey({
1156
1384
  provider: item.provider,
1157
- model: item.name
1385
+ model: item.name,
1386
+ ...item.baseURL ? { baseURL: item.baseURL } : {}
1158
1387
  })
1159
1388
  );
1160
1389
  const prefix = getRowPrefix({
@@ -215,7 +215,7 @@ const ReasoningOverlay = import_react.default.forwardRef(
215
215
  [budgetInput, handleMenuSelect, isVisible, menuItems, mode, onClose, selectedIndex]
216
216
  );
217
217
  if (!isVisible) return null;
218
- const providerLabel = (0, import_llm_provider_display.getLLMProviderDisplayName)(provider);
218
+ const providerLabel = (0, import_llm_provider_display.getLLMProviderDisplayName)(provider, llmConfig.baseURL);
219
219
  const modelLabel = (0, import_core.getModelDisplayName)(model);
220
220
  return /* @__PURE__ */ (0, import_jsx_runtime.jsxs)(
221
221
  import_ink.Box,
@@ -189,7 +189,7 @@ const ReasoningOverlay = React.forwardRef(
189
189
  [budgetInput, handleMenuSelect, isVisible, menuItems, mode, onClose, selectedIndex]
190
190
  );
191
191
  if (!isVisible) return null;
192
- const providerLabel = getLLMProviderDisplayName(provider);
192
+ const providerLabel = getLLMProviderDisplayName(provider, llmConfig.baseURL);
193
193
  const modelLabel = getModelDisplayName(model);
194
194
  return /* @__PURE__ */ jsxs(
195
195
  Box,