@shareai-lab/kode 1.0.69 → 1.0.71

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (253) hide show
  1. package/README.md +205 -72
  2. package/README.zh-CN.md +246 -0
  3. package/cli.js +62 -0
  4. package/package.json +45 -25
  5. package/scripts/postinstall.js +56 -0
  6. package/src/ProjectOnboarding.tsx +180 -0
  7. package/src/Tool.ts +53 -0
  8. package/src/commands/approvedTools.ts +53 -0
  9. package/src/commands/bug.tsx +20 -0
  10. package/src/commands/clear.ts +43 -0
  11. package/src/commands/compact.ts +120 -0
  12. package/src/commands/config.tsx +19 -0
  13. package/src/commands/cost.ts +18 -0
  14. package/src/commands/ctx_viz.ts +209 -0
  15. package/src/commands/doctor.ts +24 -0
  16. package/src/commands/help.tsx +19 -0
  17. package/src/commands/init.ts +37 -0
  18. package/src/commands/listen.ts +42 -0
  19. package/src/commands/login.tsx +51 -0
  20. package/src/commands/logout.tsx +40 -0
  21. package/src/commands/mcp.ts +41 -0
  22. package/src/commands/model.tsx +40 -0
  23. package/src/commands/modelstatus.tsx +20 -0
  24. package/src/commands/onboarding.tsx +34 -0
  25. package/src/commands/pr_comments.ts +59 -0
  26. package/src/commands/refreshCommands.ts +54 -0
  27. package/src/commands/release-notes.ts +34 -0
  28. package/src/commands/resume.tsx +30 -0
  29. package/src/commands/review.ts +49 -0
  30. package/src/commands/terminalSetup.ts +221 -0
  31. package/src/commands.ts +136 -0
  32. package/src/components/ApproveApiKey.tsx +93 -0
  33. package/src/components/AsciiLogo.tsx +13 -0
  34. package/src/components/AutoUpdater.tsx +148 -0
  35. package/src/components/Bug.tsx +367 -0
  36. package/src/components/Config.tsx +289 -0
  37. package/src/components/ConsoleOAuthFlow.tsx +326 -0
  38. package/src/components/Cost.tsx +23 -0
  39. package/src/components/CostThresholdDialog.tsx +46 -0
  40. package/src/components/CustomSelect/option-map.ts +42 -0
  41. package/src/components/CustomSelect/select-option.tsx +52 -0
  42. package/src/components/CustomSelect/select.tsx +143 -0
  43. package/src/components/CustomSelect/use-select-state.ts +414 -0
  44. package/src/components/CustomSelect/use-select.ts +35 -0
  45. package/src/components/FallbackToolUseRejectedMessage.tsx +15 -0
  46. package/src/components/FileEditToolUpdatedMessage.tsx +66 -0
  47. package/src/components/Help.tsx +215 -0
  48. package/src/components/HighlightedCode.tsx +33 -0
  49. package/src/components/InvalidConfigDialog.tsx +113 -0
  50. package/src/components/Link.tsx +32 -0
  51. package/src/components/LogSelector.tsx +86 -0
  52. package/src/components/Logo.tsx +145 -0
  53. package/src/components/MCPServerApprovalDialog.tsx +100 -0
  54. package/src/components/MCPServerDialogCopy.tsx +25 -0
  55. package/src/components/MCPServerMultiselectDialog.tsx +109 -0
  56. package/src/components/Message.tsx +219 -0
  57. package/src/components/MessageResponse.tsx +15 -0
  58. package/src/components/MessageSelector.tsx +211 -0
  59. package/src/components/ModeIndicator.tsx +88 -0
  60. package/src/components/ModelConfig.tsx +301 -0
  61. package/src/components/ModelListManager.tsx +223 -0
  62. package/src/components/ModelSelector.tsx +3208 -0
  63. package/src/components/ModelStatusDisplay.tsx +228 -0
  64. package/src/components/Onboarding.tsx +274 -0
  65. package/src/components/PressEnterToContinue.tsx +11 -0
  66. package/src/components/PromptInput.tsx +710 -0
  67. package/src/components/SentryErrorBoundary.ts +33 -0
  68. package/src/components/Spinner.tsx +129 -0
  69. package/src/components/StructuredDiff.tsx +184 -0
  70. package/src/components/TextInput.tsx +246 -0
  71. package/src/components/TokenWarning.tsx +31 -0
  72. package/src/components/ToolUseLoader.tsx +40 -0
  73. package/src/components/TrustDialog.tsx +106 -0
  74. package/src/components/binary-feedback/BinaryFeedback.tsx +63 -0
  75. package/src/components/binary-feedback/BinaryFeedbackOption.tsx +111 -0
  76. package/src/components/binary-feedback/BinaryFeedbackView.tsx +172 -0
  77. package/src/components/binary-feedback/utils.ts +220 -0
  78. package/src/components/messages/AssistantBashOutputMessage.tsx +22 -0
  79. package/src/components/messages/AssistantLocalCommandOutputMessage.tsx +45 -0
  80. package/src/components/messages/AssistantRedactedThinkingMessage.tsx +19 -0
  81. package/src/components/messages/AssistantTextMessage.tsx +144 -0
  82. package/src/components/messages/AssistantThinkingMessage.tsx +40 -0
  83. package/src/components/messages/AssistantToolUseMessage.tsx +123 -0
  84. package/src/components/messages/UserBashInputMessage.tsx +28 -0
  85. package/src/components/messages/UserCommandMessage.tsx +30 -0
  86. package/src/components/messages/UserKodingInputMessage.tsx +28 -0
  87. package/src/components/messages/UserPromptMessage.tsx +35 -0
  88. package/src/components/messages/UserTextMessage.tsx +39 -0
  89. package/src/components/messages/UserToolResultMessage/UserToolCanceledMessage.tsx +12 -0
  90. package/src/components/messages/UserToolResultMessage/UserToolErrorMessage.tsx +36 -0
  91. package/src/components/messages/UserToolResultMessage/UserToolRejectMessage.tsx +31 -0
  92. package/src/components/messages/UserToolResultMessage/UserToolResultMessage.tsx +57 -0
  93. package/src/components/messages/UserToolResultMessage/UserToolSuccessMessage.tsx +35 -0
  94. package/src/components/messages/UserToolResultMessage/utils.tsx +56 -0
  95. package/src/components/permissions/BashPermissionRequest/BashPermissionRequest.tsx +121 -0
  96. package/src/components/permissions/FallbackPermissionRequest.tsx +155 -0
  97. package/src/components/permissions/FileEditPermissionRequest/FileEditPermissionRequest.tsx +182 -0
  98. package/src/components/permissions/FileEditPermissionRequest/FileEditToolDiff.tsx +75 -0
  99. package/src/components/permissions/FileWritePermissionRequest/FileWritePermissionRequest.tsx +164 -0
  100. package/src/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.tsx +81 -0
  101. package/src/components/permissions/FilesystemPermissionRequest/FilesystemPermissionRequest.tsx +242 -0
  102. package/src/components/permissions/PermissionRequest.tsx +103 -0
  103. package/src/components/permissions/PermissionRequestTitle.tsx +69 -0
  104. package/src/components/permissions/hooks.ts +44 -0
  105. package/src/components/permissions/toolUseOptions.ts +59 -0
  106. package/src/components/permissions/utils.ts +23 -0
  107. package/src/constants/betas.ts +5 -0
  108. package/src/constants/claude-asterisk-ascii-art.tsx +238 -0
  109. package/src/constants/figures.ts +4 -0
  110. package/src/constants/keys.ts +3 -0
  111. package/src/constants/macros.ts +6 -0
  112. package/src/constants/models.ts +935 -0
  113. package/src/constants/oauth.ts +18 -0
  114. package/src/constants/product.ts +17 -0
  115. package/src/constants/prompts.ts +177 -0
  116. package/src/constants/releaseNotes.ts +7 -0
  117. package/src/context/PermissionContext.tsx +149 -0
  118. package/src/context.ts +278 -0
  119. package/src/cost-tracker.ts +84 -0
  120. package/src/entrypoints/cli.tsx +1498 -0
  121. package/src/entrypoints/mcp.ts +176 -0
  122. package/src/history.ts +25 -0
  123. package/src/hooks/useApiKeyVerification.ts +59 -0
  124. package/src/hooks/useArrowKeyHistory.ts +55 -0
  125. package/src/hooks/useCanUseTool.ts +138 -0
  126. package/src/hooks/useCancelRequest.ts +39 -0
  127. package/src/hooks/useDoublePress.ts +42 -0
  128. package/src/hooks/useExitOnCtrlCD.ts +31 -0
  129. package/src/hooks/useInterval.ts +25 -0
  130. package/src/hooks/useLogMessages.ts +16 -0
  131. package/src/hooks/useLogStartupTime.ts +12 -0
  132. package/src/hooks/useNotifyAfterTimeout.ts +65 -0
  133. package/src/hooks/usePermissionRequestLogging.ts +44 -0
  134. package/src/hooks/useSlashCommandTypeahead.ts +137 -0
  135. package/src/hooks/useTerminalSize.ts +49 -0
  136. package/src/hooks/useTextInput.ts +315 -0
  137. package/src/messages.ts +37 -0
  138. package/src/permissions.ts +268 -0
  139. package/src/query.ts +704 -0
  140. package/src/screens/ConfigureNpmPrefix.tsx +197 -0
  141. package/src/screens/Doctor.tsx +219 -0
  142. package/src/screens/LogList.tsx +68 -0
  143. package/src/screens/REPL.tsx +792 -0
  144. package/src/screens/ResumeConversation.tsx +68 -0
  145. package/src/services/browserMocks.ts +66 -0
  146. package/src/services/claude.ts +1947 -0
  147. package/src/services/customCommands.ts +683 -0
  148. package/src/services/fileFreshness.ts +377 -0
  149. package/src/services/mcpClient.ts +564 -0
  150. package/src/services/mcpServerApproval.tsx +50 -0
  151. package/src/services/notifier.ts +40 -0
  152. package/src/services/oauth.ts +357 -0
  153. package/src/services/openai.ts +796 -0
  154. package/src/services/sentry.ts +3 -0
  155. package/src/services/statsig.ts +171 -0
  156. package/src/services/statsigStorage.ts +86 -0
  157. package/src/services/systemReminder.ts +406 -0
  158. package/src/services/vcr.ts +161 -0
  159. package/src/tools/ArchitectTool/ArchitectTool.tsx +122 -0
  160. package/src/tools/ArchitectTool/prompt.ts +15 -0
  161. package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx +505 -0
  162. package/src/tools/BashTool/BashTool.tsx +270 -0
  163. package/src/tools/BashTool/BashToolResultMessage.tsx +38 -0
  164. package/src/tools/BashTool/OutputLine.tsx +48 -0
  165. package/src/tools/BashTool/prompt.ts +174 -0
  166. package/src/tools/BashTool/utils.ts +56 -0
  167. package/src/tools/FileEditTool/FileEditTool.tsx +316 -0
  168. package/src/tools/FileEditTool/prompt.ts +51 -0
  169. package/src/tools/FileEditTool/utils.ts +58 -0
  170. package/src/tools/FileReadTool/FileReadTool.tsx +371 -0
  171. package/src/tools/FileReadTool/prompt.ts +7 -0
  172. package/src/tools/FileWriteTool/FileWriteTool.tsx +297 -0
  173. package/src/tools/FileWriteTool/prompt.ts +10 -0
  174. package/src/tools/GlobTool/GlobTool.tsx +119 -0
  175. package/src/tools/GlobTool/prompt.ts +8 -0
  176. package/src/tools/GrepTool/GrepTool.tsx +147 -0
  177. package/src/tools/GrepTool/prompt.ts +11 -0
  178. package/src/tools/MCPTool/MCPTool.tsx +106 -0
  179. package/src/tools/MCPTool/prompt.ts +3 -0
  180. package/src/tools/MemoryReadTool/MemoryReadTool.tsx +127 -0
  181. package/src/tools/MemoryReadTool/prompt.ts +3 -0
  182. package/src/tools/MemoryWriteTool/MemoryWriteTool.tsx +89 -0
  183. package/src/tools/MemoryWriteTool/prompt.ts +3 -0
  184. package/src/tools/MultiEditTool/MultiEditTool.tsx +366 -0
  185. package/src/tools/MultiEditTool/prompt.ts +45 -0
  186. package/src/tools/NotebookEditTool/NotebookEditTool.tsx +298 -0
  187. package/src/tools/NotebookEditTool/prompt.ts +3 -0
  188. package/src/tools/NotebookReadTool/NotebookReadTool.tsx +266 -0
  189. package/src/tools/NotebookReadTool/prompt.ts +3 -0
  190. package/src/tools/StickerRequestTool/StickerRequestTool.tsx +93 -0
  191. package/src/tools/StickerRequestTool/prompt.ts +19 -0
  192. package/src/tools/TaskTool/TaskTool.tsx +382 -0
  193. package/src/tools/TaskTool/constants.ts +1 -0
  194. package/src/tools/TaskTool/prompt.ts +56 -0
  195. package/src/tools/ThinkTool/ThinkTool.tsx +56 -0
  196. package/src/tools/ThinkTool/prompt.ts +12 -0
  197. package/src/tools/TodoWriteTool/TodoWriteTool.tsx +289 -0
  198. package/src/tools/TodoWriteTool/prompt.ts +63 -0
  199. package/src/tools/lsTool/lsTool.tsx +269 -0
  200. package/src/tools/lsTool/prompt.ts +2 -0
  201. package/src/tools.ts +63 -0
  202. package/src/types/PermissionMode.ts +120 -0
  203. package/src/types/RequestContext.ts +72 -0
  204. package/src/utils/Cursor.ts +436 -0
  205. package/src/utils/PersistentShell.ts +373 -0
  206. package/src/utils/agentStorage.ts +97 -0
  207. package/src/utils/array.ts +3 -0
  208. package/src/utils/ask.tsx +98 -0
  209. package/src/utils/auth.ts +13 -0
  210. package/src/utils/autoCompactCore.ts +223 -0
  211. package/src/utils/autoUpdater.ts +318 -0
  212. package/src/utils/betas.ts +20 -0
  213. package/src/utils/browser.ts +14 -0
  214. package/src/utils/cleanup.ts +72 -0
  215. package/src/utils/commands.ts +261 -0
  216. package/src/utils/config.ts +771 -0
  217. package/src/utils/conversationRecovery.ts +54 -0
  218. package/src/utils/debugLogger.ts +1123 -0
  219. package/src/utils/diff.ts +42 -0
  220. package/src/utils/env.ts +57 -0
  221. package/src/utils/errors.ts +21 -0
  222. package/src/utils/exampleCommands.ts +108 -0
  223. package/src/utils/execFileNoThrow.ts +51 -0
  224. package/src/utils/expertChatStorage.ts +136 -0
  225. package/src/utils/file.ts +402 -0
  226. package/src/utils/fileRecoveryCore.ts +71 -0
  227. package/src/utils/format.tsx +44 -0
  228. package/src/utils/generators.ts +62 -0
  229. package/src/utils/git.ts +92 -0
  230. package/src/utils/globalLogger.ts +77 -0
  231. package/src/utils/http.ts +10 -0
  232. package/src/utils/imagePaste.ts +38 -0
  233. package/src/utils/json.ts +13 -0
  234. package/src/utils/log.ts +382 -0
  235. package/src/utils/markdown.ts +213 -0
  236. package/src/utils/messageContextManager.ts +289 -0
  237. package/src/utils/messages.tsx +938 -0
  238. package/src/utils/model.ts +836 -0
  239. package/src/utils/permissions/filesystem.ts +118 -0
  240. package/src/utils/ripgrep.ts +167 -0
  241. package/src/utils/sessionState.ts +49 -0
  242. package/src/utils/state.ts +25 -0
  243. package/src/utils/style.ts +29 -0
  244. package/src/utils/terminal.ts +49 -0
  245. package/src/utils/theme.ts +122 -0
  246. package/src/utils/thinking.ts +144 -0
  247. package/src/utils/todoStorage.ts +431 -0
  248. package/src/utils/tokens.ts +43 -0
  249. package/src/utils/toolExecutionController.ts +163 -0
  250. package/src/utils/unaryLogging.ts +26 -0
  251. package/src/utils/user.ts +37 -0
  252. package/src/utils/validate.ts +165 -0
  253. package/cli.mjs +0 -1803
@@ -0,0 +1,3208 @@
1
+ import React, { useState, useEffect, useCallback, useRef } from 'react'
2
+ import { Box, Text, useInput } from 'ink'
3
+ import { getTheme } from '../utils/theme'
4
+ import { Select } from './CustomSelect/select'
5
+ import { Newline } from 'ink'
6
+ import { getModelManager } from '../utils/model'
7
+
8
// Shared screen wrapper that draws a single rounded border around each
// selector screen so individual screens don't repeat the border markup.
// While an exit keypress is pending, a "(press <key> again to exit)" hint
// is appended to the title.
function ScreenContainer({
  title,
  exitState,
  children,
}: {
  title: string
  exitState: { pending: boolean; keyName: string }
  children: React.ReactNode
}) {
  const { secondaryBorder } = getTheme()
  const exitHint = exitState.pending
    ? `(press ${exitState.keyName} again to exit)`
    : ''
  return (
    <Box
      borderStyle="round"
      borderColor={secondaryBorder}
      flexDirection="column"
      gap={1}
      paddingX={2}
      paddingY={1}
    >
      <Text bold>
        {title}
        {' '}
        {exitHint}
      </Text>
      {children}
    </Box>
  )
}
36
+ import { PRODUCT_NAME } from '../constants/product'
37
+ import { useExitOnCtrlCD } from '../hooks/useExitOnCtrlCD'
38
+ import {
39
+ getGlobalConfig,
40
+ saveGlobalConfig,
41
+ ProviderType,
42
+ ModelPointerType,
43
+ setAllPointersToModel,
44
+ setModelPointer,
45
+ } from '../utils/config.js'
46
+ import models, { providers } from '../constants/models'
47
+ import TextInput from './TextInput'
48
+ import OpenAI from 'openai'
49
+ import chalk from 'chalk'
50
+ import { fetchAnthropicModels, verifyApiKey } from '../services/claude'
51
+ import { fetchCustomModels } from '../services/openai'
52
+ type Props = {
53
+ onDone: () => void
54
+ abortController?: AbortController
55
+ targetPointer?: ModelPointerType // NEW: Target pointer for configuration
56
+ isOnboarding?: boolean // NEW: Whether this is first-time setup
57
+ onCancel?: () => void // NEW: Cancel callback (different from onDone)
58
+ }
59
+
60
+ type ModelInfo = {
61
+ model: string
62
+ provider: string
63
+ [key: string]: any
64
+ }
65
+
66
+ // Define reasoning effort options
67
+ type ReasoningEffortOption = 'low' | 'medium' | 'high'
68
+
69
+ // Define context length options (in tokens)
70
+ type ContextLengthOption = {
71
+ label: string
72
+ value: number
73
+ }
74
+
75
+ const CONTEXT_LENGTH_OPTIONS: ContextLengthOption[] = [
76
+ { label: '32K tokens', value: 32000 },
77
+ { label: '64K tokens', value: 64000 },
78
+ { label: '128K tokens', value: 128000 },
79
+ { label: '200K tokens', value: 200000 },
80
+ { label: '256K tokens', value: 256000 },
81
+ { label: '300K tokens', value: 300000 },
82
+ { label: '512K tokens', value: 512000 },
83
+ { label: '1000K tokens', value: 1000000 },
84
+ { label: '2000K tokens', value: 2000000 },
85
+ { label: '3000K tokens', value: 3000000 },
86
+ { label: '5000K tokens', value: 5000000 },
87
+ { label: '10000K tokens', value: 10000000 },
88
+ ]
89
+
90
+ const DEFAULT_CONTEXT_LENGTH = 128000
91
+
92
+ // Define max tokens options
93
+ type MaxTokensOption = {
94
+ label: string
95
+ value: number
96
+ }
97
+
98
+ const MAX_TOKENS_OPTIONS: MaxTokensOption[] = [
99
+ { label: '1K tokens', value: 1024 },
100
+ { label: '2K tokens', value: 2048 },
101
+ { label: '4K tokens', value: 4096 },
102
+ { label: '8K tokens (recommended)', value: 8192 },
103
+ { label: '16K tokens', value: 16384 },
104
+ { label: '32K tokens', value: 32768 },
105
+ { label: '64K tokens', value: 65536 },
106
+ { label: '128K tokens', value: 131072 },
107
+ ]
108
+
109
+ const DEFAULT_MAX_TOKENS = 8192
110
+
111
// Custom hook: invoke `onEscape` when the user presses Escape, debouncing
// repeated presses within a 100ms window so one physical keypress triggers
// exactly one navigation step.
function useEscapeNavigation(
  onEscape: () => void,
  abortController?: AbortController,
) {
  // Debounce flag — true while an Escape press is being handled.
  const escapeInFlight = useRef(false)

  useInput(
    (input, key) => {
      if (!key.escape || escapeInFlight.current) {
        return
      }
      escapeInFlight.current = true
      // Re-arm shortly after, so backing out of several screens with quick
      // successive Escape presses still works.
      setTimeout(() => {
        escapeInFlight.current = false
      }, 100)
      onEscape()
    },
    { isActive: true },
  )
}
133
+
134
+ function printModelConfig() {
135
+ const config = getGlobalConfig()
136
+ // Only show ModelProfile information - no legacy fields
137
+ const modelProfiles = config.modelProfiles || []
138
+ const activeProfiles = modelProfiles.filter(p => p.isActive)
139
+
140
+ if (activeProfiles.length === 0) {
141
+ console.log(chalk.gray(' ⎿ No active model profiles configured'))
142
+ return
143
+ }
144
+
145
+ const profileSummary = activeProfiles
146
+ .map(p => `${p.name} (${p.provider}: ${p.modelName})`)
147
+ .join(' | ')
148
+ console.log(chalk.gray(` ⎿ ${profileSummary}`))
149
+ }
150
+
151
+ export function ModelSelector({
152
+ onDone: onDoneProp,
153
+ abortController,
154
+ targetPointer,
155
+ isOnboarding = false,
156
+ onCancel,
157
+ }: Props): React.ReactNode {
158
+ const config = getGlobalConfig()
159
+ const theme = getTheme()
160
+ const onDone = () => {
161
+ printModelConfig()
162
+ onDoneProp()
163
+ }
164
+ // Initialize the exit hook but don't use it for Escape key
165
+ const exitState = useExitOnCtrlCD(() => process.exit(0))
166
+
167
+ // Always start with provider selection in new system
168
+ const getInitialScreen = (): string => {
169
+ return 'provider'
170
+ }
171
+
172
+ // Screen navigation stack
173
+ const [screenStack, setScreenStack] = useState<
174
+ Array<
175
+ | 'provider'
176
+ | 'anthropicSubMenu'
177
+ | 'apiKey'
178
+ | 'resourceName'
179
+ | 'baseUrl'
180
+ | 'model'
181
+ | 'modelInput'
182
+ | 'modelParams'
183
+ | 'contextLength'
184
+ | 'connectionTest'
185
+ | 'confirmation'
186
+ >
187
+ >([getInitialScreen()])
188
+
189
+ // Current screen is always the last item in the stack
190
+ const currentScreen = screenStack[screenStack.length - 1]
191
+
192
+ // Function to navigate to a new screen
193
+ const navigateTo = (
194
+ screen:
195
+ | 'provider'
196
+ | 'anthropicSubMenu'
197
+ | 'apiKey'
198
+ | 'resourceName'
199
+ | 'baseUrl'
200
+ | 'model'
201
+ | 'modelInput'
202
+ | 'modelParams'
203
+ | 'contextLength'
204
+ | 'connectionTest'
205
+ | 'confirmation',
206
+ ) => {
207
+ setScreenStack(prev => [...prev, screen])
208
+ }
209
+
210
+ // Function to go back to the previous screen
211
+ const goBack = () => {
212
+ if (screenStack.length > 1) {
213
+ // Remove the current screen from the stack
214
+ setScreenStack(prev => prev.slice(0, -1))
215
+ } else {
216
+ // If we're at the first screen, call onDone to exit
217
+ onDone()
218
+ }
219
+ }
220
+
221
+ // State for model configuration
222
+ const [selectedProvider, setSelectedProvider] = useState<ProviderType>(
223
+ config.primaryProvider ?? 'anthropic',
224
+ )
225
+
226
+ // State for Anthropic provider sub-menu
227
+ const [anthropicProviderType, setAnthropicProviderType] = useState<
228
+ 'official' | 'bigdream' | 'opendev' | 'custom'
229
+ >('official')
230
+ const [selectedModel, setSelectedModel] = useState<string>('')
231
+ const [apiKey, setApiKey] = useState<string>('')
232
+
233
+ // New state for model parameters
234
+ const [maxTokens, setMaxTokens] = useState<string>(
235
+ config.maxTokens?.toString() || DEFAULT_MAX_TOKENS.toString(),
236
+ )
237
+ const [maxTokensMode, setMaxTokensMode] = useState<'preset' | 'custom'>(
238
+ 'preset',
239
+ )
240
+ const [selectedMaxTokensPreset, setSelectedMaxTokensPreset] =
241
+ useState<number>(config.maxTokens || DEFAULT_MAX_TOKENS)
242
+ const [reasoningEffort, setReasoningEffort] =
243
+ useState<ReasoningEffortOption>('medium')
244
+ const [supportsReasoningEffort, setSupportsReasoningEffort] =
245
+ useState<boolean>(false)
246
+
247
+ // Context length state (use default instead of legacy config)
248
+ const [contextLength, setContextLength] = useState<number>(
249
+ DEFAULT_CONTEXT_LENGTH,
250
+ )
251
+
252
+ // Form focus state
253
+ const [activeFieldIndex, setActiveFieldIndex] = useState(0)
254
+ const [maxTokensCursorOffset, setMaxTokensCursorOffset] = useState<number>(0)
255
+
256
+ // UI state
257
+
258
+ // Search and model loading state
259
+ const [availableModels, setAvailableModels] = useState<ModelInfo[]>([])
260
+ const [isLoadingModels, setIsLoadingModels] = useState(false)
261
+ const [modelLoadError, setModelLoadError] = useState<string | null>(null)
262
+ const [modelSearchQuery, setModelSearchQuery] = useState<string>('')
263
+ const [modelSearchCursorOffset, setModelSearchCursorOffset] =
264
+ useState<number>(0)
265
+ const [cursorOffset, setCursorOffset] = useState<number>(0)
266
+ const [apiKeyEdited, setApiKeyEdited] = useState<boolean>(false)
267
+
268
+ // Retry logic state
269
+ const [fetchRetryCount, setFetchRetryCount] = useState<number>(0)
270
+ const [isRetrying, setIsRetrying] = useState<boolean>(false)
271
+
272
+ // Connection test state
273
+ const [isTestingConnection, setIsTestingConnection] = useState<boolean>(false)
274
+ const [connectionTestResult, setConnectionTestResult] = useState<{
275
+ success: boolean
276
+ message: string
277
+ endpoint?: string
278
+ details?: string
279
+ } | null>(null)
280
+
281
+ // Validation error state for duplicate model detection
282
+ const [validationError, setValidationError] = useState<string | null>(null)
283
+
284
+ // State for Azure-specific configuration
285
+ const [resourceName, setResourceName] = useState<string>('')
286
+ const [resourceNameCursorOffset, setResourceNameCursorOffset] =
287
+ useState<number>(0)
288
+ const [customModelName, setCustomModelName] = useState<string>('')
289
+ const [customModelNameCursorOffset, setCustomModelNameCursorOffset] =
290
+ useState<number>(0)
291
+
292
+ // State for Ollama-specific configuration
293
+ const [ollamaBaseUrl, setOllamaBaseUrl] = useState<string>(
294
+ 'http://localhost:11434/v1',
295
+ )
296
+ const [ollamaBaseUrlCursorOffset, setOllamaBaseUrlCursorOffset] =
297
+ useState<number>(0)
298
+
299
+ // State for custom OpenAI-compatible API configuration
300
+ const [customBaseUrl, setCustomBaseUrl] = useState<string>('')
301
+ const [customBaseUrlCursorOffset, setCustomBaseUrlCursorOffset] =
302
+ useState<number>(0)
303
+
304
+ // State for provider base URL configuration (used for all providers)
305
+ const [providerBaseUrl, setProviderBaseUrl] = useState<string>('')
306
+ const [providerBaseUrlCursorOffset, setProviderBaseUrlCursorOffset] =
307
+ useState<number>(0)
308
+
309
+ // Reasoning effort options
310
+ const reasoningEffortOptions = [
311
+ { label: 'Low - Faster responses, less thorough reasoning', value: 'low' },
312
+ { label: 'Medium - Balanced speed and reasoning depth', value: 'medium' },
313
+ {
314
+ label: 'High - Slower responses, more thorough reasoning',
315
+ value: 'high',
316
+ },
317
+ ]
318
+
319
+ // Get available providers from models.ts, excluding community Claude providers (now in Anthropic sub-menu)
320
+ const availableProviders = Object.keys(providers).filter(
321
+ provider => provider !== 'bigdream' && provider !== 'opendev',
322
+ )
323
+
324
+ // Create provider options with nice labels
325
+ const providerOptions = availableProviders.map(provider => {
326
+ const modelCount = models[provider]?.length || 0
327
+ const label = getProviderLabel(provider, modelCount)
328
+ return {
329
+ label,
330
+ value: provider,
331
+ }
332
+ })
333
+
334
+ useEffect(() => {
335
+ if (!apiKeyEdited && selectedProvider) {
336
+ if (process.env[selectedProvider.toUpperCase() + '_API_KEY']) {
337
+ setApiKey(
338
+ process.env[selectedProvider.toUpperCase() + '_API_KEY'] as string,
339
+ )
340
+ } else {
341
+ setApiKey('')
342
+ }
343
+ }
344
+ }, [selectedProvider, apiKey, apiKeyEdited])
345
+
346
+ // Ensure contextLength is always set to a valid option when contextLength screen is displayed
347
+ useEffect(() => {
348
+ if (
349
+ currentScreen === 'contextLength' &&
350
+ !CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength)
351
+ ) {
352
+ setContextLength(DEFAULT_CONTEXT_LENGTH)
353
+ }
354
+ }, [currentScreen, contextLength])
355
+
356
+ // Create a set of model names from our constants/models.ts for the current provider
357
+ const ourModelNames = new Set(
358
+ (models[selectedProvider as keyof typeof models] || []).map(
359
+ (model: any) => model.model,
360
+ ),
361
+ )
362
+
363
+ // Create model options from available models, filtered by search query
364
+ const filteredModels = modelSearchQuery
365
+ ? availableModels.filter(model =>
366
+ model.model?.toLowerCase().includes(modelSearchQuery.toLowerCase()),
367
+ )
368
+ : availableModels
369
+
370
+ // Sort models with priority for specific keywords
371
+ const sortModelsByPriority = (models: ModelInfo[]) => {
372
+ const priorityKeywords = [
373
+ 'claude',
374
+ 'kimi',
375
+ 'deepseek',
376
+ 'minimax',
377
+ 'o3',
378
+ 'gpt',
379
+ 'qwen',
380
+ ]
381
+
382
+ return models.sort((a, b) => {
383
+ // Add safety checks for undefined model names
384
+ const aModelLower = a.model?.toLowerCase() || ''
385
+ const bModelLower = b.model?.toLowerCase() || ''
386
+
387
+ // Check if models contain priority keywords
388
+ const aHasPriority = priorityKeywords.some(keyword =>
389
+ aModelLower.includes(keyword),
390
+ )
391
+ const bHasPriority = priorityKeywords.some(keyword =>
392
+ bModelLower.includes(keyword),
393
+ )
394
+
395
+ // If one has priority and the other doesn't, prioritize the one with keywords
396
+ if (aHasPriority && !bHasPriority) return -1
397
+ if (!aHasPriority && bHasPriority) return 1
398
+
399
+ // If both have priority or neither has priority, sort alphabetically
400
+ return a.model.localeCompare(b.model)
401
+ })
402
+ }
403
+
404
+ const sortedFilteredModels = sortModelsByPriority(filteredModels)
405
+
406
+ const modelOptions = sortedFilteredModels.map(model => {
407
+ // Check if this model is in our constants/models.ts list
408
+ const isInOurModels = ourModelNames.has(model.model)
409
+
410
+ return {
411
+ label: `${model.model}${getModelDetails(model)}`,
412
+ value: model.model,
413
+ }
414
+ })
415
+
416
+ function getModelDetails(model: ModelInfo): string {
417
+ const details = []
418
+
419
+ if (model.max_tokens) {
420
+ details.push(`${formatNumber(model.max_tokens)} tokens`)
421
+ }
422
+
423
+ if (model.supports_vision) {
424
+ details.push('vision')
425
+ }
426
+
427
+ if (model.supports_function_calling) {
428
+ details.push('tools')
429
+ }
430
+
431
+ return details.length > 0 ? ` (${details.join(', ')})` : ''
432
+ }
433
+
434
+ function formatNumber(num: number): string {
435
+ if (num >= 1000000) {
436
+ return `${(num / 1000000).toFixed(1)}M`
437
+ } else if (num >= 1000) {
438
+ return `${(num / 1000).toFixed(0)}K`
439
+ }
440
+ return num.toString()
441
+ }
442
+
443
+ function getProviderLabel(provider: string, modelCount: number): string {
444
+ // Use provider names from the providers object if available
445
+ if (providers[provider]) {
446
+ return `${providers[provider].name} ${providers[provider].status === 'wip' ? '(WIP)' : ''} (${modelCount} models)`
447
+ }
448
+ return `${provider}`
449
+ }
450
+
451
// Handle the user's choice on the provider-selection screen and route to
// the appropriate next screen:
//   - 'custom'      → persist immediately and close the selector
//                     (saveConfiguration is defined elsewhere in this file).
//   - 'anthropic'   → open the sub-menu (official / community proxy / custom).
//   - anything else → seed the provider's default base URL and go to the
//                     base-URL screen.
function handleProviderSelection(provider: string) {
  const providerType = provider as ProviderType
  setSelectedProvider(providerType)

  if (provider === 'custom') {
    // For custom provider, save and exit.
    // NOTE(review): selectedModel may still be '' at this point — presumably
    // the custom flow fills the model in later; verify against
    // saveConfiguration's handling of an empty model name.
    saveConfiguration(providerType, selectedModel || '')
    onDone()
  } else if (provider === 'anthropic') {
    // Anthropic gets its own sub-menu to choose between the official API,
    // community proxies, or a custom endpoint.
    navigateTo('anthropicSubMenu')
  } else {
    // All other providers: collect/confirm the base URL first, pre-filled
    // with the provider's default from constants/models (empty if unknown).
    const defaultBaseUrl = providers[providerType]?.baseURL || ''
    setProviderBaseUrl(defaultBaseUrl)
    navigateTo('baseUrl')
  }
}
470
+
471
// Local implementation of fetchAnthropicModels for UI.
// Fetches the model list from an Anthropic-compatible `${baseURL}/v1/models`
// endpoint and maps HTTP/network failures to user-facing error messages.
// NOTE(review): this local declaration shadows the fetchAnthropicModels
// imported from '../services/claude' within the component scope — confirm
// the shadowing is intentional.
async function fetchAnthropicModels(baseURL: string, apiKey: string) {
  try {
    const response = await fetch(`${baseURL}/v1/models`, {
      method: 'GET',
      headers: {
        // Anthropic-style auth header (not OpenAI's Authorization: Bearer).
        'x-api-key': apiKey,
        'anthropic-version': '2023-06-01',
        'Content-Type': 'application/json',
      },
    })

    // Translate HTTP status codes into actionable messages for the dialog.
    if (!response.ok) {
      if (response.status === 401) {
        throw new Error(
          'Invalid API key. Please check your API key and try again.',
        )
      } else if (response.status === 403) {
        throw new Error('API key does not have permission to access models.')
      } else if (response.status === 404) {
        throw new Error(
          'API endpoint not found. This provider may not support model listing.',
        )
      } else if (response.status === 429) {
        // NOTE(review): this message does not match any substring in the
        // catch-block passthrough filter below, so a 429 is re-reported as
        // the generic "Failed to fetch models" message — confirm intended.
        throw new Error(
          'Too many requests. Please wait a moment and try again.',
        )
      } else if (response.status >= 500) {
        throw new Error(
          'API service is temporarily unavailable. Please try again later.',
        )
      } else {
        throw new Error(`Unable to connect to API (${response.status}).`)
      }
    }

    const data = await response.json()

    // Handle different response formats: Anthropic-style { data: [...] },
    // a bare array, or { models: [...] } used by some compatible proxies.
    let models = []
    if (data && data.data && Array.isArray(data.data)) {
      models = data.data
    } else if (Array.isArray(data)) {
      models = data
    } else if (data && data.models && Array.isArray(data.models)) {
      models = data.models
    } else {
      throw new Error('API returned unexpected response format.')
    }

    return models
  } catch (error) {
    // Re-throw the user-facing errors constructed above unchanged,
    // identified by message substrings.
    if (
      error instanceof Error &&
      (error.message.includes('API key') ||
        error.message.includes('API endpoint') ||
        error.message.includes('API service') ||
        error.message.includes('response format'))
    ) {
      throw error
    }

    // Network-level failures (e.g. Node's "fetch failed" TypeError) become a
    // connectivity hint rather than a raw stack message.
    if (error instanceof Error && error.message.includes('fetch')) {
      throw new Error(
        'Unable to connect to the API. Please check the base URL and your internet connection.',
      )
    }

    // Anything else (including the 429 message above) collapses to a
    // generic failure message.
    throw new Error(
      'Failed to fetch models from API. Please check your configuration and try again.',
    )
  }
}
544
+
545
+ // 通用的Anthropic兼容模型获取函数,实现三层降级策略
546
+ async function fetchAnthropicCompatibleModelsWithFallback(
547
+ baseURL: string,
548
+ provider: string,
549
+ apiKeyUrl: string,
550
+ ) {
551
+ let lastError: Error | null = null
552
+
553
+ // 第一层:尝试使用 Anthropic 风格的 API
554
+ try {
555
+ const models = await fetchAnthropicModels(baseURL, apiKey)
556
+ return models.map((model: any) => ({
557
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
558
+ provider: provider,
559
+ max_tokens: model.max_tokens || 8192,
560
+ supports_vision: model.supports_vision || true,
561
+ supports_function_calling: model.supports_function_calling || true,
562
+ supports_reasoning_effort: false,
563
+ }))
564
+ } catch (error) {
565
+ lastError = error as Error
566
+ console.log(
567
+ `Anthropic API failed for ${provider}, trying OpenAI format:`,
568
+ error,
569
+ )
570
+ }
571
+
572
+ // 第二层:尝试使用 OpenAI 风格的 API
573
+ try {
574
+ const models = await fetchCustomModels(baseURL, apiKey)
575
+ return models.map((model: any) => ({
576
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
577
+ provider: provider,
578
+ max_tokens: model.max_tokens || 8192,
579
+ supports_vision: model.supports_vision || false,
580
+ supports_function_calling: model.supports_function_calling || true,
581
+ supports_reasoning_effort: false,
582
+ }))
583
+ } catch (error) {
584
+ lastError = error as Error
585
+ console.log(
586
+ `OpenAI API failed for ${provider}, falling back to manual input:`,
587
+ error,
588
+ )
589
+ }
590
+
591
+ // 第三层:抛出错误,触发手动输入模式
592
+ let errorMessage = `Failed to fetch ${provider} models using both Anthropic and OpenAI API formats`
593
+
594
+ if (lastError) {
595
+ errorMessage = lastError.message
596
+ }
597
+
598
+ // 添加有用的建议
599
+ if (errorMessage.includes('API key')) {
600
+ errorMessage += `\n\n💡 Tip: Get your API key from ${apiKeyUrl}`
601
+ } else if (errorMessage.includes('permission')) {
602
+ errorMessage += `\n\n💡 Tip: Make sure your API key has access to the ${provider} API`
603
+ } else if (errorMessage.includes('connection')) {
604
+ errorMessage += '\n\n💡 Tip: Check your internet connection and try again'
605
+ }
606
+
607
+ setModelLoadError(errorMessage)
608
+ throw new Error(errorMessage)
609
+ }
610
+
611
+ // 统一处理所有Anthropic兼容提供商的模型获取
612
+ async function fetchAnthropicCompatibleProviderModels() {
613
+ // 根据anthropicProviderType确定默认baseURL和API key获取地址
614
+ let defaultBaseURL: string
615
+ let apiKeyUrl: string
616
+ let actualProvider: string
617
+
618
+ switch (anthropicProviderType) {
619
+ case 'official':
620
+ defaultBaseURL = 'https://api.anthropic.com'
621
+ apiKeyUrl = 'https://console.anthropic.com/settings/keys'
622
+ actualProvider = 'anthropic'
623
+ break
624
+ case 'bigdream':
625
+ defaultBaseURL = 'https://api-key.info'
626
+ apiKeyUrl = 'https://api-key.info/register?aff=MSl4'
627
+ actualProvider = 'bigdream'
628
+ break
629
+ case 'opendev':
630
+ defaultBaseURL = 'https://api.openai-next.com'
631
+ apiKeyUrl = 'https://api.openai-next.com/register/?aff_code=4xo7'
632
+ actualProvider = 'opendev'
633
+ break
634
+ case 'custom':
635
+ defaultBaseURL = providerBaseUrl
636
+ apiKeyUrl = 'your custom API provider'
637
+ actualProvider = 'anthropic'
638
+ break
639
+ default:
640
+ throw new Error(
641
+ `Unsupported Anthropic provider type: ${anthropicProviderType}`,
642
+ )
643
+ }
644
+
645
+ const baseURL =
646
+ anthropicProviderType === 'custom'
647
+ ? providerBaseUrl
648
+ : providerBaseUrl || defaultBaseURL
649
+ return await fetchAnthropicCompatibleModelsWithFallback(
650
+ baseURL,
651
+ actualProvider,
652
+ apiKeyUrl,
653
+ )
654
+ }
655
+
656
+ // Remove duplicate function definitions - using unified fetchAnthropicCompatibleProviderModels instead
657
+
658
+ async function fetchKimiModels() {
659
+ try {
660
+ const baseURL = providerBaseUrl || 'https://api.moonshot.cn/v1'
661
+ const models = await fetchCustomModels(baseURL, apiKey)
662
+
663
+ const kimiModels = models.map((model: any) => ({
664
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
665
+ provider: 'kimi',
666
+ max_tokens: model.max_tokens || 8192,
667
+ supports_vision: false, // Default to false, could be enhanced
668
+ supports_function_calling: true,
669
+ supports_reasoning_effort: false,
670
+ }))
671
+
672
+ return kimiModels
673
+ } catch (error) {
674
+ let errorMessage = 'Failed to fetch Kimi models'
675
+
676
+ if (error instanceof Error) {
677
+ errorMessage = error.message
678
+ }
679
+
680
+ // Add helpful suggestions based on error type
681
+ if (errorMessage.includes('API key')) {
682
+ errorMessage +=
683
+ '\n\n💡 Tip: Get your API key from https://platform.moonshot.cn/console/api-keys'
684
+ } else if (errorMessage.includes('permission')) {
685
+ errorMessage +=
686
+ '\n\n💡 Tip: Make sure your API key has access to the Kimi API'
687
+ } else if (errorMessage.includes('connection')) {
688
+ errorMessage +=
689
+ '\n\n💡 Tip: Check your internet connection and try again'
690
+ }
691
+
692
+ setModelLoadError(errorMessage)
693
+ throw error
694
+ }
695
+ }
696
+
697
+ async function fetchDeepSeekModels() {
698
+ try {
699
+ const baseURL = providerBaseUrl || 'https://api.deepseek.com'
700
+ const models = await fetchCustomModels(baseURL, apiKey)
701
+
702
+ const deepseekModels = models.map((model: any) => ({
703
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
704
+ provider: 'deepseek',
705
+ max_tokens: model.max_tokens || 8192,
706
+ supports_vision: false, // Default to false, could be enhanced
707
+ supports_function_calling: true,
708
+ supports_reasoning_effort: false,
709
+ }))
710
+
711
+ return deepseekModels
712
+ } catch (error) {
713
+ let errorMessage = 'Failed to fetch DeepSeek models'
714
+
715
+ if (error instanceof Error) {
716
+ errorMessage = error.message
717
+ }
718
+
719
+ // Add helpful suggestions based on error type
720
+ if (errorMessage.includes('API key')) {
721
+ errorMessage +=
722
+ '\n\n💡 Tip: Get your API key from https://platform.deepseek.com/api_keys'
723
+ } else if (errorMessage.includes('permission')) {
724
+ errorMessage +=
725
+ '\n\n💡 Tip: Make sure your API key has access to the DeepSeek API'
726
+ } else if (errorMessage.includes('connection')) {
727
+ errorMessage +=
728
+ '\n\n💡 Tip: Check your internet connection and try again'
729
+ }
730
+
731
+ setModelLoadError(errorMessage)
732
+ throw error
733
+ }
734
+ }
735
+
736
+ async function fetchSiliconFlowModels() {
737
+ try {
738
+ const baseURL = providerBaseUrl || 'https://api.siliconflow.cn/v1'
739
+ const models = await fetchCustomModels(baseURL, apiKey)
740
+
741
+ const siliconflowModels = models.map((model: any) => ({
742
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
743
+ provider: 'siliconflow',
744
+ max_tokens: model.max_tokens || 8192,
745
+ supports_vision: false, // Default to false, could be enhanced
746
+ supports_function_calling: true,
747
+ supports_reasoning_effort: false,
748
+ }))
749
+
750
+ return siliconflowModels
751
+ } catch (error) {
752
+ let errorMessage = 'Failed to fetch SiliconFlow models'
753
+
754
+ if (error instanceof Error) {
755
+ errorMessage = error.message
756
+ }
757
+
758
+ // Add helpful suggestions based on error type
759
+ if (errorMessage.includes('API key')) {
760
+ errorMessage +=
761
+ '\n\n💡 Tip: Get your API key from https://cloud.siliconflow.cn/i/oJWsm6io'
762
+ } else if (errorMessage.includes('permission')) {
763
+ errorMessage +=
764
+ '\n\n💡 Tip: Make sure your API key has access to the SiliconFlow API'
765
+ } else if (errorMessage.includes('connection')) {
766
+ errorMessage +=
767
+ '\n\n💡 Tip: Check your internet connection and try again'
768
+ }
769
+
770
+ setModelLoadError(errorMessage)
771
+ throw error
772
+ }
773
+ }
774
+
775
+ async function fetchQwenModels() {
776
+ try {
777
+ const baseURL =
778
+ providerBaseUrl || 'https://dashscope.aliyuncs.com/compatible-mode/v1'
779
+ const models = await fetchCustomModels(baseURL, apiKey)
780
+
781
+ const qwenModels = models.map((model: any) => ({
782
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
783
+ provider: 'qwen',
784
+ max_tokens: model.max_tokens || 8192,
785
+ supports_vision: false,
786
+ supports_function_calling: true,
787
+ supports_reasoning_effort: false,
788
+ }))
789
+
790
+ return qwenModels
791
+ } catch (error) {
792
+ let errorMessage = 'Failed to fetch Qwen models'
793
+
794
+ if (error instanceof Error) {
795
+ errorMessage = error.message
796
+ }
797
+
798
+ if (errorMessage.includes('API key')) {
799
+ errorMessage +=
800
+ '\n\n💡 Tip: Get your API key from https://bailian.console.aliyun.com/?tab=model#/api-key'
801
+ } else if (errorMessage.includes('permission')) {
802
+ errorMessage +=
803
+ '\n\n💡 Tip: Make sure your API key has access to the Qwen API'
804
+ } else if (errorMessage.includes('connection')) {
805
+ errorMessage +=
806
+ '\n\n💡 Tip: Check your internet connection and try again'
807
+ }
808
+
809
+ setModelLoadError(errorMessage)
810
+ throw error
811
+ }
812
+ }
813
+
814
+ async function fetchGLMModels() {
815
+ try {
816
+ const baseURL = providerBaseUrl || 'https://open.bigmodel.cn/api/paas/v4'
817
+ const models = await fetchCustomModels(baseURL, apiKey)
818
+
819
+ const glmModels = models.map((model: any) => ({
820
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
821
+ provider: 'glm',
822
+ max_tokens: model.max_tokens || 8192,
823
+ supports_vision: false,
824
+ supports_function_calling: true,
825
+ supports_reasoning_effort: false,
826
+ }))
827
+
828
+ return glmModels
829
+ } catch (error) {
830
+ let errorMessage = 'Failed to fetch GLM models'
831
+
832
+ if (error instanceof Error) {
833
+ errorMessage = error.message
834
+ }
835
+
836
+ if (errorMessage.includes('API key')) {
837
+ errorMessage +=
838
+ '\n\n💡 Tip: Get your API key from https://open.bigmodel.cn (API Keys section)'
839
+ } else if (errorMessage.includes('permission')) {
840
+ errorMessage +=
841
+ '\n\n💡 Tip: Make sure your API key has access to the GLM API'
842
+ } else if (errorMessage.includes('connection')) {
843
+ errorMessage +=
844
+ '\n\n💡 Tip: Check your internet connection and try again'
845
+ }
846
+
847
+ setModelLoadError(errorMessage)
848
+ throw error
849
+ }
850
+ }
851
+
852
+ async function fetchMinimaxModels() {
853
+ try {
854
+ const baseURL = providerBaseUrl || 'https://api.minimaxi.com/v1'
855
+ const models = await fetchCustomModels(baseURL, apiKey)
856
+
857
+ const minimaxModels = models.map((model: any) => ({
858
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
859
+ provider: 'minimax',
860
+ max_tokens: model.max_tokens || 8192,
861
+ supports_vision: false,
862
+ supports_function_calling: true,
863
+ supports_reasoning_effort: false,
864
+ }))
865
+
866
+ return minimaxModels
867
+ } catch (error) {
868
+ let errorMessage = 'Failed to fetch MiniMax models'
869
+
870
+ if (error instanceof Error) {
871
+ errorMessage = error.message
872
+ }
873
+
874
+ if (errorMessage.includes('API key')) {
875
+ errorMessage +=
876
+ '\n\n💡 Tip: Get your API key from https://www.minimax.io/platform/user-center/basic-information'
877
+ } else if (errorMessage.includes('permission')) {
878
+ errorMessage +=
879
+ '\n\n💡 Tip: Make sure your API key has access to the MiniMax API'
880
+ } else if (errorMessage.includes('connection')) {
881
+ errorMessage +=
882
+ '\n\n💡 Tip: Check your internet connection and try again'
883
+ }
884
+
885
+ setModelLoadError(errorMessage)
886
+ throw error
887
+ }
888
+ }
889
+
890
+ async function fetchBaiduQianfanModels() {
891
+ try {
892
+ const baseURL = providerBaseUrl || 'https://qianfan.baidubce.com/v2'
893
+ const models = await fetchCustomModels(baseURL, apiKey)
894
+
895
+ const baiduModels = models.map((model: any) => ({
896
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
897
+ provider: 'baidu-qianfan',
898
+ max_tokens: model.max_tokens || 8192,
899
+ supports_vision: false,
900
+ supports_function_calling: true,
901
+ supports_reasoning_effort: false,
902
+ }))
903
+
904
+ return baiduModels
905
+ } catch (error) {
906
+ let errorMessage = 'Failed to fetch Baidu Qianfan models'
907
+
908
+ if (error instanceof Error) {
909
+ errorMessage = error.message
910
+ }
911
+
912
+ if (errorMessage.includes('API key')) {
913
+ errorMessage +=
914
+ '\n\n💡 Tip: Get your API key from https://console.bce.baidu.com/iam/#/iam/accesslist'
915
+ } else if (errorMessage.includes('permission')) {
916
+ errorMessage +=
917
+ '\n\n💡 Tip: Make sure your API key has access to the Baidu Qianfan API'
918
+ } else if (errorMessage.includes('connection')) {
919
+ errorMessage +=
920
+ '\n\n💡 Tip: Check your internet connection and try again'
921
+ }
922
+
923
+ setModelLoadError(errorMessage)
924
+ throw error
925
+ }
926
+ }
927
+
928
+ async function fetchCustomOpenAIModels() {
929
+ try {
930
+ const models = await fetchCustomModels(customBaseUrl, apiKey)
931
+
932
+ const customModels = models.map((model: any) => ({
933
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
934
+ provider: 'custom-openai',
935
+ max_tokens: model.max_tokens || 4096,
936
+ supports_vision: false, // Default to false, could be enhanced
937
+ supports_function_calling: true,
938
+ supports_reasoning_effort: false,
939
+ }))
940
+
941
+ return customModels
942
+ } catch (error) {
943
+ let errorMessage = 'Failed to fetch custom API models'
944
+
945
+ if (error instanceof Error) {
946
+ errorMessage = error.message
947
+ }
948
+
949
+ // Add helpful suggestions based on error type
950
+ if (errorMessage.includes('API key')) {
951
+ errorMessage +=
952
+ '\n\n💡 Tip: Check that your API key is valid for this endpoint'
953
+ } else if (errorMessage.includes('endpoint not found')) {
954
+ errorMessage +=
955
+ '\n\n💡 Tip: Make sure the base URL ends with /v1 and supports OpenAI-compatible API'
956
+ } else if (errorMessage.includes('connect')) {
957
+ errorMessage +=
958
+ '\n\n💡 Tip: Verify the base URL is correct and accessible'
959
+ } else if (errorMessage.includes('response format')) {
960
+ errorMessage +=
961
+ '\n\n💡 Tip: This API may not be fully OpenAI-compatible'
962
+ }
963
+
964
+ setModelLoadError(errorMessage)
965
+ throw error
966
+ }
967
+ }
968
+
969
+ async function fetchGeminiModels() {
970
+ try {
971
+ const response = await fetch(
972
+ `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`,
973
+ )
974
+
975
+ if (!response.ok) {
976
+ const errorData = await response.json()
977
+ throw new Error(
978
+ errorData.error?.message || `API error: ${response.status}`,
979
+ )
980
+ }
981
+
982
+ const { models } = await response.json()
983
+
984
+ const geminiModels = models
985
+ .filter((model: any) =>
986
+ model.supportedGenerationMethods.includes('generateContent'),
987
+ )
988
+ .map((model: any) => ({
989
+ model: model.name.replace('models/', ''),
990
+ provider: 'gemini',
991
+ max_tokens: model.outputTokenLimit,
992
+ supports_vision:
993
+ model.supportedGenerationMethods.includes('generateContent'),
994
+ supports_function_calling:
995
+ model.supportedGenerationMethods.includes('generateContent'),
996
+ }))
997
+
998
+ return geminiModels
999
+ } catch (error) {
1000
+ setModelLoadError(
1001
+ error instanceof Error ? error.message : 'Unknown error',
1002
+ )
1003
+ throw error
1004
+ }
1005
+ }
1006
+
1007
+ async function fetchOllamaModels() {
1008
+ try {
1009
+ const response = await fetch(`${ollamaBaseUrl}/models`)
1010
+
1011
+ if (!response.ok) {
1012
+ throw new Error(`HTTP error ${response.status}: ${response.statusText}`)
1013
+ }
1014
+
1015
+ const responseData = await response.json()
1016
+
1017
+ // Properly handle Ollama API response format
1018
+ // Ollama API can return models in different formats based on version
1019
+ let models = []
1020
+
1021
+ // Check if data field exists (newer Ollama versions)
1022
+ if (responseData.data && Array.isArray(responseData.data)) {
1023
+ models = responseData.data
1024
+ }
1025
+ // Check if models array is directly at the root (older Ollama versions)
1026
+ else if (Array.isArray(responseData.models)) {
1027
+ models = responseData.models
1028
+ }
1029
+ // If response is already an array
1030
+ else if (Array.isArray(responseData)) {
1031
+ models = responseData
1032
+ } else {
1033
+ throw new Error(
1034
+ 'Invalid response from Ollama API: missing models array',
1035
+ )
1036
+ }
1037
+
1038
+ // Transform Ollama models to our format
1039
+ const ollamaModels = models.map((model: any) => ({
1040
+ model:
1041
+ model.name ??
1042
+ model.modelName ??
1043
+ (typeof model === 'string' ? model : ''),
1044
+ provider: 'ollama',
1045
+ max_tokens: 4096, // Default value
1046
+ supports_vision: false,
1047
+ supports_function_calling: true,
1048
+ supports_reasoning_effort: false,
1049
+ }))
1050
+
1051
+ // Filter out models with empty names
1052
+ const validModels = ollamaModels.filter(model => model.model)
1053
+
1054
+ setAvailableModels(validModels)
1055
+
1056
+ // Only navigate if we have models
1057
+ if (validModels.length > 0) {
1058
+ navigateTo('model')
1059
+ } else {
1060
+ setModelLoadError('No models found in your Ollama installation')
1061
+ }
1062
+
1063
+ return validModels
1064
+ } catch (error) {
1065
+ const errorMessage =
1066
+ error instanceof Error ? error.message : String(error)
1067
+
1068
+ if (errorMessage.includes('fetch')) {
1069
+ setModelLoadError(
1070
+ `Could not connect to Ollama server at ${ollamaBaseUrl}. Make sure Ollama is running and the URL is correct.`,
1071
+ )
1072
+ } else {
1073
+ setModelLoadError(`Error loading Ollama models: ${errorMessage}`)
1074
+ }
1075
+
1076
+ console.error('Error fetching Ollama models:', error)
1077
+ return []
1078
+ }
1079
+ }
1080
+
1081
+ async function fetchModelsWithRetry() {
1082
+ const MAX_RETRIES = 2
1083
+ let lastError: Error | null = null
1084
+
1085
+ for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
1086
+ setFetchRetryCount(attempt)
1087
+ setIsRetrying(attempt > 1)
1088
+
1089
+ if (attempt > 1) {
1090
+ // Show retry message
1091
+ setModelLoadError(
1092
+ `Attempt ${attempt}/${MAX_RETRIES}: Retrying model discovery...`,
1093
+ )
1094
+ // Wait 1 second before retrying
1095
+ await new Promise(resolve => setTimeout(resolve, 1000))
1096
+ }
1097
+
1098
+ try {
1099
+ const models = await fetchModels()
1100
+ // Success! Reset retry state and return models
1101
+ setFetchRetryCount(0)
1102
+ setIsRetrying(false)
1103
+ setModelLoadError(null)
1104
+ return models
1105
+ } catch (error) {
1106
+ lastError = error instanceof Error ? error : new Error(String(error))
1107
+ console.log(`Model fetch attempt ${attempt} failed:`, lastError.message)
1108
+
1109
+ if (attempt === MAX_RETRIES) {
1110
+ // Final attempt failed, break to handle fallback
1111
+ break
1112
+ }
1113
+ }
1114
+ }
1115
+
1116
+ // All retries failed, handle fallback to manual input
1117
+ setIsRetrying(false)
1118
+ const errorMessage = lastError?.message || 'Unknown error'
1119
+
1120
+ // Check if provider supports manual input fallback
1121
+ const supportsManualInput = [
1122
+ 'anthropic',
1123
+ 'kimi',
1124
+ 'deepseek',
1125
+ 'siliconflow',
1126
+ 'qwen',
1127
+ 'glm',
1128
+ 'minimax',
1129
+ 'baidu-qianfan',
1130
+ 'custom-openai',
1131
+ ].includes(selectedProvider)
1132
+
1133
+ if (supportsManualInput) {
1134
+ setModelLoadError(
1135
+ `Failed to auto-discover models after ${MAX_RETRIES} attempts: ${errorMessage}\n\n⚡ Automatically switching to manual model configuration...`,
1136
+ )
1137
+
1138
+ // Automatically switch to manual input after 2 seconds
1139
+ setTimeout(() => {
1140
+ setModelLoadError(null)
1141
+ navigateTo('modelInput')
1142
+ }, 2000)
1143
+ } else {
1144
+ setModelLoadError(
1145
+ `Failed to load models after ${MAX_RETRIES} attempts: ${errorMessage}`,
1146
+ )
1147
+ }
1148
+
1149
+ return []
1150
+ }
1151
+
1152
+ async function fetchModels() {
1153
+ setIsLoadingModels(true)
1154
+ setModelLoadError(null)
1155
+
1156
+ try {
1157
+ // For Anthropic provider (including official and community proxies via sub-menu), use the same logic
1158
+ if (selectedProvider === 'anthropic') {
1159
+ const anthropicModels = await fetchAnthropicCompatibleProviderModels()
1160
+ setAvailableModels(anthropicModels)
1161
+ navigateTo('model')
1162
+ return anthropicModels
1163
+ }
1164
+
1165
+ // For custom OpenAI-compatible APIs, use the fetchCustomOpenAIModels function
1166
+ if (selectedProvider === 'custom-openai') {
1167
+ const customModels = await fetchCustomOpenAIModels()
1168
+ setAvailableModels(customModels)
1169
+ navigateTo('model')
1170
+ return customModels
1171
+ }
1172
+
1173
+ // For Gemini, use the separate fetchGeminiModels function
1174
+ if (selectedProvider === 'gemini') {
1175
+ const geminiModels = await fetchGeminiModels()
1176
+ setAvailableModels(geminiModels)
1177
+ navigateTo('model')
1178
+ return geminiModels
1179
+ }
1180
+
1181
+ // For Kimi, use the fetchKimiModels function
1182
+ if (selectedProvider === 'kimi') {
1183
+ const kimiModels = await fetchKimiModels()
1184
+ setAvailableModels(kimiModels)
1185
+ navigateTo('model')
1186
+ return kimiModels
1187
+ }
1188
+
1189
+ // For DeepSeek, use the fetchDeepSeekModels function
1190
+ if (selectedProvider === 'deepseek') {
1191
+ const deepseekModels = await fetchDeepSeekModels()
1192
+ setAvailableModels(deepseekModels)
1193
+ navigateTo('model')
1194
+ return deepseekModels
1195
+ }
1196
+
1197
+ // For SiliconFlow, use the fetchSiliconFlowModels function
1198
+ if (selectedProvider === 'siliconflow') {
1199
+ const siliconflowModels = await fetchSiliconFlowModels()
1200
+ setAvailableModels(siliconflowModels)
1201
+ navigateTo('model')
1202
+ return siliconflowModels
1203
+ }
1204
+
1205
+ // For Qwen, use the fetchQwenModels function
1206
+ if (selectedProvider === 'qwen') {
1207
+ const qwenModels = await fetchQwenModels()
1208
+ setAvailableModels(qwenModels)
1209
+ navigateTo('model')
1210
+ return qwenModels
1211
+ }
1212
+
1213
+ // For GLM, use the fetchGLMModels function
1214
+ if (selectedProvider === 'glm') {
1215
+ const glmModels = await fetchGLMModels()
1216
+ setAvailableModels(glmModels)
1217
+ navigateTo('model')
1218
+ return glmModels
1219
+ }
1220
+
1221
+ // For Baidu Qianfan, use the fetchBaiduQianfanModels function
1222
+ if (selectedProvider === 'baidu-qianfan') {
1223
+ const baiduModels = await fetchBaiduQianfanModels()
1224
+ setAvailableModels(baiduModels)
1225
+ navigateTo('model')
1226
+ return baiduModels
1227
+ }
1228
+
1229
+ // For Azure, skip model fetching and go directly to model input
1230
+ if (selectedProvider === 'azure') {
1231
+ navigateTo('modelInput')
1232
+ return []
1233
+ }
1234
+
1235
+ // For all other providers, use the OpenAI client
1236
+ let baseURL = providerBaseUrl || providers[selectedProvider]?.baseURL
1237
+
1238
+ // For custom-openai provider, use the custom base URL
1239
+ if (selectedProvider === 'custom-openai') {
1240
+ baseURL = customBaseUrl
1241
+ }
1242
+
1243
+ const openai = new OpenAI({
1244
+ apiKey: apiKey || 'dummy-key-for-ollama', // Ollama doesn't need a real key
1245
+ baseURL: baseURL,
1246
+ dangerouslyAllowBrowser: true,
1247
+ })
1248
+
1249
+ // Fetch the models
1250
+ const response = await openai.models.list()
1251
+
1252
+ // Transform the response into our ModelInfo format
1253
+ const fetchedModels = []
1254
+ for (const model of response.data) {
1255
+ const modelName = model.modelName || model.id || model.name || model.model || 'unknown'
1256
+ const modelInfo = models[selectedProvider as keyof typeof models]?.find(
1257
+ m => m.model === modelName,
1258
+ )
1259
+ fetchedModels.push({
1260
+ model: modelName,
1261
+ provider: selectedProvider,
1262
+ max_tokens: modelInfo?.max_output_tokens,
1263
+ supports_vision: modelInfo?.supports_vision || false,
1264
+ supports_function_calling:
1265
+ modelInfo?.supports_function_calling || false,
1266
+ supports_reasoning_effort:
1267
+ modelInfo?.supports_reasoning_effort || false,
1268
+ })
1269
+ }
1270
+
1271
+ setAvailableModels(fetchedModels)
1272
+
1273
+ // Navigate to model selection screen if models were loaded successfully
1274
+ navigateTo('model')
1275
+
1276
+ return fetchedModels
1277
+ } catch (error) {
1278
+ // Log for debugging
1279
+ console.error('Error fetching models:', error)
1280
+
1281
+ // Re-throw the error so that fetchModelsWithRetry can handle it properly
1282
+ throw error
1283
+ } finally {
1284
+ setIsLoadingModels(false)
1285
+ }
1286
+ }
1287
+
1288
+ function handleApiKeySubmit(key: string) {
1289
+ setApiKey(key)
1290
+
1291
+ // For Azure, go to resource name input next
1292
+ if (selectedProvider === 'azure') {
1293
+ navigateTo('resourceName')
1294
+ return
1295
+ }
1296
+
1297
+ // Fetch models with the provided API key
1298
+ fetchModelsWithRetry().catch(error => {
1299
+ // The retry logic in fetchModelsWithRetry already handles the error display
1300
+ // This catch is just to prevent unhandled promise rejection
1301
+ console.error('Final error after retries:', error)
1302
+ })
1303
+ }
1304
+
1305
+ function handleResourceNameSubmit(name: string) {
1306
+ setResourceName(name)
1307
+ navigateTo('modelInput')
1308
+ }
1309
+
1310
+ function handleOllamaBaseUrlSubmit(url: string) {
1311
+ setOllamaBaseUrl(url)
1312
+ setIsLoadingModels(true)
1313
+ setModelLoadError(null)
1314
+
1315
+ // Use the dedicated Ollama model fetch function
1316
+ fetchOllamaModels().finally(() => {
1317
+ setIsLoadingModels(false)
1318
+ })
1319
+ }
1320
+
1321
+ function handleCustomBaseUrlSubmit(url: string) {
1322
+ // Automatically remove trailing slash from baseURL
1323
+ const cleanUrl = url.replace(/\/+$/, '')
1324
+ setCustomBaseUrl(cleanUrl)
1325
+ // After setting custom base URL, go to API key input
1326
+ navigateTo('apiKey')
1327
+ }
1328
+
1329
+ function handleProviderBaseUrlSubmit(url: string) {
1330
+ // Automatically remove trailing slash from baseURL
1331
+ const cleanUrl = url.replace(/\/+$/, '')
1332
+ setProviderBaseUrl(cleanUrl)
1333
+
1334
+ // For Ollama, handle differently - it tries to fetch models immediately
1335
+ if (selectedProvider === 'ollama') {
1336
+ setOllamaBaseUrl(cleanUrl)
1337
+ setIsLoadingModels(true)
1338
+ setModelLoadError(null)
1339
+
1340
+ // Use the dedicated Ollama model fetch function
1341
+ fetchOllamaModels().finally(() => {
1342
+ setIsLoadingModels(false)
1343
+ })
1344
+ } else {
1345
+ // For all other providers, go to API key input next
1346
+ navigateTo('apiKey')
1347
+ }
1348
+ }
1349
+
1350
+ function handleAnthropicProviderSelection(
1351
+ providerType: 'official' | 'bigdream' | 'custom',
1352
+ ) {
1353
+ setAnthropicProviderType(providerType)
1354
+
1355
+ if (providerType === 'custom') {
1356
+ // For custom Anthropic provider, go to base URL configuration
1357
+ setProviderBaseUrl('')
1358
+ navigateTo('baseUrl')
1359
+ } else {
1360
+ // For official/community proxy providers, set default base URL and go to API key
1361
+ const defaultUrls = {
1362
+ official: 'https://api.anthropic.com',
1363
+ bigdream: 'https://api-key.info',
1364
+ opendev: 'https://api.openai-next.com',
1365
+ }
1366
+ setProviderBaseUrl(defaultUrls[providerType])
1367
+ navigateTo('apiKey')
1368
+ }
1369
+ }
1370
+
1371
+ function handleCustomModelSubmit(model: string) {
1372
+ setCustomModelName(model)
1373
+ setSelectedModel(model)
1374
+
1375
+ // No model info available, so set default values
1376
+ setSupportsReasoningEffort(false)
1377
+ setReasoningEffort(null)
1378
+
1379
+ // Use default max tokens for manually entered models
1380
+ setMaxTokensMode('preset')
1381
+ setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS)
1382
+ setMaxTokens(DEFAULT_MAX_TOKENS.toString())
1383
+ setMaxTokensCursorOffset(DEFAULT_MAX_TOKENS.toString().length)
1384
+
1385
+ // Go to model parameters screen
1386
+ navigateTo('modelParams')
1387
+ // Reset active field index
1388
+ setActiveFieldIndex(0)
1389
+ }
1390
+
1391
+ function handleModelSelection(model: string) {
1392
+ setSelectedModel(model)
1393
+
1394
+ // Check if the selected model supports reasoning_effort
1395
+ const modelInfo = availableModels.find(m => m.model === model)
1396
+ setSupportsReasoningEffort(modelInfo?.supports_reasoning_effort || false)
1397
+
1398
+ if (!modelInfo?.supports_reasoning_effort) {
1399
+ setReasoningEffort(null)
1400
+ }
1401
+
1402
+ // Set max tokens based on model info or default
1403
+ if (modelInfo?.max_tokens) {
1404
+ const modelMaxTokens = modelInfo.max_tokens
1405
+ // Check if the model's max tokens matches any of our presets
1406
+ const matchingPreset = MAX_TOKENS_OPTIONS.find(
1407
+ option => option.value === modelMaxTokens,
1408
+ )
1409
+
1410
+ if (matchingPreset) {
1411
+ setMaxTokensMode('preset')
1412
+ setSelectedMaxTokensPreset(modelMaxTokens)
1413
+ setMaxTokens(modelMaxTokens.toString())
1414
+ } else {
1415
+ setMaxTokensMode('custom')
1416
+ setMaxTokens(modelMaxTokens.toString())
1417
+ }
1418
+ setMaxTokensCursorOffset(modelMaxTokens.toString().length)
1419
+ } else {
1420
+ // No model-specific max tokens, use default
1421
+ setMaxTokensMode('preset')
1422
+ setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS)
1423
+ setMaxTokens(DEFAULT_MAX_TOKENS.toString())
1424
+ setMaxTokensCursorOffset(DEFAULT_MAX_TOKENS.toString().length)
1425
+ }
1426
+
1427
+ // Go to model parameters screen
1428
+ navigateTo('modelParams')
1429
+ // Reset active field index
1430
+ setActiveFieldIndex(0)
1431
+ }
1432
+
1433
+ const handleModelParamsSubmit = () => {
1434
+ // Values are already in state, no need to extract from form
1435
+ // Ensure contextLength is set to a valid option before navigating
1436
+ if (!CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength)) {
1437
+ setContextLength(DEFAULT_CONTEXT_LENGTH)
1438
+ }
1439
+ // Navigate to context length screen
1440
+ navigateTo('contextLength')
1441
+ }
1442
+
1443
// Probe the configured provider endpoint to verify that the base URL and
// API key actually work before the configuration is saved. Returns a
// structured result (never throws) so the UI can render success/failure
// details inline.
async function testConnection(): Promise<{
  success: boolean
  message: string
  endpoint?: string
  details?: string
}> {
  setIsTestingConnection(true)
  setConnectionTestResult(null)

  try {
    // Determine the base URL to test
    let testBaseURL =
      providerBaseUrl || providers[selectedProvider]?.baseURL || ''

    if (selectedProvider === 'azure') {
      // Azure routes requests through a per-resource, per-deployment URL.
      testBaseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${selectedModel}`
    } else if (selectedProvider === 'custom-openai') {
      testBaseURL = customBaseUrl
    }

    // For OpenAI-compatible providers, try multiple endpoints in order of preference
    const isOpenAICompatible = [
      'minimax',
      'kimi',
      'deepseek',
      'siliconflow',
      'qwen',
      'glm',
      'baidu-qianfan',
      'openai',
      'mistral',
      'xai',
      'groq',
      'custom-openai',
    ].includes(selectedProvider)

    if (isOpenAICompatible) {
      // Define endpoints to try in order of preference
      const endpointsToTry = []

      if (selectedProvider === 'minimax') {
        // MiniMax exposes a vendor-specific chat path in addition to the
        // standard OpenAI one; prefer the vendor path.
        endpointsToTry.push(
          {
            path: '/text/chatcompletion_v2',
            name: 'MiniMax v2 (recommended)',
          },
          { path: '/chat/completions', name: 'Standard OpenAI' },
        )
      } else {
        endpointsToTry.push({
          path: '/chat/completions',
          name: 'Standard OpenAI',
        })
      }

      // Try each endpoint until one succeeds; remember the most recent
      // failure so it can be reported if none succeed.
      let lastError = null
      for (const endpoint of endpointsToTry) {
        try {
          const testResult = await testChatEndpoint(
            testBaseURL,
            endpoint.path,
            endpoint.name,
          )
          if (testResult.success) {
            return testResult
          }
          lastError = testResult
        } catch (error) {
          // testChatEndpoint threw (rather than returning a failure result);
          // wrap it into the same result shape.
          lastError = {
            success: false,
            message: `Failed to test ${endpoint.name}`,
            endpoint: endpoint.path,
            details: error instanceof Error ? error.message : String(error),
          }
        }
      }

      return (
        lastError || {
          success: false,
          message: 'All endpoints failed',
          details: 'No endpoints could be reached',
        }
      )
    } else {
      // For non-OpenAI providers (like Anthropic, Gemini), use different test approach
      return await testProviderSpecificEndpoint(testBaseURL)
    }
  } catch (error) {
    // Any unexpected error is reported as a failed test, not thrown.
    return {
      success: false,
      message: 'Connection test failed',
      details: error instanceof Error ? error.message : String(error),
    }
  } finally {
    setIsTestingConnection(false)
  }
}
1541
+
1542
+ async function testChatEndpoint(
1543
+ baseURL: string,
1544
+ endpointPath: string,
1545
+ endpointName: string,
1546
+ ): Promise<{
1547
+ success: boolean
1548
+ message: string
1549
+ endpoint?: string
1550
+ details?: string
1551
+ }> {
1552
+ const testURL = `${baseURL.replace(/\/+$/, '')}${endpointPath}`
1553
+
1554
+ // Create a test message that expects a specific response
1555
+ const testPayload = {
1556
+ model: selectedModel,
1557
+ messages: [
1558
+ {
1559
+ role: 'user',
1560
+ content:
1561
+ 'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.',
1562
+ },
1563
+ ],
1564
+ max_tokens: Math.max(parseInt(maxTokens) || 8192, 8192), // Ensure minimum 8192 tokens for connection test
1565
+ temperature: 0,
1566
+ stream: false,
1567
+ }
1568
+
1569
+ const headers: Record<string, string> = {
1570
+ 'Content-Type': 'application/json',
1571
+ }
1572
+
1573
+ // Add authorization headers
1574
+ if (selectedProvider === 'azure') {
1575
+ headers['api-key'] = apiKey
1576
+ } else {
1577
+ headers['Authorization'] = `Bearer ${apiKey}`
1578
+ }
1579
+
1580
+ try {
1581
+ const response = await fetch(testURL, {
1582
+ method: 'POST',
1583
+ headers,
1584
+ body: JSON.stringify(testPayload),
1585
+ })
1586
+
1587
+ if (response.ok) {
1588
+ const data = await response.json()
1589
+ console.log(
1590
+ '[DEBUG] Connection test response:',
1591
+ JSON.stringify(data, null, 2),
1592
+ )
1593
+
1594
+ // Check if we got a valid response with content
1595
+ let responseContent = ''
1596
+
1597
+ if (data.choices && data.choices.length > 0) {
1598
+ responseContent = data.choices[0]?.message?.content || ''
1599
+ } else if (data.reply) {
1600
+ // Handle MiniMax format
1601
+ responseContent = data.reply
1602
+ } else if (data.output) {
1603
+ // Handle other formats
1604
+ responseContent = data.output?.text || data.output || ''
1605
+ }
1606
+
1607
+ console.log('[DEBUG] Extracted response content:', responseContent)
1608
+
1609
+ // Check if response contains "YES" (case insensitive)
1610
+ const containsYes = responseContent.toLowerCase().includes('yes')
1611
+
1612
+ if (containsYes) {
1613
+ return {
1614
+ success: true,
1615
+ message: `✅ Connection test passed with ${endpointName}`,
1616
+ endpoint: endpointPath,
1617
+ details: `Model responded correctly: "${responseContent.trim()}"`,
1618
+ }
1619
+ } else {
1620
+ return {
1621
+ success: false,
1622
+ message: `⚠️ ${endpointName} connected but model response unexpected`,
1623
+ endpoint: endpointPath,
1624
+ details: `Expected "YES" but got: "${responseContent.trim() || '(empty response)'}"`,
1625
+ }
1626
+ }
1627
+ } else {
1628
+ const errorData = await response.json().catch(() => null)
1629
+ const errorMessage =
1630
+ errorData?.error?.message || errorData?.message || response.statusText
1631
+
1632
+ return {
1633
+ success: false,
1634
+ message: `❌ ${endpointName} failed (${response.status})`,
1635
+ endpoint: endpointPath,
1636
+ details: `Error: ${errorMessage}`,
1637
+ }
1638
+ }
1639
+ } catch (error) {
1640
+ return {
1641
+ success: false,
1642
+ message: `❌ ${endpointName} connection failed`,
1643
+ endpoint: endpointPath,
1644
+ details: error instanceof Error ? error.message : String(error),
1645
+ }
1646
+ }
1647
+ }
1648
+
1649
+ async function testProviderSpecificEndpoint(baseURL: string): Promise<{
1650
+ success: boolean
1651
+ message: string
1652
+ endpoint?: string
1653
+ details?: string
1654
+ }> {
1655
+ // For Anthropic and Anthropic-compatible providers, use the official SDK for testing
1656
+ if (selectedProvider === 'anthropic' || selectedProvider === 'bigdream') {
1657
+ try {
1658
+ console.log(
1659
+ `[DEBUG] Testing ${selectedProvider} connection using official Anthropic SDK...`,
1660
+ )
1661
+
1662
+ // Determine the baseURL for testing
1663
+ let testBaseURL: string | undefined = undefined
1664
+ if (selectedProvider === 'bigdream') {
1665
+ testBaseURL = baseURL || 'https://api-key.info'
1666
+ } else if (selectedProvider === 'anthropic') {
1667
+ // For anthropic, use user-provided baseURL if available, otherwise undefined (official API)
1668
+ testBaseURL =
1669
+ baseURL && baseURL !== 'https://api.anthropic.com'
1670
+ ? baseURL
1671
+ : undefined
1672
+ }
1673
+
1674
+ // Use the verifyApiKey function which uses the official Anthropic SDK
1675
+ const isValid = await verifyApiKey(apiKey, testBaseURL)
1676
+
1677
+ if (isValid) {
1678
+ return {
1679
+ success: true,
1680
+ message: `✅ ${selectedProvider} connection test passed`,
1681
+ endpoint: '/messages',
1682
+ details: 'API key verified using official Anthropic SDK',
1683
+ }
1684
+ } else {
1685
+ return {
1686
+ success: false,
1687
+ message: `❌ ${selectedProvider} API key verification failed`,
1688
+ endpoint: '/messages',
1689
+ details:
1690
+ 'Invalid API key. Please check your API key and try again.',
1691
+ }
1692
+ }
1693
+ } catch (error) {
1694
+ console.log(`[DEBUG] ${selectedProvider} connection test error:`, error)
1695
+ return {
1696
+ success: false,
1697
+ message: `❌ ${selectedProvider} connection failed`,
1698
+ endpoint: '/messages',
1699
+ details: error instanceof Error ? error.message : String(error),
1700
+ }
1701
+ }
1702
+ }
1703
+
1704
+ // For other providers, return a placeholder success (we can extend this later)
1705
+ return {
1706
+ success: true,
1707
+ message: `✅ Configuration saved for ${selectedProvider}`,
1708
+ details: 'Provider-specific testing not implemented yet',
1709
+ }
1710
+ }
1711
+
1712
+ async function handleConnectionTest() {
1713
+ const result = await testConnection()
1714
+ setConnectionTestResult(result)
1715
+
1716
+ if (result.success) {
1717
+ // Auto-advance to confirmation after a short delay
1718
+ setTimeout(() => {
1719
+ navigateTo('confirmation')
1720
+ }, 2000)
1721
+ }
1722
+ }
1723
+
1724
+ const handleContextLengthSubmit = () => {
1725
+ // Context length value is already in state
1726
+ // Navigate to connection test screen
1727
+ navigateTo('connectionTest')
1728
+ }
1729
+
1730
+ async function saveConfiguration(
1731
+ provider: ProviderType,
1732
+ model: string,
1733
+ ): Promise<string | null> {
1734
+ let baseURL = providerBaseUrl || providers[provider]?.baseURL || ''
1735
+ let actualProvider = provider
1736
+
1737
+ // For Anthropic provider, determine the actual provider based on sub-menu selection
1738
+ if (provider === 'anthropic') {
1739
+ switch (anthropicProviderType) {
1740
+ case 'official':
1741
+ actualProvider = 'anthropic'
1742
+ baseURL = baseURL || 'https://api.anthropic.com'
1743
+ break
1744
+ case 'bigdream':
1745
+ actualProvider = 'bigdream'
1746
+ baseURL = baseURL || 'https://api-key.info'
1747
+ break
1748
+ case 'custom':
1749
+ actualProvider = 'anthropic' // Use anthropic for custom endpoints
1750
+ // baseURL is already set from user input
1751
+ break
1752
+ }
1753
+ }
1754
+
1755
+ // For Azure, construct the baseURL using the resource name
1756
+ if (provider === 'azure') {
1757
+ baseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${model}`
1758
+ }
1759
+ // For custom OpenAI-compatible API, use the custom base URL
1760
+ else if (provider === 'custom-openai') {
1761
+ baseURL = customBaseUrl
1762
+ }
1763
+
1764
+ try {
1765
+ // Use ModelManager's addModel method for duplicate validation
1766
+ const modelManager = getModelManager()
1767
+
1768
+ const modelConfig = {
1769
+ name: `${actualProvider} ${model}`,
1770
+ provider: actualProvider,
1771
+ modelName: model,
1772
+ baseURL: baseURL,
1773
+ apiKey: apiKey || '',
1774
+ maxTokens: parseInt(maxTokens) || DEFAULT_MAX_TOKENS,
1775
+ contextLength: contextLength || DEFAULT_CONTEXT_LENGTH,
1776
+ reasoningEffort,
1777
+ }
1778
+
1779
+ // addModel method will throw error if duplicate exists
1780
+ return await modelManager.addModel(modelConfig)
1781
+ } catch (error) {
1782
+ // Validation failed - show error to user
1783
+ setValidationError(
1784
+ error instanceof Error ? error.message : 'Failed to add model',
1785
+ )
1786
+ return null
1787
+ }
1788
+ }
1789
+
1790
+ async function handleConfirmation() {
1791
+ // Clear any previous validation errors
1792
+ setValidationError(null)
1793
+
1794
+ // Save the configuration and exit
1795
+ const modelId = await saveConfiguration(selectedProvider, selectedModel)
1796
+
1797
+ // If validation failed (modelId is null), don't proceed
1798
+ if (!modelId) {
1799
+ return // Error is already set in saveConfiguration
1800
+ }
1801
+
1802
+ // Handle model pointer assignment for new system
1803
+ if (modelId && (isOnboarding || targetPointer)) {
1804
+ if (isOnboarding) {
1805
+ // First-time setup: set all pointers to this model
1806
+ setAllPointersToModel(modelId)
1807
+ } else if (targetPointer) {
1808
+ // Specific pointer configuration: only set target pointer
1809
+ setModelPointer(targetPointer, modelId)
1810
+ }
1811
+ }
1812
+
1813
+ onDone()
1814
+ }
1815
+
1816
+ // Handle back navigation based on current screen
1817
+ const handleBack = () => {
1818
+ if (currentScreen === 'provider') {
1819
+ // If we're at the first screen, exit
1820
+ if (onCancel) {
1821
+ onCancel()
1822
+ } else {
1823
+ onDone()
1824
+ }
1825
+ } else {
1826
+ // Remove the current screen from the stack
1827
+ setScreenStack(prev => prev.slice(0, -1))
1828
+ }
1829
+ }
1830
+
1831
+ // Use escape navigation hook
1832
+ useEscapeNavigation(handleBack, abortController)
1833
+
1834
+ // Handle cursor offset changes
1835
+ function handleCursorOffsetChange(offset: number) {
1836
+ setCursorOffset(offset)
1837
+ }
1838
+
1839
+ // Handle API key changes
1840
+ function handleApiKeyChange(value: string) {
1841
+ setApiKeyEdited(true)
1842
+ setApiKey(value)
1843
+ }
1844
+
1845
+ // Handle model search query changes
1846
+ function handleModelSearchChange(value: string) {
1847
+ setModelSearchQuery(value)
1848
+ // Update cursor position to end of text when typing
1849
+ setModelSearchCursorOffset(value.length)
1850
+ }
1851
+
1852
+ // Handle model search cursor offset changes
1853
+ function handleModelSearchCursorOffsetChange(offset: number) {
1854
+ setModelSearchCursorOffset(offset)
1855
+ }
1856
+
1857
+ // Handle input for Resource Name screen
1858
+ useInput((input, key) => {
1859
+ // Handle API key submission on Enter
1860
+ if (currentScreen === 'apiKey' && key.return) {
1861
+ if (apiKey) {
1862
+ handleApiKeySubmit(apiKey)
1863
+ }
1864
+ return
1865
+ }
1866
+
1867
+ if (currentScreen === 'apiKey' && key.tab) {
1868
+ // For providers that support manual model input, skip to manual model input
1869
+ if (
1870
+ selectedProvider === 'anthropic' ||
1871
+ selectedProvider === 'kimi' ||
1872
+ selectedProvider === 'deepseek' ||
1873
+ selectedProvider === 'qwen' ||
1874
+ selectedProvider === 'glm' ||
1875
+ selectedProvider === 'minimax' ||
1876
+ selectedProvider === 'baidu-qianfan' ||
1877
+ selectedProvider === 'siliconflow' ||
1878
+ selectedProvider === 'custom-openai'
1879
+ ) {
1880
+ navigateTo('modelInput')
1881
+ return
1882
+ }
1883
+
1884
+ // For other providers, try to fetch models without API key
1885
+ fetchModelsWithRetry().catch(error => {
1886
+ // The retry logic in fetchModelsWithRetry already handles the error display
1887
+ // This catch is just to prevent unhandled promise rejection
1888
+ console.error('Final error after retries:', error)
1889
+ })
1890
+ return
1891
+ }
1892
+
1893
+ // Handle Resource Name submission on Enter
1894
+ if (currentScreen === 'resourceName' && key.return) {
1895
+ if (resourceName) {
1896
+ handleResourceNameSubmit(resourceName)
1897
+ }
1898
+ return
1899
+ }
1900
+
1901
+ // Handle Base URL submission on Enter
1902
+ if (currentScreen === 'baseUrl' && key.return) {
1903
+ if (selectedProvider === 'custom-openai') {
1904
+ handleCustomBaseUrlSubmit(customBaseUrl)
1905
+ } else {
1906
+ // For all other providers (including ollama), use the general handler
1907
+ handleProviderBaseUrlSubmit(providerBaseUrl)
1908
+ }
1909
+ return
1910
+ }
1911
+
1912
+ // Handle Custom Model Name submission on Enter
1913
+ if (currentScreen === 'modelInput' && key.return) {
1914
+ if (customModelName) {
1915
+ handleCustomModelSubmit(customModelName)
1916
+ }
1917
+ return
1918
+ }
1919
+
1920
+ // Handle confirmation on Enter
1921
+ if (currentScreen === 'confirmation' && key.return) {
1922
+ handleConfirmation().catch(error => {
1923
+ console.error('Error in handleConfirmation:', error)
1924
+ setValidationError(
1925
+ error instanceof Error ? error.message : 'Unexpected error occurred',
1926
+ )
1927
+ })
1928
+ return
1929
+ }
1930
+
1931
+ // Handle connection test
1932
+ if (currentScreen === 'connectionTest') {
1933
+ if (key.return) {
1934
+ if (!isTestingConnection && !connectionTestResult) {
1935
+ handleConnectionTest()
1936
+ } else if (connectionTestResult && connectionTestResult.success) {
1937
+ navigateTo('confirmation')
1938
+ } else if (connectionTestResult && !connectionTestResult.success) {
1939
+ // Retry the test
1940
+ handleConnectionTest()
1941
+ }
1942
+ return
1943
+ }
1944
+ }
1945
+
1946
+ // Handle context length selection
1947
+ if (currentScreen === 'contextLength') {
1948
+ if (key.return) {
1949
+ handleContextLengthSubmit()
1950
+ return
1951
+ }
1952
+
1953
+ if (key.upArrow) {
1954
+ const currentIndex = CONTEXT_LENGTH_OPTIONS.findIndex(
1955
+ opt => opt.value === contextLength,
1956
+ )
1957
+ const newIndex =
1958
+ currentIndex > 0
1959
+ ? currentIndex - 1
1960
+ : currentIndex === -1
1961
+ ? CONTEXT_LENGTH_OPTIONS.findIndex(
1962
+ opt => opt.value === DEFAULT_CONTEXT_LENGTH,
1963
+ ) || 0
1964
+ : CONTEXT_LENGTH_OPTIONS.length - 1
1965
+ setContextLength(CONTEXT_LENGTH_OPTIONS[newIndex].value)
1966
+ return
1967
+ }
1968
+
1969
+ if (key.downArrow) {
1970
+ const currentIndex = CONTEXT_LENGTH_OPTIONS.findIndex(
1971
+ opt => opt.value === contextLength,
1972
+ )
1973
+ const newIndex =
1974
+ currentIndex === -1
1975
+ ? CONTEXT_LENGTH_OPTIONS.findIndex(
1976
+ opt => opt.value === DEFAULT_CONTEXT_LENGTH,
1977
+ ) || 0
1978
+ : (currentIndex + 1) % CONTEXT_LENGTH_OPTIONS.length
1979
+ setContextLength(CONTEXT_LENGTH_OPTIONS[newIndex].value)
1980
+ return
1981
+ }
1982
+ }
1983
+
1984
+ // Handle paste event (Ctrl+V or Cmd+V)
1985
+ if (
1986
+ currentScreen === 'apiKey' &&
1987
+ ((key.ctrl && input === 'v') || (key.meta && input === 'v'))
1988
+ ) {
1989
+ // We can't directly access clipboard in terminal, but we can show a message
1990
+ setModelLoadError(
1991
+ "Please use your terminal's paste functionality or type the API key manually",
1992
+ )
1993
+ return
1994
+ }
1995
+
1996
+ // Handle Tab key for form navigation in model params screen
1997
+ if (currentScreen === 'modelParams' && key.tab) {
1998
+ const formFields = getFormFieldsForModelParams()
1999
+ // Move to next field
2000
+ setActiveFieldIndex(current => (current + 1) % formFields.length)
2001
+ return
2002
+ }
2003
+
2004
+ // Handle Enter key for form submission in model params screen
2005
+ if (currentScreen === 'modelParams' && key.return) {
2006
+ const formFields = getFormFieldsForModelParams()
2007
+ const currentField = formFields[activeFieldIndex]
2008
+
2009
+ if (
2010
+ currentField.name === 'submit' ||
2011
+ activeFieldIndex === formFields.length - 1
2012
+ ) {
2013
+ // If on the Continue button, submit the form
2014
+ handleModelParamsSubmit()
2015
+ } else if (currentField.component === 'select') {
2016
+ // For select fields, move to the next field (since selection should be handled by Select component)
2017
+ setActiveFieldIndex(current =>
2018
+ Math.min(current + 1, formFields.length - 1),
2019
+ )
2020
+ }
2021
+ return
2022
+ }
2023
+ })
2024
+
2025
+ // Helper function to get form fields for model params
2026
+ function getFormFieldsForModelParams() {
2027
+ return [
2028
+ {
2029
+ name: 'maxTokens',
2030
+ label: 'Maximum Tokens',
2031
+ description: 'Select the maximum number of tokens to generate.',
2032
+ value: parseInt(maxTokens),
2033
+ component: 'select',
2034
+ options: MAX_TOKENS_OPTIONS.map(option => ({
2035
+ label: option.label,
2036
+ value: option.value.toString(),
2037
+ })),
2038
+ defaultValue: maxTokens,
2039
+ },
2040
+ ...(supportsReasoningEffort
2041
+ ? [
2042
+ {
2043
+ name: 'reasoningEffort',
2044
+ label: 'Reasoning Effort',
2045
+ description: 'Controls reasoning depth for complex problems.',
2046
+ value: reasoningEffort,
2047
+ component: 'select',
2048
+ },
2049
+ ]
2050
+ : []),
2051
+ {
2052
+ name: 'submit',
2053
+ label: 'Continue →',
2054
+ component: 'button',
2055
+ },
2056
+ ]
2057
+ }
2058
+
2059
+ // Render API Key Input Screen
2060
+ if (currentScreen === 'apiKey') {
2061
+ const modelTypeText = 'this model profile'
2062
+
2063
+ return (
2064
+ <Box flexDirection="column" gap={1}>
2065
+ <Box
2066
+ flexDirection="column"
2067
+ gap={1}
2068
+ borderStyle="round"
2069
+ borderColor={theme.secondaryBorder}
2070
+ paddingX={2}
2071
+ paddingY={1}
2072
+ >
2073
+ <Text bold>
2074
+ API Key Setup{' '}
2075
+ {exitState.pending
2076
+ ? `(press ${exitState.keyName} again to exit)`
2077
+ : ''}
2078
+ </Text>
2079
+ <Box flexDirection="column" gap={1}>
2080
+ <Text bold>
2081
+ Enter your {getProviderLabel(selectedProvider, 0).split(' (')[0]}{' '}
2082
+ API key for {modelTypeText}:
2083
+ </Text>
2084
+ <Box flexDirection="column" width={70}>
2085
+ <Text color={theme.secondaryText}>
2086
+ This key will be stored locally and used to access the{' '}
2087
+ {selectedProvider} API.
2088
+ <Newline />
2089
+ Your key is never sent to our servers.
2090
+ <Newline />
2091
+ <Newline />
2092
+ {selectedProvider === 'kimi' && (
2093
+ <>
2094
+ 💡 Get your API key from:{' '}
2095
+ <Text color={theme.suggestion}>
2096
+ https://platform.moonshot.cn/console/api-keys
2097
+ </Text>
2098
+ </>
2099
+ )}
2100
+ {selectedProvider === 'deepseek' && (
2101
+ <>
2102
+ 💡 Get your API key from:{' '}
2103
+ <Text color={theme.suggestion}>
2104
+ https://platform.deepseek.com/api_keys
2105
+ </Text>
2106
+ </>
2107
+ )}
2108
+ {selectedProvider === 'siliconflow' && (
2109
+ <>
2110
+ 💡 Get your API key from:{' '}
2111
+ <Text color={theme.suggestion}>
2112
+ https://cloud.siliconflow.cn/i/oJWsm6io
2113
+ </Text>
2114
+ </>
2115
+ )}
2116
+ {selectedProvider === 'qwen' && (
2117
+ <>
2118
+ 💡 Get your API key from:{' '}
2119
+ <Text color={theme.suggestion}>
2120
+ https://bailian.console.aliyun.com/?tab=model#/api-key
2121
+ </Text>
2122
+ </>
2123
+ )}
2124
+ {selectedProvider === 'glm' && (
2125
+ <>
2126
+ 💡 Get your API key from:{' '}
2127
+ <Text color={theme.suggestion}>
2128
+ https://open.bigmodel.cn (API Keys section)
2129
+ </Text>
2130
+ </>
2131
+ )}
2132
+ {selectedProvider === 'minimax' && (
2133
+ <>
2134
+ 💡 Get your API key from:{' '}
2135
+ <Text color={theme.suggestion}>
2136
+ https://www.minimax.io/platform/user-center/basic-information
2137
+ </Text>
2138
+ </>
2139
+ )}
2140
+ {selectedProvider === 'baidu-qianfan' && (
2141
+ <>
2142
+ 💡 Get your API key from:{' '}
2143
+ <Text color={theme.suggestion}>
2144
+ https://console.bce.baidu.com/iam/#/iam/accesslist
2145
+ </Text>
2146
+ </>
2147
+ )}
2148
+ {selectedProvider === 'anthropic' && (
2149
+ <>
2150
+ 💡 Get your API key from:{' '}
2151
+ <Text color={theme.suggestion}>
2152
+ {anthropicProviderType === 'official'
2153
+ ? 'https://console.anthropic.com/settings/keys'
2154
+ : anthropicProviderType === 'bigdream'
2155
+ ? 'https://api-key.info/register?aff=MSl4'
2156
+ : anthropicProviderType === 'opendev'
2157
+ ? 'https://api.openai-next.com/register/?aff_code=4xo7'
2158
+ : 'your custom API provider'}
2159
+ </Text>
2160
+ </>
2161
+ )}
2162
+ {selectedProvider === 'openai' && (
2163
+ <>
2164
+ 💡 Get your API key from:{' '}
2165
+ <Text color={theme.suggestion}>
2166
+ https://platform.openai.com/api-keys
2167
+ </Text>
2168
+ </>
2169
+ )}
2170
+ </Text>
2171
+ </Box>
2172
+
2173
+ <Box>
2174
+ <TextInput
2175
+ placeholder="sk-..."
2176
+ value={apiKey}
2177
+ onChange={handleApiKeyChange}
2178
+ onSubmit={handleApiKeySubmit}
2179
+ mask="*"
2180
+ columns={500}
2181
+ cursorOffset={cursorOffset}
2182
+ onChangeCursorOffset={handleCursorOffsetChange}
2183
+ showCursor={true}
2184
+ />
2185
+ </Box>
2186
+
2187
+ <Box marginTop={1}>
2188
+ <Text>
2189
+ <Text color={theme.suggestion} dimColor={!apiKey}>
2190
+ [Submit API Key]
2191
+ </Text>
2192
+ <Text>
2193
+ {' '}
2194
+ - Press Enter or click to continue with this API key
2195
+ </Text>
2196
+ </Text>
2197
+ </Box>
2198
+
2199
+ {isLoadingModels && (
2200
+ <Box>
2201
+ <Text color={theme.suggestion}>
2202
+ Loading available models...
2203
+ </Text>
2204
+ </Box>
2205
+ )}
2206
+ {modelLoadError && (
2207
+ <Box>
2208
+ <Text color="red">Error: {modelLoadError}</Text>
2209
+ </Box>
2210
+ )}
2211
+ <Box marginTop={1}>
2212
+ <Text dimColor>
2213
+ Press <Text color={theme.suggestion}>Enter</Text> to continue,{' '}
2214
+ <Text color={theme.suggestion}>Tab</Text> to{' '}
2215
+ {selectedProvider === 'anthropic' ||
2216
+ selectedProvider === 'kimi' ||
2217
+ selectedProvider === 'deepseek' ||
2218
+ selectedProvider === 'qwen' ||
2219
+ selectedProvider === 'glm' ||
2220
+ selectedProvider === 'minimax' ||
2221
+ selectedProvider === 'baidu-qianfan' ||
2222
+ selectedProvider === 'siliconflow' ||
2223
+ selectedProvider === 'custom-openai'
2224
+ ? 'skip to manual model input'
2225
+ : 'skip using a key'}
2226
+ , or <Text color={theme.suggestion}>Esc</Text> to go back
2227
+ </Text>
2228
+ </Box>
2229
+ </Box>
2230
+ </Box>
2231
+ </Box>
2232
+ )
2233
+ }
2234
+
2235
+ // Render Model Selection Screen
2236
+ if (currentScreen === 'model') {
2237
+ const modelTypeText = 'this model profile'
2238
+
2239
+ return (
2240
+ <Box flexDirection="column" gap={1}>
2241
+ <Box
2242
+ flexDirection="column"
2243
+ gap={1}
2244
+ borderStyle="round"
2245
+ borderColor={theme.secondaryBorder}
2246
+ paddingX={2}
2247
+ paddingY={1}
2248
+ >
2249
+ <Text bold>
2250
+ Model Selection{' '}
2251
+ {exitState.pending
2252
+ ? `(press ${exitState.keyName} again to exit)`
2253
+ : ''}
2254
+ </Text>
2255
+ <Box flexDirection="column" gap={1}>
2256
+ <Text bold>
2257
+ Select a model from{' '}
2258
+ {
2259
+ getProviderLabel(
2260
+ selectedProvider,
2261
+ availableModels.length,
2262
+ ).split(' (')[0]
2263
+ }{' '}
2264
+ for {modelTypeText}:
2265
+ </Text>
2266
+ <Box flexDirection="column" width={70}>
2267
+ <Text color={theme.secondaryText}>
2268
+ This model profile can be assigned to different pointers (main,
2269
+ task, reasoning, quick) for various use cases.
2270
+ </Text>
2271
+ </Box>
2272
+
2273
+ <Box marginY={1}>
2274
+ <Text bold>Search models:</Text>
2275
+ <TextInput
2276
+ placeholder="Type to filter models..."
2277
+ value={modelSearchQuery}
2278
+ onChange={handleModelSearchChange}
2279
+ columns={100}
2280
+ cursorOffset={modelSearchCursorOffset}
2281
+ onChangeCursorOffset={handleModelSearchCursorOffsetChange}
2282
+ showCursor={true}
2283
+ focus={true}
2284
+ />
2285
+ </Box>
2286
+
2287
+ {modelOptions.length > 0 ? (
2288
+ <>
2289
+ <Select
2290
+ options={modelOptions}
2291
+ onChange={handleModelSelection}
2292
+ />
2293
+ <Text dimColor>
2294
+ Showing {modelOptions.length} of {availableModels.length}{' '}
2295
+ models
2296
+ </Text>
2297
+ </>
2298
+ ) : (
2299
+ <Box>
2300
+ {availableModels.length > 0 ? (
2301
+ <Text color="yellow">
2302
+ No models match your search. Try a different query.
2303
+ </Text>
2304
+ ) : (
2305
+ <Text color="yellow">
2306
+ No models available for this provider.
2307
+ </Text>
2308
+ )}
2309
+ </Box>
2310
+ )}
2311
+
2312
+ <Box marginTop={1}>
2313
+ <Text dimColor>
2314
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
2315
+ API key input
2316
+ </Text>
2317
+ </Box>
2318
+ </Box>
2319
+ </Box>
2320
+ </Box>
2321
+ )
2322
+ }
2323
+
2324
+ if (currentScreen === 'modelParams') {
2325
+ // Define form fields
2326
+ const formFields = getFormFieldsForModelParams()
2327
+
2328
+ return (
2329
+ <Box flexDirection="column" gap={1}>
2330
+ <Box
2331
+ flexDirection="column"
2332
+ gap={1}
2333
+ borderStyle="round"
2334
+ borderColor={theme.secondaryBorder}
2335
+ paddingX={2}
2336
+ paddingY={1}
2337
+ >
2338
+ <Text bold>
2339
+ Model Parameters{' '}
2340
+ {exitState.pending
2341
+ ? `(press ${exitState.keyName} again to exit)`
2342
+ : ''}
2343
+ </Text>
2344
+ <Box flexDirection="column" gap={1}>
2345
+ <Text bold>Configure parameters for {selectedModel}:</Text>
2346
+ <Box flexDirection="column" width={70}>
2347
+ <Text color={theme.secondaryText}>
2348
+ Use <Text color={theme.suggestion}>Tab</Text> to navigate
2349
+ between fields. Press{' '}
2350
+ <Text color={theme.suggestion}>Enter</Text> to submit.
2351
+ </Text>
2352
+ </Box>
2353
+
2354
+ <Box flexDirection="column">
2355
+ {formFields.map((field, index) => (
2356
+ <Box flexDirection="column" marginY={1} key={field.name}>
2357
+ {field.component !== 'button' ? (
2358
+ <>
2359
+ <Text
2360
+ bold
2361
+ color={
2362
+ activeFieldIndex === index ? theme.success : undefined
2363
+ }
2364
+ >
2365
+ {field.label}
2366
+ </Text>
2367
+ {field.description && (
2368
+ <Text color={theme.secondaryText}>
2369
+ {field.description}
2370
+ </Text>
2371
+ )}
2372
+ </>
2373
+ ) : (
2374
+ <Text
2375
+ bold
2376
+ color={
2377
+ activeFieldIndex === index ? theme.success : undefined
2378
+ }
2379
+ >
2380
+ {field.label}
2381
+ </Text>
2382
+ )}
2383
+ <Box marginY={1}>
2384
+ {activeFieldIndex === index ? (
2385
+ field.component === 'select' ? (
2386
+ field.name === 'maxTokens' ? (
2387
+ <Select
2388
+ options={field.options || []}
2389
+ onChange={value => {
2390
+ const numValue = parseInt(value)
2391
+ setMaxTokens(numValue.toString())
2392
+ setSelectedMaxTokensPreset(numValue)
2393
+ setMaxTokensCursorOffset(
2394
+ numValue.toString().length,
2395
+ )
2396
+ // Move to next field after selection
2397
+ setTimeout(() => {
2398
+ setActiveFieldIndex(index + 1)
2399
+ }, 100)
2400
+ }}
2401
+ defaultValue={field.defaultValue}
2402
+ />
2403
+ ) : (
2404
+ <Select
2405
+ options={reasoningEffortOptions}
2406
+ onChange={value => {
2407
+ setReasoningEffort(value as ReasoningEffortOption)
2408
+ // Move to next field after selection
2409
+ setTimeout(() => {
2410
+ setActiveFieldIndex(index + 1)
2411
+ }, 100)
2412
+ }}
2413
+ defaultValue={reasoningEffort}
2414
+ />
2415
+ )
2416
+ ) : null
2417
+ ) : field.name === 'maxTokens' ? (
2418
+ <Text color={theme.secondaryText}>
2419
+ Current:{' '}
2420
+ <Text color={theme.suggestion}>
2421
+ {MAX_TOKENS_OPTIONS.find(
2422
+ opt => opt.value === parseInt(maxTokens),
2423
+ )?.label || `${maxTokens} tokens`}
2424
+ </Text>
2425
+ </Text>
2426
+ ) : field.name === 'reasoningEffort' ? (
2427
+ <Text color={theme.secondaryText}>
2428
+ Current:{' '}
2429
+ <Text color={theme.suggestion}>{reasoningEffort}</Text>
2430
+ </Text>
2431
+ ) : null}
2432
+ </Box>
2433
+ </Box>
2434
+ ))}
2435
+
2436
+ <Box marginTop={1}>
2437
+ <Text dimColor>
2438
+ Press <Text color={theme.suggestion}>Tab</Text> to navigate,{' '}
2439
+ <Text color={theme.suggestion}>Enter</Text> to continue, or{' '}
2440
+ <Text color={theme.suggestion}>Esc</Text> to go back
2441
+ </Text>
2442
+ </Box>
2443
+ </Box>
2444
+ </Box>
2445
+ </Box>
2446
+ </Box>
2447
+ )
2448
+ }
2449
+
2450
+ // Render Resource Name Input Screen
2451
+ if (currentScreen === 'resourceName') {
2452
+ return (
2453
+ <Box flexDirection="column" gap={1}>
2454
+ <Box
2455
+ flexDirection="column"
2456
+ gap={1}
2457
+ borderStyle="round"
2458
+ borderColor={theme.secondaryBorder}
2459
+ paddingX={2}
2460
+ paddingY={1}
2461
+ >
2462
+ <Text bold>
2463
+ Azure Resource Setup{' '}
2464
+ {exitState.pending
2465
+ ? `(press ${exitState.keyName} again to exit)`
2466
+ : ''}
2467
+ </Text>
2468
+ <Box flexDirection="column" gap={1}>
2469
+ <Text bold>Enter your Azure OpenAI resource name:</Text>
2470
+ <Box flexDirection="column" width={70}>
2471
+ <Text color={theme.secondaryText}>
2472
+ This is the name of your Azure OpenAI resource (without the full
2473
+ domain).
2474
+ <Newline />
2475
+ For example, if your endpoint is
2476
+ "https://myresource.openai.azure.com", enter "myresource".
2477
+ </Text>
2478
+ </Box>
2479
+
2480
+ <Box>
2481
+ <TextInput
2482
+ placeholder="myazureresource"
2483
+ value={resourceName}
2484
+ onChange={setResourceName}
2485
+ onSubmit={handleResourceNameSubmit}
2486
+ columns={100}
2487
+ cursorOffset={resourceNameCursorOffset}
2488
+ onChangeCursorOffset={setResourceNameCursorOffset}
2489
+ showCursor={true}
2490
+ />
2491
+ </Box>
2492
+
2493
+ <Box marginTop={1}>
2494
+ <Text>
2495
+ <Text color={theme.suggestion} dimColor={!resourceName}>
2496
+ [Submit Resource Name]
2497
+ </Text>
2498
+ <Text> - Press Enter or click to continue</Text>
2499
+ </Text>
2500
+ </Box>
2501
+
2502
+ <Box marginTop={1}>
2503
+ <Text dimColor>
2504
+ Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2505
+ <Text color={theme.suggestion}>Esc</Text> to go back
2506
+ </Text>
2507
+ </Box>
2508
+ </Box>
2509
+ </Box>
2510
+ </Box>
2511
+ )
2512
+ }
2513
+
2514
+ // Render Base URL Input Screen (for all providers)
2515
+ if (currentScreen === 'baseUrl') {
2516
+ const isCustomOpenAI = selectedProvider === 'custom-openai'
2517
+
2518
+ // For custom-openai, we still use the old logic with customBaseUrl
2519
+ if (isCustomOpenAI) {
2520
+ return (
2521
+ <Box flexDirection="column" gap={1}>
2522
+ <Box
2523
+ flexDirection="column"
2524
+ gap={1}
2525
+ borderStyle="round"
2526
+ borderColor={theme.secondaryBorder}
2527
+ paddingX={2}
2528
+ paddingY={1}
2529
+ >
2530
+ <Text bold>
2531
+ Custom API Server Setup{' '}
2532
+ {exitState.pending
2533
+ ? `(press ${exitState.keyName} again to exit)`
2534
+ : ''}
2535
+ </Text>
2536
+ <Box flexDirection="column" gap={1}>
2537
+ <Text bold>Enter your custom API URL:</Text>
2538
+ <Box flexDirection="column" width={70}>
2539
+ <Text color={theme.secondaryText}>
2540
+ This is the base URL for your OpenAI-compatible API.
2541
+ <Newline />
2542
+ For example: https://api.example.com/v1
2543
+ </Text>
2544
+ </Box>
2545
+
2546
+ <Box>
2547
+ <TextInput
2548
+ placeholder="https://api.example.com/v1"
2549
+ value={customBaseUrl}
2550
+ onChange={setCustomBaseUrl}
2551
+ onSubmit={handleCustomBaseUrlSubmit}
2552
+ columns={100}
2553
+ cursorOffset={customBaseUrlCursorOffset}
2554
+ onChangeCursorOffset={setCustomBaseUrlCursorOffset}
2555
+ showCursor={!isLoadingModels}
2556
+ focus={!isLoadingModels}
2557
+ />
2558
+ </Box>
2559
+
2560
+ <Box marginTop={1}>
2561
+ <Text>
2562
+ <Text
2563
+ color={
2564
+ isLoadingModels ? theme.secondaryText : theme.suggestion
2565
+ }
2566
+ >
2567
+ [Submit Base URL]
2568
+ </Text>
2569
+ <Text> - Press Enter or click to continue</Text>
2570
+ </Text>
2571
+ </Box>
2572
+
2573
+ <Box marginTop={1}>
2574
+ <Text dimColor>
2575
+ Press <Text color={theme.suggestion}>Enter</Text> to continue
2576
+ or <Text color={theme.suggestion}>Esc</Text> to go back
2577
+ </Text>
2578
+ </Box>
2579
+ </Box>
2580
+ </Box>
2581
+ </Box>
2582
+ )
2583
+ }
2584
+
2585
+ // For all other providers, use the new general provider URL configuration
2586
+ const providerName = providers[selectedProvider]?.name || selectedProvider
2587
+ const defaultUrl = providers[selectedProvider]?.baseURL || ''
2588
+
2589
+ return (
2590
+ <Box flexDirection="column" gap={1}>
2591
+ <Box
2592
+ flexDirection="column"
2593
+ gap={1}
2594
+ borderStyle="round"
2595
+ borderColor={theme.secondaryBorder}
2596
+ paddingX={2}
2597
+ paddingY={1}
2598
+ >
2599
+ <Text bold>
2600
+ {providerName} API Configuration{' '}
2601
+ {exitState.pending
2602
+ ? `(press ${exitState.keyName} again to exit)`
2603
+ : ''}
2604
+ </Text>
2605
+ <Box flexDirection="column" gap={1}>
2606
+ <Text bold>Configure the API endpoint for {providerName}:</Text>
2607
+ <Box flexDirection="column" width={70}>
2608
+ <Text color={theme.secondaryText}>
2609
+ {selectedProvider === 'ollama' ? (
2610
+ <>
2611
+ This is the URL of your Ollama server.
2612
+ <Newline />
2613
+ Default is http://localhost:11434/v1 for local Ollama
2614
+ installations.
2615
+ </>
2616
+ ) : (
2617
+ <>
2618
+ This is the base URL for the {providerName} API.
2619
+ <Newline />
2620
+ You can modify this URL or press Enter to use the default.
2621
+ </>
2622
+ )}
2623
+ </Text>
2624
+ </Box>
2625
+
2626
+ <Box>
2627
+ <TextInput
2628
+ placeholder={defaultUrl}
2629
+ value={providerBaseUrl}
2630
+ onChange={setProviderBaseUrl}
2631
+ onSubmit={handleProviderBaseUrlSubmit}
2632
+ columns={100}
2633
+ cursorOffset={providerBaseUrlCursorOffset}
2634
+ onChangeCursorOffset={setProviderBaseUrlCursorOffset}
2635
+ showCursor={!isLoadingModels}
2636
+ focus={!isLoadingModels}
2637
+ />
2638
+ </Box>
2639
+
2640
+ <Box marginTop={1}>
2641
+ <Text>
2642
+ <Text
2643
+ color={
2644
+ isLoadingModels ? theme.secondaryText : theme.suggestion
2645
+ }
2646
+ >
2647
+ [Submit Base URL]
2648
+ </Text>
2649
+ <Text> - Press Enter or click to continue</Text>
2650
+ </Text>
2651
+ </Box>
2652
+
2653
+ {isLoadingModels && (
2654
+ <Box marginTop={1}>
2655
+ <Text color={theme.success}>
2656
+ {selectedProvider === 'ollama'
2657
+ ? 'Connecting to Ollama server...'
2658
+ : `Connecting to ${providerName}...`}
2659
+ </Text>
2660
+ </Box>
2661
+ )}
2662
+
2663
+ {modelLoadError && (
2664
+ <Box marginTop={1}>
2665
+ <Text color="red">Error: {modelLoadError}</Text>
2666
+ </Box>
2667
+ )}
2668
+
2669
+ <Box marginTop={1}>
2670
+ <Text dimColor>
2671
+ Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2672
+ <Text color={theme.suggestion}>Esc</Text> to go back
2673
+ </Text>
2674
+ </Box>
2675
+ </Box>
2676
+ </Box>
2677
+ </Box>
2678
+ )
2679
+ }
2680
+
2681
+ // Render Custom Model Input Screen
2682
+ if (currentScreen === 'modelInput') {
2683
+ const modelTypeText = 'this model profile'
2684
+
2685
+ // Determine the screen title and description based on provider
2686
+ let screenTitle = 'Manual Model Setup'
2687
+ let description = 'Enter the model name manually'
2688
+ let placeholder = 'gpt-4'
2689
+ let examples = 'For example: "gpt-4", "gpt-3.5-turbo", etc.'
2690
+
2691
+ if (selectedProvider === 'azure') {
2692
+ screenTitle = 'Azure Model Setup'
2693
+ description = `Enter your Azure OpenAI deployment name for ${modelTypeText}:`
2694
+ examples = 'For example: "gpt-4", "gpt-35-turbo", etc.'
2695
+ placeholder = 'gpt-4'
2696
+ } else if (selectedProvider === 'anthropic') {
2697
+ screenTitle = 'Claude Model Setup'
2698
+ description = `Enter the Claude model name for ${modelTypeText}:`
2699
+ examples =
2700
+ 'For example: "claude-3-5-sonnet-latest", "claude-3-5-haiku-latest", etc.'
2701
+ placeholder = 'claude-3-5-sonnet-latest'
2702
+ } else if (selectedProvider === 'bigdream') {
2703
+ screenTitle = 'BigDream Model Setup'
2704
+ description = `Enter the BigDream model name for ${modelTypeText}:`
2705
+ examples =
2706
+ 'For example: "claude-3-5-sonnet-latest", "claude-3-5-haiku-latest", etc.'
2707
+ placeholder = 'claude-3-5-sonnet-latest'
2708
+ } else if (selectedProvider === 'kimi') {
2709
+ screenTitle = 'Kimi Model Setup'
2710
+ description = `Enter the Kimi model name for ${modelTypeText}:`
2711
+ examples = 'For example: "kimi-k2-0711-preview"'
2712
+ placeholder = 'kimi-k2-0711-preview'
2713
+ } else if (selectedProvider === 'deepseek') {
2714
+ screenTitle = 'DeepSeek Model Setup'
2715
+ description = `Enter the DeepSeek model name for ${modelTypeText}:`
2716
+ examples =
2717
+ 'For example: "deepseek-chat", "deepseek-coder", "deepseek-reasoner", etc.'
2718
+ placeholder = 'deepseek-chat'
2719
+ } else if (selectedProvider === 'siliconflow') {
2720
+ screenTitle = 'SiliconFlow Model Setup'
2721
+ description = `Enter the SiliconFlow model name for ${modelTypeText}:`
2722
+ examples =
2723
+ 'For example: "Qwen/Qwen2.5-72B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", etc.'
2724
+ placeholder = 'Qwen/Qwen2.5-72B-Instruct'
2725
+ } else if (selectedProvider === 'qwen') {
2726
+ screenTitle = 'Qwen Model Setup'
2727
+ description = `Enter the Qwen model name for ${modelTypeText}:`
2728
+ examples = 'For example: "qwen-plus", "qwen-turbo", "qwen-max", etc.'
2729
+ placeholder = 'qwen-plus'
2730
+ } else if (selectedProvider === 'glm') {
2731
+ screenTitle = 'GLM Model Setup'
2732
+ description = `Enter the GLM model name for ${modelTypeText}:`
2733
+ examples = 'For example: "glm-4", "glm-4v", "glm-3-turbo", etc.'
2734
+ placeholder = 'glm-4'
2735
+ } else if (selectedProvider === 'minimax') {
2736
+ screenTitle = 'MiniMax Model Setup'
2737
+ description = `Enter the MiniMax model name for ${modelTypeText}:`
2738
+ examples =
2739
+ 'For example: "abab6.5s-chat", "abab6.5g-chat", "abab5.5s-chat", etc.'
2740
+ placeholder = 'abab6.5s-chat'
2741
+ } else if (selectedProvider === 'baidu-qianfan') {
2742
+ screenTitle = 'Baidu Qianfan Model Setup'
2743
+ description = `Enter the Baidu Qianfan model name for ${modelTypeText}:`
2744
+ examples =
2745
+ 'For example: "ERNIE-4.0-8K", "ERNIE-3.5-8K", "ERNIE-Speed-128K", etc.'
2746
+ placeholder = 'ERNIE-4.0-8K'
2747
+ } else if (selectedProvider === 'custom-openai') {
2748
+ screenTitle = 'Custom API Model Setup'
2749
+ description = `Enter the model name for ${modelTypeText}:`
2750
+ examples = 'Enter the exact model name as supported by your API endpoint.'
2751
+ placeholder = 'model-name'
2752
+ }
2753
+
2754
+ return (
2755
+ <Box flexDirection="column" gap={1}>
2756
+ <Box
2757
+ flexDirection="column"
2758
+ gap={1}
2759
+ borderStyle="round"
2760
+ borderColor={theme.secondaryBorder}
2761
+ paddingX={2}
2762
+ paddingY={1}
2763
+ >
2764
+ <Text bold>
2765
+ {screenTitle}{' '}
2766
+ {exitState.pending
2767
+ ? `(press ${exitState.keyName} again to exit)`
2768
+ : ''}
2769
+ </Text>
2770
+ <Box flexDirection="column" gap={1}>
2771
+ <Text bold>{description}</Text>
2772
+ <Box flexDirection="column" width={70}>
2773
+ <Text color={theme.secondaryText}>
2774
+ {selectedProvider === 'azure'
2775
+ ? 'This is the deployment name you configured in your Azure OpenAI resource.'
2776
+ : selectedProvider === 'anthropic'
2777
+ ? 'This should be a valid Claude model identifier from Claude.'
2778
+ : selectedProvider === 'bigdream'
2779
+ ? 'This should be a valid Claude model identifier supported by BigDream.'
2780
+ : selectedProvider === 'kimi'
2781
+ ? 'This should be a valid Kimi model identifier from Moonshot AI.'
2782
+ : selectedProvider === 'deepseek'
2783
+ ? 'This should be a valid DeepSeek model identifier.'
2784
+ : selectedProvider === 'siliconflow'
2785
+ ? 'This should be a valid SiliconFlow model identifier.'
2786
+ : selectedProvider === 'qwen'
2787
+ ? 'This should be a valid Qwen model identifier from Alibaba Cloud.'
2788
+ : selectedProvider === 'glm'
2789
+ ? 'This should be a valid GLM model identifier from Zhipu AI.'
2790
+ : selectedProvider === 'minimax'
2791
+ ? 'This should be a valid MiniMax model identifier.'
2792
+ : selectedProvider === 'baidu-qianfan'
2793
+ ? 'This should be a valid Baidu Qianfan model identifier.'
2794
+ : 'This should match the model name supported by your API endpoint.'}
2795
+ <Newline />
2796
+ {examples}
2797
+ </Text>
2798
+ </Box>
2799
+
2800
+ <Box>
2801
+ <TextInput
2802
+ placeholder={placeholder}
2803
+ value={customModelName}
2804
+ onChange={setCustomModelName}
2805
+ onSubmit={handleCustomModelSubmit}
2806
+ columns={100}
2807
+ cursorOffset={customModelNameCursorOffset}
2808
+ onChangeCursorOffset={setCustomModelNameCursorOffset}
2809
+ showCursor={true}
2810
+ />
2811
+ </Box>
2812
+
2813
+ <Box marginTop={1}>
2814
+ <Text>
2815
+ <Text color={theme.suggestion} dimColor={!customModelName}>
2816
+ [Submit Model Name]
2817
+ </Text>
2818
+ <Text> - Press Enter or click to continue</Text>
2819
+ </Text>
2820
+ </Box>
2821
+
2822
+ <Box marginTop={1}>
2823
+ <Text dimColor>
2824
+ Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2825
+ <Text color={theme.suggestion}>Esc</Text> to go back
2826
+ </Text>
2827
+ </Box>
2828
+ </Box>
2829
+ </Box>
2830
+ </Box>
2831
+ )
2832
+ }
2833
+
2834
+ // Render Context Length Selection Screen
2835
+ if (currentScreen === 'contextLength') {
2836
+ const selectedOption =
2837
+ CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength) ||
2838
+ CONTEXT_LENGTH_OPTIONS[2] // Default to 128K
2839
+
2840
+ return (
2841
+ <Box flexDirection="column" gap={1}>
2842
+ <Box
2843
+ flexDirection="column"
2844
+ gap={1}
2845
+ borderStyle="round"
2846
+ borderColor={theme.secondaryBorder}
2847
+ paddingX={2}
2848
+ paddingY={1}
2849
+ >
2850
+ <Text bold>
2851
+ Context Length Configuration{' '}
2852
+ {exitState.pending
2853
+ ? `(press ${exitState.keyName} again to exit)`
2854
+ : ''}
2855
+ </Text>
2856
+ <Box flexDirection="column" gap={1}>
2857
+ <Text bold>Choose the context window length for your model:</Text>
2858
+ <Box flexDirection="column" width={70}>
2859
+ <Text color={theme.secondaryText}>
2860
+ This determines how much conversation history and context the
2861
+ model can process at once. Higher values allow for longer
2862
+ conversations but may increase costs.
2863
+ </Text>
2864
+ </Box>
2865
+
2866
+ <Box flexDirection="column" marginY={1}>
2867
+ {CONTEXT_LENGTH_OPTIONS.map((option, index) => {
2868
+ const isSelected = option.value === contextLength
2869
+ return (
2870
+ <Box key={option.value} flexDirection="row">
2871
+ <Text color={isSelected ? 'blue' : undefined}>
2872
+ {isSelected ? '→ ' : ' '}
2873
+ {option.label}
2874
+ {option.value === DEFAULT_CONTEXT_LENGTH
2875
+ ? ' (recommended)'
2876
+ : ''}
2877
+ </Text>
2878
+ </Box>
2879
+ )
2880
+ })}
2881
+ </Box>
2882
+
2883
+ <Box flexDirection="column" marginY={1}>
2884
+ <Text dimColor>
2885
+ Selected:{' '}
2886
+ <Text color={theme.suggestion}>{selectedOption.label}</Text>
2887
+ </Text>
2888
+ </Box>
2889
+ </Box>
2890
+ </Box>
2891
+
2892
+ <Box marginLeft={1}>
2893
+ <Text dimColor>
2894
+ ↑/↓ to select · Enter to continue · Esc to go back
2895
+ </Text>
2896
+ </Box>
2897
+ </Box>
2898
+ )
2899
+ }
2900
+
2901
+ // Render Connection Test Screen
2902
+ if (currentScreen === 'connectionTest') {
2903
+ const providerDisplayName = getProviderLabel(selectedProvider, 0).split(
2904
+ ' (',
2905
+ )[0]
2906
+
2907
+ return (
2908
+ <Box flexDirection="column" gap={1}>
2909
+ <Box
2910
+ flexDirection="column"
2911
+ gap={1}
2912
+ borderStyle="round"
2913
+ borderColor={theme.secondaryBorder}
2914
+ paddingX={2}
2915
+ paddingY={1}
2916
+ >
2917
+ <Text bold>
2918
+ Connection Test{' '}
2919
+ {exitState.pending
2920
+ ? `(press ${exitState.keyName} again to exit)`
2921
+ : ''}
2922
+ </Text>
2923
+ <Box flexDirection="column" gap={1}>
2924
+ <Text bold>Testing connection to {providerDisplayName}...</Text>
2925
+ <Box flexDirection="column" width={70}>
2926
+ <Text color={theme.secondaryText}>
2927
+ This will verify your configuration by sending a test request to
2928
+ the API.
2929
+ {selectedProvider === 'minimax' && (
2930
+ <>
2931
+ <Newline />
2932
+ For MiniMax, we'll test both v2 and v1 endpoints to find the
2933
+ best one.
2934
+ </>
2935
+ )}
2936
+ </Text>
2937
+ </Box>
2938
+
2939
+ {!connectionTestResult && !isTestingConnection && (
2940
+ <Box marginY={1}>
2941
+ <Text>
2942
+ <Text color={theme.suggestion}>Press Enter</Text> to start the
2943
+ connection test
2944
+ </Text>
2945
+ </Box>
2946
+ )}
2947
+
2948
+ {isTestingConnection && (
2949
+ <Box marginY={1}>
2950
+ <Text color={theme.suggestion}>🔄 Testing connection...</Text>
2951
+ </Box>
2952
+ )}
2953
+
2954
+ {connectionTestResult && (
2955
+ <Box flexDirection="column" marginY={1} paddingX={1}>
2956
+ <Text
2957
+ color={connectionTestResult.success ? theme.success : 'red'}
2958
+ >
2959
+ {connectionTestResult.message}
2960
+ </Text>
2961
+
2962
+ {connectionTestResult.endpoint && (
2963
+ <Text color={theme.secondaryText}>
2964
+ Endpoint: {connectionTestResult.endpoint}
2965
+ </Text>
2966
+ )}
2967
+
2968
+ {connectionTestResult.details && (
2969
+ <Text color={theme.secondaryText}>
2970
+ Details: {connectionTestResult.details}
2971
+ </Text>
2972
+ )}
2973
+
2974
+ {connectionTestResult.success ? (
2975
+ <Box marginTop={1}>
2976
+ <Text color={theme.success}>
2977
+ ✅ Automatically proceeding to confirmation...
2978
+ </Text>
2979
+ </Box>
2980
+ ) : (
2981
+ <Box marginTop={1}>
2982
+ <Text>
2983
+ <Text color={theme.suggestion}>Press Enter</Text> to retry
2984
+ test, or <Text color={theme.suggestion}>Esc</Text> to go
2985
+ back
2986
+ </Text>
2987
+ </Box>
2988
+ )}
2989
+ </Box>
2990
+ )}
2991
+
2992
+ <Box marginTop={1}>
2993
+ <Text dimColor>
2994
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
2995
+ context length
2996
+ </Text>
2997
+ </Box>
2998
+ </Box>
2999
+ </Box>
3000
+ </Box>
3001
+ )
3002
+ }
3003
+
3004
+ // Render Confirmation Screen
3005
+ if (currentScreen === 'confirmation') {
3006
+ // Show model profile being created
3007
+
3008
+ // Get provider display name
3009
+ const providerDisplayName = getProviderLabel(selectedProvider, 0).split(
3010
+ ' (',
3011
+ )[0]
3012
+
3013
+ // Determine if provider requires API key
3014
+ const showsApiKey = selectedProvider !== 'ollama'
3015
+
3016
+ return (
3017
+ <Box flexDirection="column" gap={1}>
3018
+ <Box
3019
+ flexDirection="column"
3020
+ gap={1}
3021
+ borderStyle="round"
3022
+ borderColor={theme.secondaryBorder}
3023
+ paddingX={2}
3024
+ paddingY={1}
3025
+ >
3026
+ <Text bold>
3027
+ Configuration Confirmation{' '}
3028
+ {exitState.pending
3029
+ ? `(press ${exitState.keyName} again to exit)`
3030
+ : ''}
3031
+ </Text>
3032
+ <Box flexDirection="column" gap={1}>
3033
+ <Text bold>Confirm your model configuration:</Text>
3034
+ <Box flexDirection="column" width={70}>
3035
+ <Text color={theme.secondaryText}>
3036
+ Please review your selections before saving.
3037
+ </Text>
3038
+ </Box>
3039
+
3040
+ {validationError && (
3041
+ <Box flexDirection="column" marginY={1} paddingX={1}>
3042
+ <Text color={theme.error} bold>
3043
+ ⚠ Configuration Error:
3044
+ </Text>
3045
+ <Text color={theme.error}>{validationError}</Text>
3046
+ </Box>
3047
+ )}
3048
+
3049
+ <Box flexDirection="column" marginY={1} paddingX={1}>
3050
+ <Text>
3051
+ <Text bold>Provider: </Text>
3052
+ <Text color={theme.suggestion}>{providerDisplayName}</Text>
3053
+ </Text>
3054
+
3055
+ {selectedProvider === 'azure' && (
3056
+ <Text>
3057
+ <Text bold>Resource Name: </Text>
3058
+ <Text color={theme.suggestion}>{resourceName}</Text>
3059
+ </Text>
3060
+ )}
3061
+
3062
+ {selectedProvider === 'ollama' && (
3063
+ <Text>
3064
+ <Text bold>Server URL: </Text>
3065
+ <Text color={theme.suggestion}>{ollamaBaseUrl}</Text>
3066
+ </Text>
3067
+ )}
3068
+
3069
+ {selectedProvider === 'custom-openai' && (
3070
+ <Text>
3071
+ <Text bold>API Base URL: </Text>
3072
+ <Text color={theme.suggestion}>{customBaseUrl}</Text>
3073
+ </Text>
3074
+ )}
3075
+
3076
+ <Text>
3077
+ <Text bold>Model: </Text>
3078
+ <Text color={theme.suggestion}>{selectedModel}</Text>
3079
+ </Text>
3080
+
3081
+ {apiKey && showsApiKey && (
3082
+ <Text>
3083
+ <Text bold>API Key: </Text>
3084
+ <Text color={theme.suggestion}>****{apiKey.slice(-4)}</Text>
3085
+ </Text>
3086
+ )}
3087
+
3088
+ {maxTokens && (
3089
+ <Text>
3090
+ <Text bold>Max Tokens: </Text>
3091
+ <Text color={theme.suggestion}>{maxTokens}</Text>
3092
+ </Text>
3093
+ )}
3094
+
3095
+ <Text>
3096
+ <Text bold>Context Length: </Text>
3097
+ <Text color={theme.suggestion}>
3098
+ {CONTEXT_LENGTH_OPTIONS.find(
3099
+ opt => opt.value === contextLength,
3100
+ )?.label || `${contextLength.toLocaleString()} tokens`}
3101
+ </Text>
3102
+ </Text>
3103
+
3104
+ {supportsReasoningEffort && (
3105
+ <Text>
3106
+ <Text bold>Reasoning Effort: </Text>
3107
+ <Text color={theme.suggestion}>{reasoningEffort}</Text>
3108
+ </Text>
3109
+ )}
3110
+ </Box>
3111
+
3112
+ <Box marginTop={1}>
3113
+ <Text dimColor>
3114
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
3115
+ model parameters or <Text color={theme.suggestion}>Enter</Text>{' '}
3116
+ to save configuration
3117
+ </Text>
3118
+ </Box>
3119
+ </Box>
3120
+ </Box>
3121
+ </Box>
3122
+ )
3123
+ }
3124
+
3125
+ // Render Anthropic Sub-Menu Selection Screen
3126
+ if (currentScreen === 'anthropicSubMenu') {
3127
+ const anthropicOptions = [
3128
+ { label: 'Official Anthropic API', value: 'official' },
3129
+ { label: 'BigDream (Community Proxy)', value: 'bigdream' },
3130
+ { label: 'OpenDev (Community Proxy)', value: 'opendev' },
3131
+ { label: 'Custom Anthropic-Compatible API', value: 'custom' },
3132
+ ]
3133
+
3134
+ return (
3135
+ <Box flexDirection="column" gap={1}>
3136
+ <Box
3137
+ flexDirection="column"
3138
+ gap={1}
3139
+ borderStyle="round"
3140
+ borderColor={theme.secondaryBorder}
3141
+ paddingX={2}
3142
+ paddingY={1}
3143
+ >
3144
+ <Text bold>
3145
+ Claude Provider Selection{' '}
3146
+ {exitState.pending
3147
+ ? `(press ${exitState.keyName} again to exit)`
3148
+ : ''}
3149
+ </Text>
3150
+ <Box flexDirection="column" gap={1}>
3151
+ <Text bold>
3152
+ Choose your Anthropic API access method for this model profile:
3153
+ </Text>
3154
+ <Box flexDirection="column" width={70}>
3155
+ <Text color={theme.secondaryText}>
3156
+ • <Text bold>Official Anthropic API:</Text> Direct access to
3157
+ Anthropic's official API
3158
+ <Newline />• <Text bold>BigDream:</Text> Community proxy
3159
+ providing Claude access
3160
+ <Newline />• <Text bold>Custom:</Text> Your own
3161
+ Anthropic-compatible API endpoint
3162
+ </Text>
3163
+ </Box>
3164
+
3165
+ <Select
3166
+ options={anthropicOptions}
3167
+ onChange={handleAnthropicProviderSelection}
3168
+ />
3169
+
3170
+ <Box marginTop={1}>
3171
+ <Text dimColor>
3172
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
3173
+ provider selection
3174
+ </Text>
3175
+ </Box>
3176
+ </Box>
3177
+ </Box>
3178
+ </Box>
3179
+ )
3180
+ }
3181
+
3182
+ // Render Provider Selection Screen
3183
+ return (
3184
+ <ScreenContainer title="Provider Selection" exitState={exitState}>
3185
+ <Box flexDirection="column" gap={1}>
3186
+ <Text bold>
3187
+ Select your preferred AI provider for this model profile:
3188
+ </Text>
3189
+ <Box flexDirection="column" width={70}>
3190
+ <Text color={theme.secondaryText}>
3191
+ Choose the provider you want to use for this model profile.
3192
+ <Newline />
3193
+ This will determine which models are available to you.
3194
+ </Text>
3195
+ </Box>
3196
+
3197
+ <Select options={providerOptions} onChange={handleProviderSelection} />
3198
+
3199
+ <Box marginTop={1}>
3200
+ <Text dimColor>
3201
+ You can change this later by running{' '}
3202
+ <Text color={theme.suggestion}>/model</Text> again
3203
+ </Text>
3204
+ </Box>
3205
+ </Box>
3206
+ </ScreenContainer>
3207
+ )
3208
+ }