@shareai-lab/kode 1.0.70 → 1.0.73

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. package/README.md +342 -75
  2. package/README.zh-CN.md +292 -0
  3. package/cli.js +62 -0
  4. package/package.json +49 -25
  5. package/scripts/postinstall.js +56 -0
  6. package/src/ProjectOnboarding.tsx +198 -0
  7. package/src/Tool.ts +82 -0
  8. package/src/commands/agents.tsx +3401 -0
  9. package/src/commands/approvedTools.ts +53 -0
  10. package/src/commands/bug.tsx +20 -0
  11. package/src/commands/clear.ts +43 -0
  12. package/src/commands/compact.ts +120 -0
  13. package/src/commands/config.tsx +19 -0
  14. package/src/commands/cost.ts +18 -0
  15. package/src/commands/ctx_viz.ts +209 -0
  16. package/src/commands/doctor.ts +24 -0
  17. package/src/commands/help.tsx +19 -0
  18. package/src/commands/init.ts +37 -0
  19. package/src/commands/listen.ts +42 -0
  20. package/src/commands/login.tsx +51 -0
  21. package/src/commands/logout.tsx +40 -0
  22. package/src/commands/mcp.ts +41 -0
  23. package/src/commands/model.tsx +40 -0
  24. package/src/commands/modelstatus.tsx +20 -0
  25. package/src/commands/onboarding.tsx +34 -0
  26. package/src/commands/pr_comments.ts +59 -0
  27. package/src/commands/refreshCommands.ts +54 -0
  28. package/src/commands/release-notes.ts +34 -0
  29. package/src/commands/resume.tsx +31 -0
  30. package/src/commands/review.ts +49 -0
  31. package/src/commands/terminalSetup.ts +221 -0
  32. package/src/commands.ts +139 -0
  33. package/src/components/ApproveApiKey.tsx +93 -0
  34. package/src/components/AsciiLogo.tsx +13 -0
  35. package/src/components/AutoUpdater.tsx +148 -0
  36. package/src/components/Bug.tsx +367 -0
  37. package/src/components/Config.tsx +293 -0
  38. package/src/components/ConsoleOAuthFlow.tsx +327 -0
  39. package/src/components/Cost.tsx +23 -0
  40. package/src/components/CostThresholdDialog.tsx +46 -0
  41. package/src/components/CustomSelect/option-map.ts +42 -0
  42. package/src/components/CustomSelect/select-option.tsx +78 -0
  43. package/src/components/CustomSelect/select.tsx +152 -0
  44. package/src/components/CustomSelect/theme.ts +45 -0
  45. package/src/components/CustomSelect/use-select-state.ts +414 -0
  46. package/src/components/CustomSelect/use-select.ts +35 -0
  47. package/src/components/FallbackToolUseRejectedMessage.tsx +15 -0
  48. package/src/components/FileEditToolUpdatedMessage.tsx +66 -0
  49. package/src/components/Help.tsx +215 -0
  50. package/src/components/HighlightedCode.tsx +33 -0
  51. package/src/components/InvalidConfigDialog.tsx +113 -0
  52. package/src/components/Link.tsx +32 -0
  53. package/src/components/LogSelector.tsx +86 -0
  54. package/src/components/Logo.tsx +145 -0
  55. package/src/components/MCPServerApprovalDialog.tsx +100 -0
  56. package/src/components/MCPServerDialogCopy.tsx +25 -0
  57. package/src/components/MCPServerMultiselectDialog.tsx +109 -0
  58. package/src/components/Message.tsx +221 -0
  59. package/src/components/MessageResponse.tsx +15 -0
  60. package/src/components/MessageSelector.tsx +211 -0
  61. package/src/components/ModeIndicator.tsx +88 -0
  62. package/src/components/ModelConfig.tsx +301 -0
  63. package/src/components/ModelListManager.tsx +227 -0
  64. package/src/components/ModelSelector.tsx +3386 -0
  65. package/src/components/ModelStatusDisplay.tsx +230 -0
  66. package/src/components/Onboarding.tsx +274 -0
  67. package/src/components/PressEnterToContinue.tsx +11 -0
  68. package/src/components/PromptInput.tsx +740 -0
  69. package/src/components/SentryErrorBoundary.ts +33 -0
  70. package/src/components/Spinner.tsx +129 -0
  71. package/src/components/StickerRequestForm.tsx +16 -0
  72. package/src/components/StructuredDiff.tsx +191 -0
  73. package/src/components/TextInput.tsx +259 -0
  74. package/src/components/TodoItem.tsx +11 -0
  75. package/src/components/TokenWarning.tsx +31 -0
  76. package/src/components/ToolUseLoader.tsx +40 -0
  77. package/src/components/TrustDialog.tsx +106 -0
  78. package/src/components/binary-feedback/BinaryFeedback.tsx +63 -0
  79. package/src/components/binary-feedback/BinaryFeedbackOption.tsx +111 -0
  80. package/src/components/binary-feedback/BinaryFeedbackView.tsx +172 -0
  81. package/src/components/binary-feedback/utils.ts +220 -0
  82. package/src/components/messages/AssistantBashOutputMessage.tsx +22 -0
  83. package/src/components/messages/AssistantLocalCommandOutputMessage.tsx +49 -0
  84. package/src/components/messages/AssistantRedactedThinkingMessage.tsx +19 -0
  85. package/src/components/messages/AssistantTextMessage.tsx +144 -0
  86. package/src/components/messages/AssistantThinkingMessage.tsx +40 -0
  87. package/src/components/messages/AssistantToolUseMessage.tsx +133 -0
  88. package/src/components/messages/TaskProgressMessage.tsx +32 -0
  89. package/src/components/messages/TaskToolMessage.tsx +58 -0
  90. package/src/components/messages/UserBashInputMessage.tsx +28 -0
  91. package/src/components/messages/UserCommandMessage.tsx +30 -0
  92. package/src/components/messages/UserKodingInputMessage.tsx +28 -0
  93. package/src/components/messages/UserPromptMessage.tsx +35 -0
  94. package/src/components/messages/UserTextMessage.tsx +39 -0
  95. package/src/components/messages/UserToolResultMessage/UserToolCanceledMessage.tsx +12 -0
  96. package/src/components/messages/UserToolResultMessage/UserToolErrorMessage.tsx +36 -0
  97. package/src/components/messages/UserToolResultMessage/UserToolRejectMessage.tsx +31 -0
  98. package/src/components/messages/UserToolResultMessage/UserToolResultMessage.tsx +57 -0
  99. package/src/components/messages/UserToolResultMessage/UserToolSuccessMessage.tsx +35 -0
  100. package/src/components/messages/UserToolResultMessage/utils.tsx +56 -0
  101. package/src/components/permissions/BashPermissionRequest/BashPermissionRequest.tsx +121 -0
  102. package/src/components/permissions/FallbackPermissionRequest.tsx +153 -0
  103. package/src/components/permissions/FileEditPermissionRequest/FileEditPermissionRequest.tsx +182 -0
  104. package/src/components/permissions/FileEditPermissionRequest/FileEditToolDiff.tsx +77 -0
  105. package/src/components/permissions/FileWritePermissionRequest/FileWritePermissionRequest.tsx +164 -0
  106. package/src/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.tsx +83 -0
  107. package/src/components/permissions/FilesystemPermissionRequest/FilesystemPermissionRequest.tsx +240 -0
  108. package/src/components/permissions/PermissionRequest.tsx +101 -0
  109. package/src/components/permissions/PermissionRequestTitle.tsx +69 -0
  110. package/src/components/permissions/hooks.ts +44 -0
  111. package/src/components/permissions/toolUseOptions.ts +59 -0
  112. package/src/components/permissions/utils.ts +23 -0
  113. package/src/constants/betas.ts +5 -0
  114. package/src/constants/claude-asterisk-ascii-art.tsx +238 -0
  115. package/src/constants/figures.ts +4 -0
  116. package/src/constants/keys.ts +3 -0
  117. package/src/constants/macros.ts +8 -0
  118. package/src/constants/modelCapabilities.ts +179 -0
  119. package/src/constants/models.ts +1025 -0
  120. package/src/constants/oauth.ts +18 -0
  121. package/src/constants/product.ts +17 -0
  122. package/src/constants/prompts.ts +177 -0
  123. package/src/constants/releaseNotes.ts +7 -0
  124. package/src/context/PermissionContext.tsx +149 -0
  125. package/src/context.ts +278 -0
  126. package/src/cost-tracker.ts +84 -0
  127. package/src/entrypoints/cli.tsx +1518 -0
  128. package/src/entrypoints/mcp.ts +176 -0
  129. package/src/history.ts +25 -0
  130. package/src/hooks/useApiKeyVerification.ts +59 -0
  131. package/src/hooks/useArrowKeyHistory.ts +55 -0
  132. package/src/hooks/useCanUseTool.ts +138 -0
  133. package/src/hooks/useCancelRequest.ts +39 -0
  134. package/src/hooks/useDoublePress.ts +42 -0
  135. package/src/hooks/useExitOnCtrlCD.ts +31 -0
  136. package/src/hooks/useInterval.ts +25 -0
  137. package/src/hooks/useLogMessages.ts +16 -0
  138. package/src/hooks/useLogStartupTime.ts +12 -0
  139. package/src/hooks/useNotifyAfterTimeout.ts +65 -0
  140. package/src/hooks/usePermissionRequestLogging.ts +44 -0
  141. package/src/hooks/useTerminalSize.ts +49 -0
  142. package/src/hooks/useTextInput.ts +318 -0
  143. package/src/hooks/useUnifiedCompletion.ts +1404 -0
  144. package/src/messages.ts +38 -0
  145. package/src/permissions.ts +268 -0
  146. package/src/query.ts +707 -0
  147. package/src/screens/ConfigureNpmPrefix.tsx +197 -0
  148. package/src/screens/Doctor.tsx +219 -0
  149. package/src/screens/LogList.tsx +68 -0
  150. package/src/screens/REPL.tsx +798 -0
  151. package/src/screens/ResumeConversation.tsx +68 -0
  152. package/src/services/adapters/base.ts +38 -0
  153. package/src/services/adapters/chatCompletions.ts +90 -0
  154. package/src/services/adapters/responsesAPI.ts +170 -0
  155. package/src/services/browserMocks.ts +66 -0
  156. package/src/services/claude.ts +2083 -0
  157. package/src/services/customCommands.ts +704 -0
  158. package/src/services/fileFreshness.ts +377 -0
  159. package/src/services/gpt5ConnectionTest.ts +340 -0
  160. package/src/services/mcpClient.ts +564 -0
  161. package/src/services/mcpServerApproval.tsx +50 -0
  162. package/src/services/mentionProcessor.ts +273 -0
  163. package/src/services/modelAdapterFactory.ts +69 -0
  164. package/src/services/notifier.ts +40 -0
  165. package/src/services/oauth.ts +357 -0
  166. package/src/services/openai.ts +1305 -0
  167. package/src/services/responseStateManager.ts +90 -0
  168. package/src/services/sentry.ts +3 -0
  169. package/src/services/statsig.ts +171 -0
  170. package/src/services/statsigStorage.ts +86 -0
  171. package/src/services/systemReminder.ts +507 -0
  172. package/src/services/vcr.ts +161 -0
  173. package/src/test/testAdapters.ts +96 -0
  174. package/src/tools/ArchitectTool/ArchitectTool.tsx +122 -0
  175. package/src/tools/ArchitectTool/prompt.ts +15 -0
  176. package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx +569 -0
  177. package/src/tools/BashTool/BashTool.tsx +243 -0
  178. package/src/tools/BashTool/BashToolResultMessage.tsx +38 -0
  179. package/src/tools/BashTool/OutputLine.tsx +49 -0
  180. package/src/tools/BashTool/prompt.ts +174 -0
  181. package/src/tools/BashTool/utils.ts +56 -0
  182. package/src/tools/FileEditTool/FileEditTool.tsx +315 -0
  183. package/src/tools/FileEditTool/prompt.ts +51 -0
  184. package/src/tools/FileEditTool/utils.ts +58 -0
  185. package/src/tools/FileReadTool/FileReadTool.tsx +404 -0
  186. package/src/tools/FileReadTool/prompt.ts +7 -0
  187. package/src/tools/FileWriteTool/FileWriteTool.tsx +297 -0
  188. package/src/tools/FileWriteTool/prompt.ts +10 -0
  189. package/src/tools/GlobTool/GlobTool.tsx +119 -0
  190. package/src/tools/GlobTool/prompt.ts +8 -0
  191. package/src/tools/GrepTool/GrepTool.tsx +147 -0
  192. package/src/tools/GrepTool/prompt.ts +11 -0
  193. package/src/tools/MCPTool/MCPTool.tsx +107 -0
  194. package/src/tools/MCPTool/prompt.ts +3 -0
  195. package/src/tools/MemoryReadTool/MemoryReadTool.tsx +127 -0
  196. package/src/tools/MemoryReadTool/prompt.ts +3 -0
  197. package/src/tools/MemoryWriteTool/MemoryWriteTool.tsx +89 -0
  198. package/src/tools/MemoryWriteTool/prompt.ts +3 -0
  199. package/src/tools/MultiEditTool/MultiEditTool.tsx +366 -0
  200. package/src/tools/MultiEditTool/prompt.ts +45 -0
  201. package/src/tools/NotebookEditTool/NotebookEditTool.tsx +298 -0
  202. package/src/tools/NotebookEditTool/prompt.ts +3 -0
  203. package/src/tools/NotebookReadTool/NotebookReadTool.tsx +258 -0
  204. package/src/tools/NotebookReadTool/prompt.ts +3 -0
  205. package/src/tools/StickerRequestTool/StickerRequestTool.tsx +93 -0
  206. package/src/tools/StickerRequestTool/prompt.ts +19 -0
  207. package/src/tools/TaskTool/TaskTool.tsx +466 -0
  208. package/src/tools/TaskTool/constants.ts +1 -0
  209. package/src/tools/TaskTool/prompt.ts +92 -0
  210. package/src/tools/ThinkTool/ThinkTool.tsx +54 -0
  211. package/src/tools/ThinkTool/prompt.ts +12 -0
  212. package/src/tools/TodoWriteTool/TodoWriteTool.tsx +290 -0
  213. package/src/tools/TodoWriteTool/prompt.ts +63 -0
  214. package/src/tools/lsTool/lsTool.tsx +272 -0
  215. package/src/tools/lsTool/prompt.ts +2 -0
  216. package/src/tools.ts +63 -0
  217. package/src/types/PermissionMode.ts +120 -0
  218. package/src/types/RequestContext.ts +72 -0
  219. package/src/types/conversation.ts +51 -0
  220. package/src/types/logs.ts +58 -0
  221. package/src/types/modelCapabilities.ts +64 -0
  222. package/src/types/notebook.ts +87 -0
  223. package/src/utils/Cursor.ts +436 -0
  224. package/src/utils/PersistentShell.ts +373 -0
  225. package/src/utils/advancedFuzzyMatcher.ts +290 -0
  226. package/src/utils/agentLoader.ts +284 -0
  227. package/src/utils/agentStorage.ts +97 -0
  228. package/src/utils/array.ts +3 -0
  229. package/src/utils/ask.tsx +99 -0
  230. package/src/utils/auth.ts +13 -0
  231. package/src/utils/autoCompactCore.ts +223 -0
  232. package/src/utils/autoUpdater.ts +318 -0
  233. package/src/utils/betas.ts +20 -0
  234. package/src/utils/browser.ts +14 -0
  235. package/src/utils/cleanup.ts +72 -0
  236. package/src/utils/commands.ts +261 -0
  237. package/src/utils/commonUnixCommands.ts +161 -0
  238. package/src/utils/config.ts +942 -0
  239. package/src/utils/conversationRecovery.ts +55 -0
  240. package/src/utils/debugLogger.ts +1123 -0
  241. package/src/utils/diff.ts +42 -0
  242. package/src/utils/env.ts +57 -0
  243. package/src/utils/errors.ts +21 -0
  244. package/src/utils/exampleCommands.ts +109 -0
  245. package/src/utils/execFileNoThrow.ts +51 -0
  246. package/src/utils/expertChatStorage.ts +136 -0
  247. package/src/utils/file.ts +402 -0
  248. package/src/utils/fileRecoveryCore.ts +71 -0
  249. package/src/utils/format.tsx +44 -0
  250. package/src/utils/fuzzyMatcher.ts +328 -0
  251. package/src/utils/generators.ts +62 -0
  252. package/src/utils/git.ts +92 -0
  253. package/src/utils/globalLogger.ts +77 -0
  254. package/src/utils/http.ts +10 -0
  255. package/src/utils/imagePaste.ts +38 -0
  256. package/src/utils/json.ts +13 -0
  257. package/src/utils/log.ts +382 -0
  258. package/src/utils/markdown.ts +213 -0
  259. package/src/utils/messageContextManager.ts +289 -0
  260. package/src/utils/messages.tsx +939 -0
  261. package/src/utils/model.ts +836 -0
  262. package/src/utils/permissions/filesystem.ts +118 -0
  263. package/src/utils/responseState.ts +23 -0
  264. package/src/utils/ripgrep.ts +167 -0
  265. package/src/utils/secureFile.ts +559 -0
  266. package/src/utils/sessionState.ts +49 -0
  267. package/src/utils/state.ts +25 -0
  268. package/src/utils/style.ts +29 -0
  269. package/src/utils/terminal.ts +50 -0
  270. package/src/utils/theme.ts +133 -0
  271. package/src/utils/thinking.ts +144 -0
  272. package/src/utils/todoStorage.ts +431 -0
  273. package/src/utils/tokens.ts +43 -0
  274. package/src/utils/toolExecutionController.ts +163 -0
  275. package/src/utils/unaryLogging.ts +26 -0
  276. package/src/utils/user.ts +37 -0
  277. package/src/utils/validate.ts +165 -0
  278. package/cli.mjs +0 -1803
@@ -0,0 +1,3386 @@
1
+ import React, { useState, useEffect, useCallback, useRef } from 'react'
2
+ import { Box, Text, useInput } from 'ink'
3
+ import { getTheme } from '../utils/theme'
4
+ import { Select } from './CustomSelect/select'
5
+ import { Newline } from 'ink'
6
+ import { getModelManager } from '../utils/model'
7
+
8
// Shared screen container: every selector screen renders inside this single
// rounded border so individual screens don't each draw a duplicate frame.
function ScreenContainer({
  title,
  exitState,
  children,
}: {
  title: string
  exitState: { pending: boolean; keyName: string }
  children: React.ReactNode
}) {
  const currentTheme = getTheme()
  // When an exit is pending, remind the user which key to press again.
  const exitHint = exitState.pending
    ? `(press ${exitState.keyName} again to exit)`
    : ''
  return (
    <Box
      borderColor={currentTheme.secondaryBorder}
      borderStyle="round"
      flexDirection="column"
      gap={1}
      paddingX={2}
      paddingY={1}
    >
      <Text bold>
        {title} {exitHint}
      </Text>
      {children}
    </Box>
  )
}
36
+ import { PRODUCT_NAME } from '../constants/product'
37
+ import { useExitOnCtrlCD } from '../hooks/useExitOnCtrlCD'
38
+ import {
39
+ getGlobalConfig,
40
+ saveGlobalConfig,
41
+ ProviderType,
42
+ ModelPointerType,
43
+ setAllPointersToModel,
44
+ setModelPointer,
45
+ } from '../utils/config.js'
46
+ import models, { providers } from '../constants/models'
47
+ import TextInput from './TextInput'
48
+ import OpenAI from 'openai'
49
+ import chalk from 'chalk'
50
+ import { fetchAnthropicModels, verifyApiKey } from '../services/claude'
51
+ import { fetchCustomModels, getModelFeatures } from '../services/openai'
52
+ import { testGPT5Connection, validateGPT5Config } from '../services/gpt5ConnectionTest'
53
// Props accepted by the ModelSelector component.
type Props = {
  onDone: () => void // Called when configuration completes (or the user backs out of the first screen)
  abortController?: AbortController
  targetPointer?: ModelPointerType // Target pointer slot being configured, when launched for a specific one
  isOnboarding?: boolean // Whether this is first-time setup
  onCancel?: () => void // Cancel callback (different from onDone)
  skipModelType?: boolean // Skip model type selection
}

// One model entry as returned by the provider listing APIs. Only `model` and
// `provider` are relied on; provider-specific extras flow through the index
// signature (e.g. max_tokens, supports_vision).
type ModelInfo = {
  model: string
  provider: string
  [key: string]: any
}
67
+
68
+ // Define reasoning effort options
69
+ type ReasoningEffortOption = 'low' | 'medium' | 'high'
70
+
71
+ // Define context length options (in tokens)
72
+ type ContextLengthOption = {
73
+ label: string
74
+ value: number
75
+ }
76
+
77
+ const CONTEXT_LENGTH_OPTIONS: ContextLengthOption[] = [
78
+ { label: '32K tokens', value: 32000 },
79
+ { label: '64K tokens', value: 64000 },
80
+ { label: '128K tokens', value: 128000 },
81
+ { label: '200K tokens', value: 200000 },
82
+ { label: '256K tokens', value: 256000 },
83
+ { label: '300K tokens', value: 300000 },
84
+ { label: '512K tokens', value: 512000 },
85
+ { label: '1000K tokens', value: 1000000 },
86
+ { label: '2000K tokens', value: 2000000 },
87
+ { label: '3000K tokens', value: 3000000 },
88
+ { label: '5000K tokens', value: 5000000 },
89
+ { label: '10000K tokens', value: 10000000 },
90
+ ]
91
+
92
+ const DEFAULT_CONTEXT_LENGTH = 128000
93
+
94
+ // Define max tokens options
95
+ type MaxTokensOption = {
96
+ label: string
97
+ value: number
98
+ }
99
+
100
+ const MAX_TOKENS_OPTIONS: MaxTokensOption[] = [
101
+ { label: '1K tokens', value: 1024 },
102
+ { label: '2K tokens', value: 2048 },
103
+ { label: '4K tokens', value: 4096 },
104
+ { label: '8K tokens (recommended)', value: 8192 },
105
+ { label: '16K tokens', value: 16384 },
106
+ { label: '32K tokens', value: 32768 },
107
+ { label: '64K tokens', value: 65536 },
108
+ { label: '128K tokens', value: 131072 },
109
+ ]
110
+
111
+ const DEFAULT_MAX_TOKENS = 8192
112
+
113
+ // Custom hook to handle Escape key navigation
114
+ function useEscapeNavigation(
115
+ onEscape: () => void,
116
+ abortController?: AbortController,
117
+ ) {
118
+ // Use a ref to track if we've handled the escape key
119
+ const handledRef = useRef(false)
120
+
121
+ useInput(
122
+ (input, key) => {
123
+ if (key.escape && !handledRef.current) {
124
+ handledRef.current = true
125
+ // Reset after a short delay to allow for multiple escapes
126
+ setTimeout(() => {
127
+ handledRef.current = false
128
+ }, 100)
129
+ onEscape()
130
+ }
131
+ },
132
+ { isActive: true },
133
+ )
134
+ }
135
+
136
+ function printModelConfig() {
137
+ const config = getGlobalConfig()
138
+ // Only show ModelProfile information - no legacy fields
139
+ const modelProfiles = config.modelProfiles || []
140
+ const activeProfiles = modelProfiles.filter(p => p.isActive)
141
+
142
+ if (activeProfiles.length === 0) {
143
+ console.log(chalk.gray(' ⎿ No active model profiles configured'))
144
+ return
145
+ }
146
+
147
+ const profileSummary = activeProfiles
148
+ .map(p => `${p.name} (${p.provider}: ${p.modelName})`)
149
+ .join(' | ')
150
+ console.log(chalk.gray(` ⎿ ${profileSummary}`))
151
+ }
152
+
153
+ export function ModelSelector({
154
+ onDone: onDoneProp,
155
+ abortController,
156
+ targetPointer,
157
+ isOnboarding = false,
158
+ onCancel,
159
+ skipModelType = false,
160
+ }: Props): React.ReactNode {
161
+ const config = getGlobalConfig()
162
+ const theme = getTheme()
163
+ const onDone = () => {
164
+ printModelConfig()
165
+ onDoneProp()
166
+ }
167
+ // Initialize the exit hook but don't use it for Escape key
168
+ const exitState = useExitOnCtrlCD(() => process.exit(0))
169
+
170
+ // Always start with provider selection in new system
171
+ const getInitialScreen = (): string => {
172
+ return 'provider'
173
+ }
174
+
175
+ // Screen navigation stack
176
+ const [screenStack, setScreenStack] = useState<
177
+ Array<
178
+ | 'provider'
179
+ | 'anthropicSubMenu'
180
+ | 'apiKey'
181
+ | 'resourceName'
182
+ | 'baseUrl'
183
+ | 'model'
184
+ | 'modelInput'
185
+ | 'modelParams'
186
+ | 'contextLength'
187
+ | 'connectionTest'
188
+ | 'confirmation'
189
+ >
190
+ >([getInitialScreen()])
191
+
192
+ // Current screen is always the last item in the stack
193
+ const currentScreen = screenStack[screenStack.length - 1]
194
+
195
+ // Function to navigate to a new screen
196
+ const navigateTo = (
197
+ screen:
198
+ | 'provider'
199
+ | 'anthropicSubMenu'
200
+ | 'apiKey'
201
+ | 'resourceName'
202
+ | 'baseUrl'
203
+ | 'model'
204
+ | 'modelInput'
205
+ | 'modelParams'
206
+ | 'contextLength'
207
+ | 'connectionTest'
208
+ | 'confirmation',
209
+ ) => {
210
+ setScreenStack(prev => [...prev, screen])
211
+ }
212
+
213
+ // Function to go back to the previous screen
214
+ const goBack = () => {
215
+ if (screenStack.length > 1) {
216
+ // Remove the current screen from the stack
217
+ setScreenStack(prev => prev.slice(0, -1))
218
+ } else {
219
+ // If we're at the first screen, call onDone to exit
220
+ onDone()
221
+ }
222
+ }
223
+
224
+ // State for model configuration
225
+ const [selectedProvider, setSelectedProvider] = useState<ProviderType>(
226
+ config.primaryProvider ?? 'anthropic',
227
+ )
228
+
229
+ // State for Anthropic provider sub-menu
230
+ const [anthropicProviderType, setAnthropicProviderType] = useState<
231
+ 'official' | 'bigdream' | 'opendev' | 'custom'
232
+ >('official')
233
+ const [selectedModel, setSelectedModel] = useState<string>('')
234
+ const [apiKey, setApiKey] = useState<string>('')
235
+
236
+ // New state for model parameters
237
+ const [maxTokens, setMaxTokens] = useState<string>(
238
+ config.maxTokens?.toString() || DEFAULT_MAX_TOKENS.toString(),
239
+ )
240
+ const [maxTokensMode, setMaxTokensMode] = useState<'preset' | 'custom'>(
241
+ 'preset',
242
+ )
243
+ const [selectedMaxTokensPreset, setSelectedMaxTokensPreset] =
244
+ useState<number>(config.maxTokens || DEFAULT_MAX_TOKENS)
245
+ const [reasoningEffort, setReasoningEffort] =
246
+ useState<ReasoningEffortOption>('medium')
247
+ const [supportsReasoningEffort, setSupportsReasoningEffort] =
248
+ useState<boolean>(false)
249
+
250
+ // Context length state (use default instead of legacy config)
251
+ const [contextLength, setContextLength] = useState<number>(
252
+ DEFAULT_CONTEXT_LENGTH,
253
+ )
254
+
255
+ // Form focus state
256
+ const [activeFieldIndex, setActiveFieldIndex] = useState(0)
257
+ const [maxTokensCursorOffset, setMaxTokensCursorOffset] = useState<number>(0)
258
+
259
+ // UI state
260
+
261
+ // Search and model loading state
262
+ const [availableModels, setAvailableModels] = useState<ModelInfo[]>([])
263
+ const [isLoadingModels, setIsLoadingModels] = useState(false)
264
+ const [modelLoadError, setModelLoadError] = useState<string | null>(null)
265
+ const [modelSearchQuery, setModelSearchQuery] = useState<string>('')
266
+ const [modelSearchCursorOffset, setModelSearchCursorOffset] =
267
+ useState<number>(0)
268
+ const [cursorOffset, setCursorOffset] = useState<number>(0)
269
+ const [apiKeyEdited, setApiKeyEdited] = useState<boolean>(false)
270
+
271
+ // Retry logic state
272
+ const [fetchRetryCount, setFetchRetryCount] = useState<number>(0)
273
+ const [isRetrying, setIsRetrying] = useState<boolean>(false)
274
+
275
+ // Connection test state
276
+ const [isTestingConnection, setIsTestingConnection] = useState<boolean>(false)
277
+ const [connectionTestResult, setConnectionTestResult] = useState<{
278
+ success: boolean
279
+ message: string
280
+ endpoint?: string
281
+ details?: string
282
+ } | null>(null)
283
+
284
+ // Validation error state for duplicate model detection
285
+ const [validationError, setValidationError] = useState<string | null>(null)
286
+
287
+ // State for Azure-specific configuration
288
+ const [resourceName, setResourceName] = useState<string>('')
289
+ const [resourceNameCursorOffset, setResourceNameCursorOffset] =
290
+ useState<number>(0)
291
+ const [customModelName, setCustomModelName] = useState<string>('')
292
+ const [customModelNameCursorOffset, setCustomModelNameCursorOffset] =
293
+ useState<number>(0)
294
+
295
+ // State for Ollama-specific configuration
296
+ const [ollamaBaseUrl, setOllamaBaseUrl] = useState<string>(
297
+ 'http://localhost:11434/v1',
298
+ )
299
+ const [ollamaBaseUrlCursorOffset, setOllamaBaseUrlCursorOffset] =
300
+ useState<number>(0)
301
+
302
+ // State for custom OpenAI-compatible API configuration
303
+ const [customBaseUrl, setCustomBaseUrl] = useState<string>('')
304
+ const [customBaseUrlCursorOffset, setCustomBaseUrlCursorOffset] =
305
+ useState<number>(0)
306
+
307
+ // State for provider base URL configuration (used for all providers)
308
+ const [providerBaseUrl, setProviderBaseUrl] = useState<string>('')
309
+ const [providerBaseUrlCursorOffset, setProviderBaseUrlCursorOffset] =
310
+ useState<number>(0)
311
+
312
+ // Reasoning effort options
313
+ const reasoningEffortOptions = [
314
+ { label: 'Low - Faster responses, less thorough reasoning', value: 'low' },
315
+ { label: 'Medium - Balanced speed and reasoning depth', value: 'medium' },
316
+ {
317
+ label: 'High - Slower responses, more thorough reasoning',
318
+ value: 'high',
319
+ },
320
+ ]
321
+
322
+ // Get available providers from models.ts, excluding community Claude providers (now in Anthropic sub-menu)
323
+ const availableProviders = Object.keys(providers).filter(
324
+ provider => provider !== 'bigdream' && provider !== 'opendev',
325
+ )
326
+
327
+ // Create provider options with nice labels
328
+ const providerOptions = availableProviders.map(provider => {
329
+ const modelCount = models[provider]?.length || 0
330
+ const label = getProviderLabel(provider, modelCount)
331
+ return {
332
+ label,
333
+ value: provider,
334
+ }
335
+ })
336
+
337
+ useEffect(() => {
338
+ if (!apiKeyEdited && selectedProvider) {
339
+ if (process.env[selectedProvider.toUpperCase() + '_API_KEY']) {
340
+ setApiKey(
341
+ process.env[selectedProvider.toUpperCase() + '_API_KEY'] as string,
342
+ )
343
+ } else {
344
+ setApiKey('')
345
+ }
346
+ }
347
+ }, [selectedProvider, apiKey, apiKeyEdited])
348
+
349
+ // Ensure contextLength is always set to a valid option when contextLength screen is displayed
350
+ useEffect(() => {
351
+ if (
352
+ currentScreen === 'contextLength' &&
353
+ !CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength)
354
+ ) {
355
+ setContextLength(DEFAULT_CONTEXT_LENGTH)
356
+ }
357
+ }, [currentScreen, contextLength])
358
+
359
+ // Create a set of model names from our constants/models.ts for the current provider
360
+ const ourModelNames = new Set(
361
+ (models[selectedProvider as keyof typeof models] || []).map(
362
+ (model: any) => model.model,
363
+ ),
364
+ )
365
+
366
+ // Create model options from available models, filtered by search query
367
+ const filteredModels = modelSearchQuery
368
+ ? availableModels.filter(model =>
369
+ model.model?.toLowerCase().includes(modelSearchQuery.toLowerCase()),
370
+ )
371
+ : availableModels
372
+
373
+ // Sort models with priority for specific keywords
374
+ const sortModelsByPriority = (models: ModelInfo[]) => {
375
+ const priorityKeywords = [
376
+ 'claude',
377
+ 'kimi',
378
+ 'deepseek',
379
+ 'minimax',
380
+ 'o3',
381
+ 'gpt',
382
+ 'qwen',
383
+ ]
384
+
385
+ return models.sort((a, b) => {
386
+ // Add safety checks for undefined model names
387
+ const aModelLower = a.model?.toLowerCase() || ''
388
+ const bModelLower = b.model?.toLowerCase() || ''
389
+
390
+ // Check if models contain priority keywords
391
+ const aHasPriority = priorityKeywords.some(keyword =>
392
+ aModelLower.includes(keyword),
393
+ )
394
+ const bHasPriority = priorityKeywords.some(keyword =>
395
+ bModelLower.includes(keyword),
396
+ )
397
+
398
+ // If one has priority and the other doesn't, prioritize the one with keywords
399
+ if (aHasPriority && !bHasPriority) return -1
400
+ if (!aHasPriority && bHasPriority) return 1
401
+
402
+ // If both have priority or neither has priority, sort alphabetically
403
+ return a.model.localeCompare(b.model)
404
+ })
405
+ }
406
+
407
+ const sortedFilteredModels = sortModelsByPriority(filteredModels)
408
+
409
+ const modelOptions = sortedFilteredModels.map(model => {
410
+ // Check if this model is in our constants/models.ts list
411
+ const isInOurModels = ourModelNames.has(model.model)
412
+
413
+ return {
414
+ label: `${model.model}${getModelDetails(model)}`,
415
+ value: model.model,
416
+ }
417
+ })
418
+
419
+ function getModelDetails(model: ModelInfo): string {
420
+ const details = []
421
+
422
+ if (model.max_tokens) {
423
+ details.push(`${formatNumber(model.max_tokens)} tokens`)
424
+ }
425
+
426
+ if (model.supports_vision) {
427
+ details.push('vision')
428
+ }
429
+
430
+ if (model.supports_function_calling) {
431
+ details.push('tools')
432
+ }
433
+
434
+ return details.length > 0 ? ` (${details.join(', ')})` : ''
435
+ }
436
+
437
+ function formatNumber(num: number): string {
438
+ if (num >= 1000000) {
439
+ return `${(num / 1000000).toFixed(1)}M`
440
+ } else if (num >= 1000) {
441
+ return `${(num / 1000).toFixed(0)}K`
442
+ }
443
+ return num.toString()
444
+ }
445
+
446
+ function getProviderLabel(provider: string, modelCount: number): string {
447
+ // Use provider names from the providers object if available
448
+ if (providers[provider]) {
449
+ return `${providers[provider].name} ${providers[provider].status === 'wip' ? '(WIP)' : ''} (${modelCount} models)`
450
+ }
451
+ return `${provider}`
452
+ }
453
+
454
+ function handleProviderSelection(provider: string) {
455
+ const providerType = provider as ProviderType
456
+ setSelectedProvider(providerType)
457
+
458
+ if (provider === 'custom') {
459
+ // For custom provider, save and exit
460
+ saveConfiguration(providerType, selectedModel || '')
461
+ onDone()
462
+ } else if (provider === 'anthropic') {
463
+ // For Anthropic provider, go to sub-menu to choose between official, community proxies, or custom
464
+ navigateTo('anthropicSubMenu')
465
+ } else {
466
+ // For all other providers, go to base URL configuration first
467
+ // Initialize with the default base URL for the provider
468
+ const defaultBaseUrl = providers[providerType]?.baseURL || ''
469
+ setProviderBaseUrl(defaultBaseUrl)
470
+ navigateTo('baseUrl')
471
+ }
472
+ }
473
+
474
+ // Local implementation of fetchAnthropicModels for UI
475
+ async function fetchAnthropicModels(baseURL: string, apiKey: string) {
476
+ try {
477
+ const response = await fetch(`${baseURL}/v1/models`, {
478
+ method: 'GET',
479
+ headers: {
480
+ 'x-api-key': apiKey,
481
+ 'anthropic-version': '2023-06-01',
482
+ 'Content-Type': 'application/json',
483
+ },
484
+ })
485
+
486
+ if (!response.ok) {
487
+ if (response.status === 401) {
488
+ throw new Error(
489
+ 'Invalid API key. Please check your API key and try again.',
490
+ )
491
+ } else if (response.status === 403) {
492
+ throw new Error('API key does not have permission to access models.')
493
+ } else if (response.status === 404) {
494
+ throw new Error(
495
+ 'API endpoint not found. This provider may not support model listing.',
496
+ )
497
+ } else if (response.status === 429) {
498
+ throw new Error(
499
+ 'Too many requests. Please wait a moment and try again.',
500
+ )
501
+ } else if (response.status >= 500) {
502
+ throw new Error(
503
+ 'API service is temporarily unavailable. Please try again later.',
504
+ )
505
+ } else {
506
+ throw new Error(`Unable to connect to API (${response.status}).`)
507
+ }
508
+ }
509
+
510
+ const data = await response.json()
511
+
512
+ // Handle different response formats
513
+ let models = []
514
+ if (data && data.data && Array.isArray(data.data)) {
515
+ models = data.data
516
+ } else if (Array.isArray(data)) {
517
+ models = data
518
+ } else if (data && data.models && Array.isArray(data.models)) {
519
+ models = data.models
520
+ } else {
521
+ throw new Error('API returned unexpected response format.')
522
+ }
523
+
524
+ return models
525
+ } catch (error) {
526
+ if (
527
+ error instanceof Error &&
528
+ (error.message.includes('API key') ||
529
+ error.message.includes('API endpoint') ||
530
+ error.message.includes('API service') ||
531
+ error.message.includes('response format'))
532
+ ) {
533
+ throw error
534
+ }
535
+
536
+ if (error instanceof Error && error.message.includes('fetch')) {
537
+ throw new Error(
538
+ 'Unable to connect to the API. Please check the base URL and your internet connection.',
539
+ )
540
+ }
541
+
542
+ throw new Error(
543
+ 'Failed to fetch models from API. Please check your configuration and try again.',
544
+ )
545
+ }
546
+ }
547
+
548
+ // 通用的Anthropic兼容模型获取函数,实现三层降级策略
549
+ async function fetchAnthropicCompatibleModelsWithFallback(
550
+ baseURL: string,
551
+ provider: string,
552
+ apiKeyUrl: string,
553
+ ) {
554
+ let lastError: Error | null = null
555
+
556
+ // 第一层:尝试使用 Anthropic 风格的 API
557
+ try {
558
+ const models = await fetchAnthropicModels(baseURL, apiKey)
559
+ return models.map((model: any) => ({
560
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
561
+ provider: provider,
562
+ max_tokens: model.max_tokens || 8192,
563
+ supports_vision: model.supports_vision || true,
564
+ supports_function_calling: model.supports_function_calling || true,
565
+ supports_reasoning_effort: false,
566
+ }))
567
+ } catch (error) {
568
+ lastError = error as Error
569
+ console.log(
570
+ `Anthropic API failed for ${provider}, trying OpenAI format:`,
571
+ error,
572
+ )
573
+ }
574
+
575
+ // 第二层:尝试使用 OpenAI 风格的 API
576
+ try {
577
+ const models = await fetchCustomModels(baseURL, apiKey)
578
+ return models.map((model: any) => ({
579
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
580
+ provider: provider,
581
+ max_tokens: model.max_tokens || 8192,
582
+ supports_vision: model.supports_vision || false,
583
+ supports_function_calling: model.supports_function_calling || true,
584
+ supports_reasoning_effort: false,
585
+ }))
586
+ } catch (error) {
587
+ lastError = error as Error
588
+ console.log(
589
+ `OpenAI API failed for ${provider}, falling back to manual input:`,
590
+ error,
591
+ )
592
+ }
593
+
594
+ // 第三层:抛出错误,触发手动输入模式
595
+ let errorMessage = `Failed to fetch ${provider} models using both Anthropic and OpenAI API formats`
596
+
597
+ if (lastError) {
598
+ errorMessage = lastError.message
599
+ }
600
+
601
+ // 添加有用的建议
602
+ if (errorMessage.includes('API key')) {
603
+ errorMessage += `\n\n💡 Tip: Get your API key from ${apiKeyUrl}`
604
+ } else if (errorMessage.includes('permission')) {
605
+ errorMessage += `\n\n💡 Tip: Make sure your API key has access to the ${provider} API`
606
+ } else if (errorMessage.includes('connection')) {
607
+ errorMessage += '\n\n💡 Tip: Check your internet connection and try again'
608
+ }
609
+
610
+ setModelLoadError(errorMessage)
611
+ throw new Error(errorMessage)
612
+ }
613
+
614
+ // 统一处理所有Anthropic兼容提供商的模型获取
615
+ async function fetchAnthropicCompatibleProviderModels() {
616
+ // 根据anthropicProviderType确定默认baseURL和API key获取地址
617
+ let defaultBaseURL: string
618
+ let apiKeyUrl: string
619
+ let actualProvider: string
620
+
621
+ switch (anthropicProviderType) {
622
+ case 'official':
623
+ defaultBaseURL = 'https://api.anthropic.com'
624
+ apiKeyUrl = 'https://console.anthropic.com/settings/keys'
625
+ actualProvider = 'anthropic'
626
+ break
627
+ case 'bigdream':
628
+ defaultBaseURL = 'https://api-key.info'
629
+ apiKeyUrl = 'https://api-key.info/register?aff=MSl4'
630
+ actualProvider = 'bigdream'
631
+ break
632
+ case 'opendev':
633
+ defaultBaseURL = 'https://api.openai-next.com'
634
+ apiKeyUrl = 'https://api.openai-next.com/register/?aff_code=4xo7'
635
+ actualProvider = 'opendev'
636
+ break
637
+ case 'custom':
638
+ defaultBaseURL = providerBaseUrl
639
+ apiKeyUrl = 'your custom API provider'
640
+ actualProvider = 'anthropic'
641
+ break
642
+ default:
643
+ throw new Error(
644
+ `Unsupported Anthropic provider type: ${anthropicProviderType}`,
645
+ )
646
+ }
647
+
648
+ const baseURL =
649
+ anthropicProviderType === 'custom'
650
+ ? providerBaseUrl
651
+ : providerBaseUrl || defaultBaseURL
652
+ return await fetchAnthropicCompatibleModelsWithFallback(
653
+ baseURL,
654
+ actualProvider,
655
+ apiKeyUrl,
656
+ )
657
+ }
658
+
659
+ // Remove duplicate function definitions - using unified fetchAnthropicCompatibleProviderModels instead
660
+
661
+ async function fetchKimiModels() {
662
+ try {
663
+ const baseURL = providerBaseUrl || 'https://api.moonshot.cn/v1'
664
+ const models = await fetchCustomModels(baseURL, apiKey)
665
+
666
+ const kimiModels = models.map((model: any) => ({
667
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
668
+ provider: 'kimi',
669
+ max_tokens: model.max_tokens || 8192,
670
+ supports_vision: false, // Default to false, could be enhanced
671
+ supports_function_calling: true,
672
+ supports_reasoning_effort: false,
673
+ }))
674
+
675
+ return kimiModels
676
+ } catch (error) {
677
+ let errorMessage = 'Failed to fetch Kimi models'
678
+
679
+ if (error instanceof Error) {
680
+ errorMessage = error.message
681
+ }
682
+
683
+ // Add helpful suggestions based on error type
684
+ if (errorMessage.includes('API key')) {
685
+ errorMessage +=
686
+ '\n\n💡 Tip: Get your API key from https://platform.moonshot.cn/console/api-keys'
687
+ } else if (errorMessage.includes('permission')) {
688
+ errorMessage +=
689
+ '\n\n💡 Tip: Make sure your API key has access to the Kimi API'
690
+ } else if (errorMessage.includes('connection')) {
691
+ errorMessage +=
692
+ '\n\n💡 Tip: Check your internet connection and try again'
693
+ }
694
+
695
+ setModelLoadError(errorMessage)
696
+ throw error
697
+ }
698
+ }
699
+
700
+ async function fetchDeepSeekModels() {
701
+ try {
702
+ const baseURL = providerBaseUrl || 'https://api.deepseek.com'
703
+ const models = await fetchCustomModels(baseURL, apiKey)
704
+
705
+ const deepseekModels = models.map((model: any) => ({
706
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
707
+ provider: 'deepseek',
708
+ max_tokens: model.max_tokens || 8192,
709
+ supports_vision: false, // Default to false, could be enhanced
710
+ supports_function_calling: true,
711
+ supports_reasoning_effort: false,
712
+ }))
713
+
714
+ return deepseekModels
715
+ } catch (error) {
716
+ let errorMessage = 'Failed to fetch DeepSeek models'
717
+
718
+ if (error instanceof Error) {
719
+ errorMessage = error.message
720
+ }
721
+
722
+ // Add helpful suggestions based on error type
723
+ if (errorMessage.includes('API key')) {
724
+ errorMessage +=
725
+ '\n\n💡 Tip: Get your API key from https://platform.deepseek.com/api_keys'
726
+ } else if (errorMessage.includes('permission')) {
727
+ errorMessage +=
728
+ '\n\n💡 Tip: Make sure your API key has access to the DeepSeek API'
729
+ } else if (errorMessage.includes('connection')) {
730
+ errorMessage +=
731
+ '\n\n💡 Tip: Check your internet connection and try again'
732
+ }
733
+
734
+ setModelLoadError(errorMessage)
735
+ throw error
736
+ }
737
+ }
738
+
739
+ async function fetchSiliconFlowModels() {
740
+ try {
741
+ const baseURL = providerBaseUrl || 'https://api.siliconflow.cn/v1'
742
+ const models = await fetchCustomModels(baseURL, apiKey)
743
+
744
+ const siliconflowModels = models.map((model: any) => ({
745
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
746
+ provider: 'siliconflow',
747
+ max_tokens: model.max_tokens || 8192,
748
+ supports_vision: false, // Default to false, could be enhanced
749
+ supports_function_calling: true,
750
+ supports_reasoning_effort: false,
751
+ }))
752
+
753
+ return siliconflowModels
754
+ } catch (error) {
755
+ let errorMessage = 'Failed to fetch SiliconFlow models'
756
+
757
+ if (error instanceof Error) {
758
+ errorMessage = error.message
759
+ }
760
+
761
+ // Add helpful suggestions based on error type
762
+ if (errorMessage.includes('API key')) {
763
+ errorMessage +=
764
+ '\n\n💡 Tip: Get your API key from https://cloud.siliconflow.cn/i/oJWsm6io'
765
+ } else if (errorMessage.includes('permission')) {
766
+ errorMessage +=
767
+ '\n\n💡 Tip: Make sure your API key has access to the SiliconFlow API'
768
+ } else if (errorMessage.includes('connection')) {
769
+ errorMessage +=
770
+ '\n\n💡 Tip: Check your internet connection and try again'
771
+ }
772
+
773
+ setModelLoadError(errorMessage)
774
+ throw error
775
+ }
776
+ }
777
+
778
+ async function fetchQwenModels() {
779
+ try {
780
+ const baseURL =
781
+ providerBaseUrl || 'https://dashscope.aliyuncs.com/compatible-mode/v1'
782
+ const models = await fetchCustomModels(baseURL, apiKey)
783
+
784
+ const qwenModels = models.map((model: any) => ({
785
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
786
+ provider: 'qwen',
787
+ max_tokens: model.max_tokens || 8192,
788
+ supports_vision: false,
789
+ supports_function_calling: true,
790
+ supports_reasoning_effort: false,
791
+ }))
792
+
793
+ return qwenModels
794
+ } catch (error) {
795
+ let errorMessage = 'Failed to fetch Qwen models'
796
+
797
+ if (error instanceof Error) {
798
+ errorMessage = error.message
799
+ }
800
+
801
+ if (errorMessage.includes('API key')) {
802
+ errorMessage +=
803
+ '\n\n💡 Tip: Get your API key from https://bailian.console.aliyun.com/?tab=model#/api-key'
804
+ } else if (errorMessage.includes('permission')) {
805
+ errorMessage +=
806
+ '\n\n💡 Tip: Make sure your API key has access to the Qwen API'
807
+ } else if (errorMessage.includes('connection')) {
808
+ errorMessage +=
809
+ '\n\n💡 Tip: Check your internet connection and try again'
810
+ }
811
+
812
+ setModelLoadError(errorMessage)
813
+ throw error
814
+ }
815
+ }
816
+
817
+ async function fetchGLMModels() {
818
+ try {
819
+ const baseURL = providerBaseUrl || 'https://open.bigmodel.cn/api/paas/v4'
820
+ const models = await fetchCustomModels(baseURL, apiKey)
821
+
822
+ const glmModels = models.map((model: any) => ({
823
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
824
+ provider: 'glm',
825
+ max_tokens: model.max_tokens || 8192,
826
+ supports_vision: false,
827
+ supports_function_calling: true,
828
+ supports_reasoning_effort: false,
829
+ }))
830
+
831
+ return glmModels
832
+ } catch (error) {
833
+ let errorMessage = 'Failed to fetch GLM models'
834
+
835
+ if (error instanceof Error) {
836
+ errorMessage = error.message
837
+ }
838
+
839
+ if (errorMessage.includes('API key')) {
840
+ errorMessage +=
841
+ '\n\n💡 Tip: Get your API key from https://open.bigmodel.cn (API Keys section)'
842
+ } else if (errorMessage.includes('permission')) {
843
+ errorMessage +=
844
+ '\n\n💡 Tip: Make sure your API key has access to the GLM API'
845
+ } else if (errorMessage.includes('connection')) {
846
+ errorMessage +=
847
+ '\n\n💡 Tip: Check your internet connection and try again'
848
+ }
849
+
850
+ setModelLoadError(errorMessage)
851
+ throw error
852
+ }
853
+ }
854
+
855
+ async function fetchMinimaxModels() {
856
+ try {
857
+ const baseURL = providerBaseUrl || 'https://api.minimaxi.com/v1'
858
+ const models = await fetchCustomModels(baseURL, apiKey)
859
+
860
+ const minimaxModels = models.map((model: any) => ({
861
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
862
+ provider: 'minimax',
863
+ max_tokens: model.max_tokens || 8192,
864
+ supports_vision: false,
865
+ supports_function_calling: true,
866
+ supports_reasoning_effort: false,
867
+ }))
868
+
869
+ return minimaxModels
870
+ } catch (error) {
871
+ let errorMessage = 'Failed to fetch MiniMax models'
872
+
873
+ if (error instanceof Error) {
874
+ errorMessage = error.message
875
+ }
876
+
877
+ if (errorMessage.includes('API key')) {
878
+ errorMessage +=
879
+ '\n\n💡 Tip: Get your API key from https://www.minimax.io/platform/user-center/basic-information'
880
+ } else if (errorMessage.includes('permission')) {
881
+ errorMessage +=
882
+ '\n\n💡 Tip: Make sure your API key has access to the MiniMax API'
883
+ } else if (errorMessage.includes('connection')) {
884
+ errorMessage +=
885
+ '\n\n💡 Tip: Check your internet connection and try again'
886
+ }
887
+
888
+ setModelLoadError(errorMessage)
889
+ throw error
890
+ }
891
+ }
892
+
893
+ async function fetchBaiduQianfanModels() {
894
+ try {
895
+ const baseURL = providerBaseUrl || 'https://qianfan.baidubce.com/v2'
896
+ const models = await fetchCustomModels(baseURL, apiKey)
897
+
898
+ const baiduModels = models.map((model: any) => ({
899
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
900
+ provider: 'baidu-qianfan',
901
+ max_tokens: model.max_tokens || 8192,
902
+ supports_vision: false,
903
+ supports_function_calling: true,
904
+ supports_reasoning_effort: false,
905
+ }))
906
+
907
+ return baiduModels
908
+ } catch (error) {
909
+ let errorMessage = 'Failed to fetch Baidu Qianfan models'
910
+
911
+ if (error instanceof Error) {
912
+ errorMessage = error.message
913
+ }
914
+
915
+ if (errorMessage.includes('API key')) {
916
+ errorMessage +=
917
+ '\n\n💡 Tip: Get your API key from https://console.bce.baidu.com/iam/#/iam/accesslist'
918
+ } else if (errorMessage.includes('permission')) {
919
+ errorMessage +=
920
+ '\n\n💡 Tip: Make sure your API key has access to the Baidu Qianfan API'
921
+ } else if (errorMessage.includes('connection')) {
922
+ errorMessage +=
923
+ '\n\n💡 Tip: Check your internet connection and try again'
924
+ }
925
+
926
+ setModelLoadError(errorMessage)
927
+ throw error
928
+ }
929
+ }
930
+
931
+ async function fetchCustomOpenAIModels() {
932
+ try {
933
+ const models = await fetchCustomModels(customBaseUrl, apiKey)
934
+
935
+ const customModels = models.map((model: any) => ({
936
+ model: model.modelName || model.id || model.name || model.model || 'unknown',
937
+ provider: 'custom-openai',
938
+ max_tokens: model.max_tokens || 4096,
939
+ supports_vision: false, // Default to false, could be enhanced
940
+ supports_function_calling: true,
941
+ supports_reasoning_effort: false,
942
+ }))
943
+
944
+ return customModels
945
+ } catch (error) {
946
+ let errorMessage = 'Failed to fetch custom API models'
947
+
948
+ if (error instanceof Error) {
949
+ errorMessage = error.message
950
+ }
951
+
952
+ // Add helpful suggestions based on error type
953
+ if (errorMessage.includes('API key')) {
954
+ errorMessage +=
955
+ '\n\n💡 Tip: Check that your API key is valid for this endpoint'
956
+ } else if (errorMessage.includes('endpoint not found')) {
957
+ errorMessage +=
958
+ '\n\n💡 Tip: Make sure the base URL ends with /v1 and supports OpenAI-compatible API'
959
+ } else if (errorMessage.includes('connect')) {
960
+ errorMessage +=
961
+ '\n\n💡 Tip: Verify the base URL is correct and accessible'
962
+ } else if (errorMessage.includes('response format')) {
963
+ errorMessage +=
964
+ '\n\n💡 Tip: This API may not be fully OpenAI-compatible'
965
+ }
966
+
967
+ setModelLoadError(errorMessage)
968
+ throw error
969
+ }
970
+ }
971
+
972
+ async function fetchGeminiModels() {
973
+ try {
974
+ const response = await fetch(
975
+ `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`,
976
+ )
977
+
978
+ if (!response.ok) {
979
+ const errorData = await response.json()
980
+ throw new Error(
981
+ errorData.error?.message || `API error: ${response.status}`,
982
+ )
983
+ }
984
+
985
+ const { models } = await response.json()
986
+
987
+ const geminiModels = models
988
+ .filter((model: any) =>
989
+ model.supportedGenerationMethods.includes('generateContent'),
990
+ )
991
+ .map((model: any) => ({
992
+ model: model.name.replace('models/', ''),
993
+ provider: 'gemini',
994
+ max_tokens: model.outputTokenLimit,
995
+ supports_vision:
996
+ model.supportedGenerationMethods.includes('generateContent'),
997
+ supports_function_calling:
998
+ model.supportedGenerationMethods.includes('generateContent'),
999
+ }))
1000
+
1001
+ return geminiModels
1002
+ } catch (error) {
1003
+ setModelLoadError(
1004
+ error instanceof Error ? error.message : 'Unknown error',
1005
+ )
1006
+ throw error
1007
+ }
1008
+ }
1009
+
1010
+ async function fetchOllamaModels() {
1011
+ try {
1012
+ const response = await fetch(`${ollamaBaseUrl}/models`)
1013
+
1014
+ if (!response.ok) {
1015
+ throw new Error(`HTTP error ${response.status}: ${response.statusText}`)
1016
+ }
1017
+
1018
+ const responseData = await response.json()
1019
+
1020
+ // Properly handle Ollama API response format
1021
+ // Ollama API can return models in different formats based on version
1022
+ let models = []
1023
+
1024
+ // Check if data field exists (newer Ollama versions)
1025
+ if (responseData.data && Array.isArray(responseData.data)) {
1026
+ models = responseData.data
1027
+ }
1028
+ // Check if models array is directly at the root (older Ollama versions)
1029
+ else if (Array.isArray(responseData.models)) {
1030
+ models = responseData.models
1031
+ }
1032
+ // If response is already an array
1033
+ else if (Array.isArray(responseData)) {
1034
+ models = responseData
1035
+ } else {
1036
+ throw new Error(
1037
+ 'Invalid response from Ollama API: missing models array',
1038
+ )
1039
+ }
1040
+
1041
+ // Transform Ollama models to our format
1042
+ const ollamaModels = models.map((model: any) => ({
1043
+ model:
1044
+ model.name ??
1045
+ model.modelName ??
1046
+ (typeof model === 'string' ? model : ''),
1047
+ provider: 'ollama',
1048
+ max_tokens: 4096, // Default value
1049
+ supports_vision: false,
1050
+ supports_function_calling: true,
1051
+ supports_reasoning_effort: false,
1052
+ }))
1053
+
1054
+ // Filter out models with empty names
1055
+ const validModels = ollamaModels.filter(model => model.model)
1056
+
1057
+ setAvailableModels(validModels)
1058
+
1059
+ // Only navigate if we have models
1060
+ if (validModels.length > 0) {
1061
+ navigateTo('model')
1062
+ } else {
1063
+ setModelLoadError('No models found in your Ollama installation')
1064
+ }
1065
+
1066
+ return validModels
1067
+ } catch (error) {
1068
+ const errorMessage =
1069
+ error instanceof Error ? error.message : String(error)
1070
+
1071
+ if (errorMessage.includes('fetch')) {
1072
+ setModelLoadError(
1073
+ `Could not connect to Ollama server at ${ollamaBaseUrl}. Make sure Ollama is running and the URL is correct.`,
1074
+ )
1075
+ } else {
1076
+ setModelLoadError(`Error loading Ollama models: ${errorMessage}`)
1077
+ }
1078
+
1079
+ console.error('Error fetching Ollama models:', error)
1080
+ return []
1081
+ }
1082
+ }
1083
+
1084
// Wrapper around fetchModels() that retries transient failures and, for
// providers that support it, automatically falls back to manual model entry
// when auto-discovery keeps failing. Returns the fetched models, or [] when
// all attempts failed (errors are surfaced via setModelLoadError, not thrown).
async function fetchModelsWithRetry() {
  const MAX_RETRIES = 2
  let lastError: Error | null = null

  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
    // Keep the UI informed about which attempt is running.
    setFetchRetryCount(attempt)
    setIsRetrying(attempt > 1)

    if (attempt > 1) {
      // Show retry message
      setModelLoadError(
        `Attempt ${attempt}/${MAX_RETRIES}: Retrying model discovery...`,
      )
      // Wait 1 second before retrying
      await new Promise(resolve => setTimeout(resolve, 1000))
    }

    try {
      const models = await fetchModels()
      // Success! Reset retry state and return models
      setFetchRetryCount(0)
      setIsRetrying(false)
      setModelLoadError(null)
      return models
    } catch (error) {
      lastError = error instanceof Error ? error : new Error(String(error))
      console.log(`Model fetch attempt ${attempt} failed:`, lastError.message)

      if (attempt === MAX_RETRIES) {
        // Final attempt failed, break to handle fallback
        break
      }
    }
  }

  // All retries failed, handle fallback to manual input
  setIsRetrying(false)
  const errorMessage = lastError?.message || 'Unknown error'

  // Check if provider supports manual input fallback
  const supportsManualInput = [
    'anthropic',
    'kimi',
    'deepseek',
    'siliconflow',
    'qwen',
    'glm',
    'minimax',
    'baidu-qianfan',
    'custom-openai',
  ].includes(selectedProvider)

  if (supportsManualInput) {
    setModelLoadError(
      `Failed to auto-discover models after ${MAX_RETRIES} attempts: ${errorMessage}\n\n⚡ Automatically switching to manual model configuration...`,
    )

    // Automatically switch to manual input after 2 seconds
    // (gives the user time to read the failure message above).
    setTimeout(() => {
      setModelLoadError(null)
      navigateTo('modelInput')
    }, 2000)
  } else {
    setModelLoadError(
      `Failed to load models after ${MAX_RETRIES} attempts: ${errorMessage}`,
    )
  }

  return []
}
1154
+
1155
+ async function fetchModels() {
1156
+ setIsLoadingModels(true)
1157
+ setModelLoadError(null)
1158
+
1159
+ try {
1160
+ // For Anthropic provider (including official and community proxies via sub-menu), use the same logic
1161
+ if (selectedProvider === 'anthropic') {
1162
+ const anthropicModels = await fetchAnthropicCompatibleProviderModels()
1163
+ setAvailableModels(anthropicModels)
1164
+ navigateTo('model')
1165
+ return anthropicModels
1166
+ }
1167
+
1168
+ // For custom OpenAI-compatible APIs, use the fetchCustomOpenAIModels function
1169
+ if (selectedProvider === 'custom-openai') {
1170
+ const customModels = await fetchCustomOpenAIModels()
1171
+ setAvailableModels(customModels)
1172
+ navigateTo('model')
1173
+ return customModels
1174
+ }
1175
+
1176
+ // For Gemini, use the separate fetchGeminiModels function
1177
+ if (selectedProvider === 'gemini') {
1178
+ const geminiModels = await fetchGeminiModels()
1179
+ setAvailableModels(geminiModels)
1180
+ navigateTo('model')
1181
+ return geminiModels
1182
+ }
1183
+
1184
+ // For Kimi, use the fetchKimiModels function
1185
+ if (selectedProvider === 'kimi') {
1186
+ const kimiModels = await fetchKimiModels()
1187
+ setAvailableModels(kimiModels)
1188
+ navigateTo('model')
1189
+ return kimiModels
1190
+ }
1191
+
1192
+ // For DeepSeek, use the fetchDeepSeekModels function
1193
+ if (selectedProvider === 'deepseek') {
1194
+ const deepseekModels = await fetchDeepSeekModels()
1195
+ setAvailableModels(deepseekModels)
1196
+ navigateTo('model')
1197
+ return deepseekModels
1198
+ }
1199
+
1200
+ // For SiliconFlow, use the fetchSiliconFlowModels function
1201
+ if (selectedProvider === 'siliconflow') {
1202
+ const siliconflowModels = await fetchSiliconFlowModels()
1203
+ setAvailableModels(siliconflowModels)
1204
+ navigateTo('model')
1205
+ return siliconflowModels
1206
+ }
1207
+
1208
+ // For Qwen, use the fetchQwenModels function
1209
+ if (selectedProvider === 'qwen') {
1210
+ const qwenModels = await fetchQwenModels()
1211
+ setAvailableModels(qwenModels)
1212
+ navigateTo('model')
1213
+ return qwenModels
1214
+ }
1215
+
1216
+ // For GLM, use the fetchGLMModels function
1217
+ if (selectedProvider === 'glm') {
1218
+ const glmModels = await fetchGLMModels()
1219
+ setAvailableModels(glmModels)
1220
+ navigateTo('model')
1221
+ return glmModels
1222
+ }
1223
+
1224
+ // For Baidu Qianfan, use the fetchBaiduQianfanModels function
1225
+ if (selectedProvider === 'baidu-qianfan') {
1226
+ const baiduModels = await fetchBaiduQianfanModels()
1227
+ setAvailableModels(baiduModels)
1228
+ navigateTo('model')
1229
+ return baiduModels
1230
+ }
1231
+
1232
+ // For Azure, skip model fetching and go directly to model input
1233
+ if (selectedProvider === 'azure') {
1234
+ navigateTo('modelInput')
1235
+ return []
1236
+ }
1237
+
1238
+ // For all other providers, use the OpenAI client
1239
+ let baseURL = providerBaseUrl || providers[selectedProvider]?.baseURL
1240
+
1241
+ // For custom-openai provider, use the custom base URL
1242
+ if (selectedProvider === 'custom-openai') {
1243
+ baseURL = customBaseUrl
1244
+ }
1245
+
1246
+ const openai = new OpenAI({
1247
+ apiKey: apiKey || 'dummy-key-for-ollama', // Ollama doesn't need a real key
1248
+ baseURL: baseURL,
1249
+ dangerouslyAllowBrowser: true,
1250
+ })
1251
+
1252
+ // Fetch the models
1253
+ const response = await openai.models.list()
1254
+
1255
+ // Transform the response into our ModelInfo format
1256
+ const fetchedModels = []
1257
+ for (const model of response.data) {
1258
+ const modelName = (model as any).modelName || (model as any).id || (model as any).name || (model as any).model || 'unknown'
1259
+ const modelInfo = models[selectedProvider as keyof typeof models]?.find(
1260
+ m => m.model === modelName,
1261
+ )
1262
+ fetchedModels.push({
1263
+ model: modelName,
1264
+ provider: selectedProvider,
1265
+ max_tokens: modelInfo?.max_output_tokens,
1266
+ supports_vision: modelInfo?.supports_vision || false,
1267
+ supports_function_calling:
1268
+ modelInfo?.supports_function_calling || false,
1269
+ supports_reasoning_effort:
1270
+ modelInfo?.supports_reasoning_effort || false,
1271
+ })
1272
+ }
1273
+
1274
+ setAvailableModels(fetchedModels)
1275
+
1276
+ // Navigate to model selection screen if models were loaded successfully
1277
+ navigateTo('model')
1278
+
1279
+ return fetchedModels
1280
+ } catch (error) {
1281
+ // Log for debugging
1282
+ console.error('Error fetching models:', error)
1283
+
1284
+ // Re-throw the error so that fetchModelsWithRetry can handle it properly
1285
+ throw error
1286
+ } finally {
1287
+ setIsLoadingModels(false)
1288
+ }
1289
+ }
1290
+
1291
+ function handleApiKeySubmit(key: string) {
1292
+ setApiKey(key)
1293
+
1294
+ // For Azure, go to resource name input next
1295
+ if (selectedProvider === 'azure') {
1296
+ navigateTo('resourceName')
1297
+ return
1298
+ }
1299
+
1300
+ // Fetch models with the provided API key
1301
+ fetchModelsWithRetry().catch(error => {
1302
+ // The retry logic in fetchModelsWithRetry already handles the error display
1303
+ // This catch is just to prevent unhandled promise rejection
1304
+ console.error('Final error after retries:', error)
1305
+ })
1306
+ }
1307
+
1308
// Persist the Azure resource name and move on to manual model entry
// (Azure deployments cannot be auto-discovered).
function handleResourceNameSubmit(name: string) {
  setResourceName(name)
  navigateTo('modelInput')
}
1312
+
1313
// Save the Ollama endpoint and immediately probe it for installed models.
function handleOllamaBaseUrlSubmit(url: string) {
  setOllamaBaseUrl(url)
  setIsLoadingModels(true)
  setModelLoadError(null)

  // Use the dedicated Ollama model fetch function; it handles its own error
  // reporting and navigation, so only the loading flag is cleared here.
  fetchOllamaModels().finally(() => {
    setIsLoadingModels(false)
  })
}
1323
+
1324
+ function handleCustomBaseUrlSubmit(url: string) {
1325
+ // Automatically remove trailing slash from baseURL
1326
+ const cleanUrl = url.replace(/\/+$/, '')
1327
+ setCustomBaseUrl(cleanUrl)
1328
+ // After setting custom base URL, go to API key input
1329
+ navigateTo('apiKey')
1330
+ }
1331
+
1332
+ function handleProviderBaseUrlSubmit(url: string) {
1333
+ // Automatically remove trailing slash from baseURL
1334
+ const cleanUrl = url.replace(/\/+$/, '')
1335
+ setProviderBaseUrl(cleanUrl)
1336
+
1337
+ // For Ollama, handle differently - it tries to fetch models immediately
1338
+ if (selectedProvider === 'ollama') {
1339
+ setOllamaBaseUrl(cleanUrl)
1340
+ setIsLoadingModels(true)
1341
+ setModelLoadError(null)
1342
+
1343
+ // Use the dedicated Ollama model fetch function
1344
+ fetchOllamaModels().finally(() => {
1345
+ setIsLoadingModels(false)
1346
+ })
1347
+ } else {
1348
+ // For all other providers, go to API key input next
1349
+ navigateTo('apiKey')
1350
+ }
1351
+ }
1352
+
1353
+ function handleAnthropicProviderSelection(
1354
+ providerType: 'official' | 'bigdream' | 'custom',
1355
+ ) {
1356
+ setAnthropicProviderType(providerType)
1357
+
1358
+ if (providerType === 'custom') {
1359
+ // For custom Anthropic provider, go to base URL configuration
1360
+ setProviderBaseUrl('')
1361
+ navigateTo('baseUrl')
1362
+ } else {
1363
+ // For official/community proxy providers, set default base URL and go to API key
1364
+ const defaultUrls = {
1365
+ official: 'https://api.anthropic.com',
1366
+ bigdream: 'https://api-key.info',
1367
+ opendev: 'https://api.openai-next.com',
1368
+ }
1369
+ setProviderBaseUrl(defaultUrls[providerType])
1370
+ navigateTo('apiKey')
1371
+ }
1372
+ }
1373
+
1374
+ function handleCustomModelSubmit(model: string) {
1375
+ setCustomModelName(model)
1376
+ setSelectedModel(model)
1377
+
1378
+ // No model info available, so set default values
1379
+ setSupportsReasoningEffort(false)
1380
+ setReasoningEffort(null)
1381
+
1382
+ // Use default max tokens for manually entered models
1383
+ setMaxTokensMode('preset')
1384
+ setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS)
1385
+ setMaxTokens(DEFAULT_MAX_TOKENS.toString())
1386
+ setMaxTokensCursorOffset(DEFAULT_MAX_TOKENS.toString().length)
1387
+
1388
+ // Go to model parameters screen
1389
+ navigateTo('modelParams')
1390
+ // Reset active field index
1391
+ setActiveFieldIndex(0)
1392
+ }
1393
+
1394
+ function handleModelSelection(model: string) {
1395
+ setSelectedModel(model)
1396
+
1397
+ // Check if the selected model supports reasoning_effort
1398
+ const modelInfo = availableModels.find(m => m.model === model)
1399
+ setSupportsReasoningEffort(modelInfo?.supports_reasoning_effort || false)
1400
+
1401
+ if (!modelInfo?.supports_reasoning_effort) {
1402
+ setReasoningEffort(null)
1403
+ }
1404
+
1405
+ // Set max tokens based on model info or default
1406
+ if (modelInfo?.max_tokens) {
1407
+ const modelMaxTokens = modelInfo.max_tokens
1408
+ // Check if the model's max tokens matches any of our presets
1409
+ const matchingPreset = MAX_TOKENS_OPTIONS.find(
1410
+ option => option.value === modelMaxTokens,
1411
+ )
1412
+
1413
+ if (matchingPreset) {
1414
+ setMaxTokensMode('preset')
1415
+ setSelectedMaxTokensPreset(modelMaxTokens)
1416
+ setMaxTokens(modelMaxTokens.toString())
1417
+ } else {
1418
+ setMaxTokensMode('custom')
1419
+ setMaxTokens(modelMaxTokens.toString())
1420
+ }
1421
+ setMaxTokensCursorOffset(modelMaxTokens.toString().length)
1422
+ } else {
1423
+ // No model-specific max tokens, use default
1424
+ setMaxTokensMode('preset')
1425
+ setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS)
1426
+ setMaxTokens(DEFAULT_MAX_TOKENS.toString())
1427
+ setMaxTokensCursorOffset(DEFAULT_MAX_TOKENS.toString().length)
1428
+ }
1429
+
1430
+ // Go to model parameters screen
1431
+ navigateTo('modelParams')
1432
+ // Reset active field index
1433
+ setActiveFieldIndex(0)
1434
+ }
1435
+
1436
// Advance from the parameter form to context-length selection.
const handleModelParamsSubmit = () => {
  // Values are already in state, no need to extract from form
  // Ensure contextLength is set to a valid option before navigating
  if (!CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength)) {
    setContextLength(DEFAULT_CONTEXT_LENGTH)
  }
  // Navigate to context length screen
  navigateTo('contextLength')
}
1445
+
1446
/**
 * Probe the currently-configured provider/model/key combination.
 *
 * Flow:
 *  - compute the base URL (Azure and custom-openai override it),
 *  - GPT-5 models on OpenAI-compatible providers go through the dedicated
 *    validateGPT5Config/testGPT5Connection path,
 *  - other OpenAI-compatible providers try one or more chat endpoints in
 *    order (MiniMax gets its vendor path first) and return the first
 *    success, else the last failure,
 *  - everything else falls through to testProviderSpecificEndpoint.
 *
 * Always toggles isTestingConnection around the attempt; never throws —
 * failures come back as { success: false, … }.
 */
async function testConnection(): Promise<{
  success: boolean
  message: string
  endpoint?: string
  details?: string
}> {
  setIsTestingConnection(true)
  setConnectionTestResult(null)

  try {
    // Determine the base URL to test
    let testBaseURL =
      providerBaseUrl || providers[selectedProvider]?.baseURL || ''

    if (selectedProvider === 'azure') {
      // Azure routes per-deployment; the deployment name is the model name.
      testBaseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${selectedModel}`
    } else if (selectedProvider === 'custom-openai') {
      testBaseURL = customBaseUrl
    }

    // For OpenAI-compatible providers, try multiple endpoints in order of preference
    const isOpenAICompatible = [
      'minimax',
      'kimi',
      'deepseek',
      'siliconflow',
      'qwen',
      'glm',
      'baidu-qianfan',
      'openai',
      'mistral',
      'xai',
      'groq',
      'custom-openai',
    ].includes(selectedProvider)

    if (isOpenAICompatible) {
      // 🔥 Use specialized GPT-5 connection test for GPT-5 models
      const isGPT5 = selectedModel?.toLowerCase().includes('gpt-5')

      if (isGPT5) {
        console.log(`🚀 Using specialized GPT-5 connection test for model: ${selectedModel}`)

        // Validate configuration first
        const configValidation = validateGPT5Config({
          model: selectedModel,
          apiKey: apiKey,
          baseURL: testBaseURL,
          maxTokens: parseInt(maxTokens) || 8192,
          provider: selectedProvider,
        })

        if (!configValidation.valid) {
          return {
            success: false,
            message: '❌ GPT-5 configuration validation failed',
            details: configValidation.errors.join('\n'),
          }
        }

        // Use specialized GPT-5 test service
        const gpt5Result = await testGPT5Connection({
          model: selectedModel,
          apiKey: apiKey,
          baseURL: testBaseURL,
          maxTokens: parseInt(maxTokens) || 8192,
          provider: selectedProvider,
        })

        return gpt5Result
      }

      // For non-GPT-5 OpenAI-compatible models, use existing logic
      const endpointsToTry = []

      if (selectedProvider === 'minimax') {
        // MiniMax's vendor endpoint is preferred; standard path is fallback.
        endpointsToTry.push(
          {
            path: '/text/chatcompletion_v2',
            name: 'MiniMax v2 (recommended)',
          },
          { path: '/chat/completions', name: 'Standard OpenAI' },
        )
      } else {
        endpointsToTry.push({
          path: '/chat/completions',
          name: 'Standard OpenAI',
        })
      }

      // First success wins; otherwise remember the most recent failure.
      let lastError = null
      for (const endpoint of endpointsToTry) {
        try {
          const testResult = await testChatEndpoint(
            testBaseURL,
            endpoint.path,
            endpoint.name,
          )

          if (testResult.success) {
            return testResult
          }
          lastError = testResult
        } catch (error) {
          lastError = {
            success: false,
            message: `Failed to test ${endpoint.name}`,
            endpoint: endpoint.path,
            details: error instanceof Error ? error.message : String(error),
          }
        }
      }

      return (
        lastError || {
          success: false,
          message: 'All endpoints failed',
          details: 'No endpoints could be reached',
        }
      )
    } else {
      // For non-OpenAI providers (like Anthropic, Gemini), use different test approach
      return await testProviderSpecificEndpoint(testBaseURL)
    }
  } catch (error) {
    return {
      success: false,
      message: 'Connection test failed',
      details: error instanceof Error ? error.message : String(error),
    }
  } finally {
    setIsTestingConnection(false)
  }
}
1580
+
1581
/**
 * Send one round-trip chat request to `baseURL + endpointPath` and judge
 * success by whether the model echoes "YES" (case-insensitive).
 *
 * Handles three response shapes: OpenAI `choices[0].message.content`,
 * MiniMax `reply`, and a generic `output`/`output.text`. GPT-5 models get
 * their payload rewritten (max_completion_tokens, temperature=1) before
 * sending. Never throws — network/HTTP failures are folded into the
 * returned result object.
 */
async function testChatEndpoint(
  baseURL: string,
  endpointPath: string,
  endpointName: string,
): Promise<{
  success: boolean
  message: string
  endpoint?: string
  details?: string
}> {
  // Strip trailing slashes so the joined URL never contains "//path".
  const testURL = `${baseURL.replace(/\/+$/, '')}${endpointPath}`

  // Create a test message that expects a specific response
  const testPayload: any = {
    model: selectedModel,
    messages: [
      {
        role: 'user',
        content:
          'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.',
      },
    ],
    max_tokens: Math.max(parseInt(maxTokens) || 8192, 8192), // Ensure minimum 8192 tokens for connection test
    temperature: 0,
    stream: false,
  }

  // GPT-5 parameter compatibility fix
  if (selectedModel && selectedModel.toLowerCase().includes('gpt-5')) {
    console.log(`Applying GPT-5 parameter fix for model: ${selectedModel}`)

    // GPT-5 requires max_completion_tokens instead of max_tokens
    if (testPayload.max_tokens) {
      testPayload.max_completion_tokens = testPayload.max_tokens
      delete testPayload.max_tokens
      console.log(`Transformed max_tokens → max_completion_tokens: ${testPayload.max_completion_tokens}`)
    }

    // GPT-5 temperature handling - ensure it's 1 or undefined
    if (testPayload.temperature !== undefined && testPayload.temperature !== 1) {
      console.log(`Adjusting temperature from ${testPayload.temperature} to 1 for GPT-5`)
      testPayload.temperature = 1
    }
  }

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
  }

  // Add authorization headers — Azure uses 'api-key', everyone else Bearer.
  if (selectedProvider === 'azure') {
    headers['api-key'] = apiKey
  } else {
    headers['Authorization'] = `Bearer ${apiKey}`
  }

  try {
    const response = await fetch(testURL, {
      method: 'POST',
      headers,
      body: JSON.stringify(testPayload),
    })

    if (response.ok) {
      const data = await response.json()
      console.log(
        '[DEBUG] Connection test response:',
        JSON.stringify(data, null, 2),
      )

      // Check if we got a valid response with content
      let responseContent = ''

      if (data.choices && data.choices.length > 0) {
        responseContent = data.choices[0]?.message?.content || ''
      } else if (data.reply) {
        // Handle MiniMax format
        responseContent = data.reply
      } else if (data.output) {
        // Handle other formats
        responseContent = data.output?.text || data.output || ''
      }

      console.log('[DEBUG] Extracted response content:', responseContent)

      // Check if response contains "YES" (case insensitive)
      const containsYes = responseContent.toLowerCase().includes('yes')

      if (containsYes) {
        return {
          success: true,
          message: `✅ Connection test passed with ${endpointName}`,
          endpoint: endpointPath,
          details: `Model responded correctly: "${responseContent.trim()}"`,
        }
      } else {
        // Transport worked but the model didn't follow the instruction —
        // surfaced as a failure so the user can retry or reconfigure.
        return {
          success: false,
          message: `⚠️ ${endpointName} connected but model response unexpected`,
          endpoint: endpointPath,
          details: `Expected "YES" but got: "${responseContent.trim() || '(empty response)'}"`,
        }
      }
    } else {
      // Non-2xx: try to extract a structured error, fall back to statusText.
      const errorData = await response.json().catch(() => null)
      const errorMessage =
        errorData?.error?.message || errorData?.message || response.statusText

      return {
        success: false,
        message: `❌ ${endpointName} failed (${response.status})`,
        endpoint: endpointPath,
        details: `Error: ${errorMessage}`,
      }
    }
  } catch (error) {
    // Network-level failure (DNS, refused connection, etc.).
    return {
      success: false,
      message: `❌ ${endpointName} connection failed`,
      endpoint: endpointPath,
      details: error instanceof Error ? error.message : String(error),
    }
  }
}
1705
+
1706
/**
 * Probe a GPT-5 style Responses API endpoint (`input` array +
 * `max_completion_tokens` + `reasoning.effort`) and judge success by the
 * model echoing "YES".
 *
 * NOTE(review): nothing in this visible chunk calls this helper —
 * testConnection routes GPT-5 through testGPT5Connection instead. Confirm
 * whether this is dead code before relying on it.
 */
async function testResponsesEndpoint(
  baseURL: string,
  endpointPath: string,
  endpointName: string,
): Promise<{
  success: boolean
  message: string
  endpoint?: string
  details?: string
}> {
  // Strip trailing slashes so the joined URL never contains "//path".
  const testURL = `${baseURL.replace(/\/+$/, '')}${endpointPath}`

  // 🔧 Enhanced GPT-5 Responses API test payload
  const testPayload: any = {
    model: selectedModel,
    input: [
      {
        role: 'user',
        content:
          'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.',
      },
    ],
    max_completion_tokens: Math.max(parseInt(maxTokens) || 8192, 8192),
    temperature: 1, // GPT-5 only supports temperature=1
    // 🚀 Add reasoning configuration for better GPT-5 performance
    reasoning: {
      effort: 'low', // Fast response for connection test
    },
  }

  console.log(`🔧 Testing GPT-5 Responses API for model: ${selectedModel}`)
  console.log(`🔧 Test URL: ${testURL}`)
  console.log(`🔧 Test payload:`, JSON.stringify(testPayload, null, 2))

  const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${apiKey}`,
  }

  try {
    const response = await fetch(testURL, {
      method: 'POST',
      headers,
      body: JSON.stringify(testPayload),
    })

    if (response.ok) {
      const data = await response.json()
      console.log(
        '[DEBUG] Responses API connection test response:',
        JSON.stringify(data, null, 2),
      )

      // Extract content from Responses API format
      let responseContent = ''

      if (data.output_text) {
        responseContent = data.output_text
      } else if (data.output) {
        responseContent = typeof data.output === 'string' ? data.output : data.output.text || ''
      }

      console.log('[DEBUG] Extracted response content:', responseContent)

      // Check if response contains "YES" (case insensitive)
      const containsYes = responseContent.toLowerCase().includes('yes')

      if (containsYes) {
        return {
          success: true,
          message: `✅ Connection test passed with ${endpointName}`,
          endpoint: endpointPath,
          details: `GPT-5 responded correctly via Responses API: "${responseContent.trim()}"`,
        }
      } else {
        return {
          success: false,
          message: `⚠️ ${endpointName} connected but model response unexpected`,
          endpoint: endpointPath,
          details: `Expected "YES" but got: "${responseContent.trim() || '(empty response)'}"`,
        }
      }
    } else {
      // 🔧 Enhanced error handling with detailed debugging
      const errorData = await response.json().catch(() => null)
      const errorMessage =
        errorData?.error?.message || errorData?.message || response.statusText

      console.log(`🚨 GPT-5 Responses API Error (${response.status}):`, errorData)

      // 🔧 Provide specific guidance for common GPT-5 errors
      let details = `Responses API Error: ${errorMessage}`
      if (response.status === 400 && errorMessage.includes('max_tokens')) {
        details += '\n🔧 Note: This appears to be a parameter compatibility issue. The fallback to Chat Completions should handle this.'
      } else if (response.status === 404) {
        details += '\n🔧 Note: Responses API endpoint may not be available for this model or provider.'
      } else if (response.status === 401) {
        details += '\n🔧 Note: API key authentication failed.'
      }

      return {
        success: false,
        message: `❌ ${endpointName} failed (${response.status})`,
        endpoint: endpointPath,
        details: details,
      }
    }
  } catch (error) {
    return {
      success: false,
      message: `❌ ${endpointName} connection failed`,
      endpoint: endpointPath,
      details: error instanceof Error ? error.message : String(error),
    }
  }
}
1822
+
1823
+ async function testProviderSpecificEndpoint(baseURL: string): Promise<{
1824
+ success: boolean
1825
+ message: string
1826
+ endpoint?: string
1827
+ details?: string
1828
+ }> {
1829
+ // For Anthropic and Anthropic-compatible providers, use the official SDK for testing
1830
+ if (selectedProvider === 'anthropic' || selectedProvider === 'bigdream') {
1831
+ try {
1832
+ console.log(
1833
+ `[DEBUG] Testing ${selectedProvider} connection using official Anthropic SDK...`,
1834
+ )
1835
+
1836
+ // Determine the baseURL for testing
1837
+ let testBaseURL: string | undefined = undefined
1838
+ if (selectedProvider === 'bigdream') {
1839
+ testBaseURL = baseURL || 'https://api-key.info'
1840
+ } else if (selectedProvider === 'anthropic') {
1841
+ // For anthropic, use user-provided baseURL if available, otherwise undefined (official API)
1842
+ testBaseURL =
1843
+ baseURL && baseURL !== 'https://api.anthropic.com'
1844
+ ? baseURL
1845
+ : undefined
1846
+ }
1847
+
1848
+ // Use the verifyApiKey function which uses the official Anthropic SDK
1849
+ const isValid = await verifyApiKey(apiKey, testBaseURL)
1850
+
1851
+ if (isValid) {
1852
+ return {
1853
+ success: true,
1854
+ message: `✅ ${selectedProvider} connection test passed`,
1855
+ endpoint: '/messages',
1856
+ details: 'API key verified using official Anthropic SDK',
1857
+ }
1858
+ } else {
1859
+ return {
1860
+ success: false,
1861
+ message: `❌ ${selectedProvider} API key verification failed`,
1862
+ endpoint: '/messages',
1863
+ details:
1864
+ 'Invalid API key. Please check your API key and try again.',
1865
+ }
1866
+ }
1867
+ } catch (error) {
1868
+ console.log(`[DEBUG] ${selectedProvider} connection test error:`, error)
1869
+ return {
1870
+ success: false,
1871
+ message: `❌ ${selectedProvider} connection failed`,
1872
+ endpoint: '/messages',
1873
+ details: error instanceof Error ? error.message : String(error),
1874
+ }
1875
+ }
1876
+ }
1877
+
1878
+ // For other providers, return a placeholder success (we can extend this later)
1879
+ return {
1880
+ success: true,
1881
+ message: `✅ Configuration saved for ${selectedProvider}`,
1882
+ details: 'Provider-specific testing not implemented yet',
1883
+ }
1884
+ }
1885
+
1886
+ async function handleConnectionTest() {
1887
+ const result = await testConnection()
1888
+ setConnectionTestResult(result)
1889
+
1890
+ if (result.success) {
1891
+ // Auto-advance to confirmation after a short delay
1892
+ setTimeout(() => {
1893
+ navigateTo('confirmation')
1894
+ }, 2000)
1895
+ }
1896
+ }
1897
+
1898
+ const handleContextLengthSubmit = () => {
1899
+ // Context length value is already in state
1900
+ // Navigate to connection test screen
1901
+ navigateTo('connectionTest')
1902
+ }
1903
+
1904
/**
 * Persist the configured model profile via ModelManager.
 *
 * Resolves the effective provider and base URL (Anthropic sub-menu,
 * Azure resource URL, custom-openai override) and calls addModel, which
 * rejects duplicates. Returns the new model id, or null on validation
 * failure (the error is surfaced through setValidationError).
 *
 * NOTE(review): the apiKey screen also offers anthropicProviderType ===
 * 'opendev', but there is no 'opendev' case in this switch — it silently
 * falls through with actualProvider 'anthropic' and the default base URL.
 * Confirm whether that is intended.
 */
async function saveConfiguration(
  provider: ProviderType,
  model: string,
): Promise<string | null> {
  let baseURL = providerBaseUrl || providers[provider]?.baseURL || ''
  let actualProvider = provider

  // For Anthropic provider, determine the actual provider based on sub-menu selection
  if (provider === 'anthropic') {
    switch (anthropicProviderType) {
      case 'official':
        actualProvider = 'anthropic'
        baseURL = baseURL || 'https://api.anthropic.com'
        break
      case 'bigdream':
        actualProvider = 'bigdream'
        baseURL = baseURL || 'https://api-key.info'
        break
      case 'custom':
        actualProvider = 'anthropic' // Use anthropic for custom endpoints
        // baseURL is already set from user input
        break
    }
  }

  // For Azure, construct the baseURL using the resource name
  if (provider === 'azure') {
    baseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${model}`
  }
  // For custom OpenAI-compatible API, use the custom base URL
  else if (provider === 'custom-openai') {
    baseURL = customBaseUrl
  }

  try {
    // Use ModelManager's addModel method for duplicate validation
    const modelManager = getModelManager()

    const modelConfig = {
      name: `${actualProvider} ${model}`,
      provider: actualProvider,
      modelName: model,
      baseURL: baseURL,
      apiKey: apiKey || '',
      // Fall back to defaults when the user left numeric fields empty/invalid.
      maxTokens: parseInt(maxTokens) || DEFAULT_MAX_TOKENS,
      contextLength: contextLength || DEFAULT_CONTEXT_LENGTH,
      reasoningEffort,
    }

    // addModel method will throw error if duplicate exists
    return await modelManager.addModel(modelConfig)
  } catch (error) {
    // Validation failed - show error to user
    setValidationError(
      error instanceof Error ? error.message : 'Failed to add model',
    )
    return null
  }
}
1963
+
1964
+ async function handleConfirmation() {
1965
+ // Clear any previous validation errors
1966
+ setValidationError(null)
1967
+
1968
+ // Save the configuration and exit
1969
+ const modelId = await saveConfiguration(selectedProvider, selectedModel)
1970
+
1971
+ // If validation failed (modelId is null), don't proceed
1972
+ if (!modelId) {
1973
+ return // Error is already set in saveConfiguration
1974
+ }
1975
+
1976
+ // Handle model pointer assignment for new system
1977
+ if (modelId && (isOnboarding || targetPointer)) {
1978
+ if (isOnboarding) {
1979
+ // First-time setup: set all pointers to this model
1980
+ setAllPointersToModel(modelId)
1981
+ } else if (targetPointer) {
1982
+ // Specific pointer configuration: only set target pointer
1983
+ setModelPointer(targetPointer, modelId)
1984
+ }
1985
+ }
1986
+
1987
+ onDone()
1988
+ }
1989
+
1990
+ // Handle back navigation based on current screen
1991
+ const handleBack = () => {
1992
+ if (currentScreen === 'provider') {
1993
+ // If we're at the first screen, exit
1994
+ if (onCancel) {
1995
+ onCancel()
1996
+ } else {
1997
+ onDone()
1998
+ }
1999
+ } else {
2000
+ // Remove the current screen from the stack
2001
+ setScreenStack(prev => prev.slice(0, -1))
2002
+ }
2003
+ }
2004
+
2005
// Wire the Esc key to handleBack on every screen via the escape-navigation
// hook. NOTE(review): abortController is presumably used to cancel
// in-flight work when escaping — confirm against the hook's implementation.
useEscapeNavigation(handleBack, abortController)
2007
+
2008
+ // Handle cursor offset changes
2009
+ function handleCursorOffsetChange(offset: number) {
2010
+ setCursorOffset(offset)
2011
+ }
2012
+
2013
+ // Handle API key changes
2014
+ function handleApiKeyChange(value: string) {
2015
+ setApiKeyEdited(true)
2016
+ setApiKey(value)
2017
+ }
2018
+
2019
+ // Handle model search query changes
2020
+ function handleModelSearchChange(value: string) {
2021
+ setModelSearchQuery(value)
2022
+ // Update cursor position to end of text when typing
2023
+ setModelSearchCursorOffset(value.length)
2024
+ }
2025
+
2026
+ // Handle model search cursor offset changes
2027
+ function handleModelSearchCursorOffsetChange(offset: number) {
2028
+ setModelSearchCursorOffset(offset)
2029
+ }
2030
+
2031
// Global keyboard dispatcher for every screen in the flow (Enter/Tab
// submission, arrow-key selection, paste hint). Each branch early-returns,
// so order matters: earlier screens' handlers shadow later generic ones.
// (Original comment said "Resource Name screen" only — it covers far more.)
useInput((input, key) => {
  // Handle API key submission on Enter
  if (currentScreen === 'apiKey' && key.return) {
    if (apiKey) {
      handleApiKeySubmit(apiKey)
    }
    return
  }

  if (currentScreen === 'apiKey' && key.tab) {
    // For providers that support manual model input, skip to manual model input
    if (
      selectedProvider === 'anthropic' ||
      selectedProvider === 'kimi' ||
      selectedProvider === 'deepseek' ||
      selectedProvider === 'qwen' ||
      selectedProvider === 'glm' ||
      selectedProvider === 'minimax' ||
      selectedProvider === 'baidu-qianfan' ||
      selectedProvider === 'siliconflow' ||
      selectedProvider === 'custom-openai'
    ) {
      navigateTo('modelInput')
      return
    }

    // For other providers, try to fetch models without API key
    fetchModelsWithRetry().catch(error => {
      // The retry logic in fetchModelsWithRetry already handles the error display
      // This catch is just to prevent unhandled promise rejection
      console.error('Final error after retries:', error)
    })
    return
  }

  // Handle Resource Name submission on Enter
  if (currentScreen === 'resourceName' && key.return) {
    if (resourceName) {
      handleResourceNameSubmit(resourceName)
    }
    return
  }

  // Handle Base URL submission on Enter
  if (currentScreen === 'baseUrl' && key.return) {
    if (selectedProvider === 'custom-openai') {
      handleCustomBaseUrlSubmit(customBaseUrl)
    } else {
      // For all other providers (including ollama), use the general handler
      handleProviderBaseUrlSubmit(providerBaseUrl)
    }
    return
  }

  // Handle Custom Model Name submission on Enter
  if (currentScreen === 'modelInput' && key.return) {
    if (customModelName) {
      handleCustomModelSubmit(customModelName)
    }
    return
  }

  // Handle confirmation on Enter
  if (currentScreen === 'confirmation' && key.return) {
    handleConfirmation().catch(error => {
      console.error('Error in handleConfirmation:', error)
      setValidationError(
        error instanceof Error ? error.message : 'Unexpected error occurred',
      )
    })
    return
  }

  // Handle connection test: Enter starts the test, advances on success,
  // or retries after a failure.
  if (currentScreen === 'connectionTest') {
    if (key.return) {
      if (!isTestingConnection && !connectionTestResult) {
        handleConnectionTest()
      } else if (connectionTestResult && connectionTestResult.success) {
        navigateTo('confirmation')
      } else if (connectionTestResult && !connectionTestResult.success) {
        // Retry the test
        handleConnectionTest()
      }
      return
    }
  }

  // Handle context length selection (Enter submits, arrows move the
  // highlight; an unknown current value jumps to the default option).
  if (currentScreen === 'contextLength') {
    if (key.return) {
      handleContextLengthSubmit()
      return
    }

    if (key.upArrow) {
      const currentIndex = CONTEXT_LENGTH_OPTIONS.findIndex(
        opt => opt.value === contextLength,
      )
      // Decrement, jump to default when not found, wrap to last from the top.
      const newIndex =
        currentIndex > 0
          ? currentIndex - 1
          : currentIndex === -1
            ? CONTEXT_LENGTH_OPTIONS.findIndex(
                opt => opt.value === DEFAULT_CONTEXT_LENGTH,
              ) || 0
            : CONTEXT_LENGTH_OPTIONS.length - 1
      setContextLength(CONTEXT_LENGTH_OPTIONS[newIndex].value)
      return
    }

    if (key.downArrow) {
      const currentIndex = CONTEXT_LENGTH_OPTIONS.findIndex(
        opt => opt.value === contextLength,
      )
      // Jump to default when not found, otherwise increment with wrap-around.
      const newIndex =
        currentIndex === -1
          ? CONTEXT_LENGTH_OPTIONS.findIndex(
              opt => opt.value === DEFAULT_CONTEXT_LENGTH,
            ) || 0
          : (currentIndex + 1) % CONTEXT_LENGTH_OPTIONS.length
      setContextLength(CONTEXT_LENGTH_OPTIONS[newIndex].value)
      return
    }
  }

  // Handle paste event (Ctrl+V or Cmd+V)
  if (
    currentScreen === 'apiKey' &&
    ((key.ctrl && input === 'v') || (key.meta && input === 'v'))
  ) {
    // We can't directly access clipboard in terminal, but we can show a message
    setModelLoadError(
      "Please use your terminal's paste functionality or type the API key manually",
    )
    return
  }

  // Handle Tab key for form navigation in model params screen
  if (currentScreen === 'modelParams' && key.tab) {
    const formFields = getFormFieldsForModelParams()
    // Move to next field
    setActiveFieldIndex(current => (current + 1) % formFields.length)
    return
  }

  // Handle Enter key for form submission in model params screen
  if (currentScreen === 'modelParams' && key.return) {
    const formFields = getFormFieldsForModelParams()
    const currentField = formFields[activeFieldIndex]

    if (
      currentField.name === 'submit' ||
      activeFieldIndex === formFields.length - 1
    ) {
      // If on the Continue button, submit the form
      handleModelParamsSubmit()
    } else if (currentField.component === 'select') {
      // For select fields, move to the next field (since selection should be handled by Select component)
      setActiveFieldIndex(current =>
        Math.min(current + 1, formFields.length - 1),
      )
    }
    return
  }
})
2198
+
2199
// Build the field descriptors for the model-parameters form: max tokens,
// an optional reasoning-effort select (only when the model supports it),
// and the trailing Continue button. The Tab/Enter handlers in useInput
// index into this same list, so field order is significant.
//
// NOTE(review): maxTokens' `value` is a number while its option values are
// stringified — and the reasoningEffort entry has no `options` at all.
// Presumably the renderer compensates; confirm against the modelParams JSX.
function getFormFieldsForModelParams() {
  return [
    {
      name: 'maxTokens',
      label: 'Maximum Tokens',
      description: 'Select the maximum number of tokens to generate.',
      value: parseInt(maxTokens),
      component: 'select',
      options: MAX_TOKENS_OPTIONS.map(option => ({
        label: option.label,
        value: option.value.toString(),
      })),
      defaultValue: maxTokens,
    },
    // Reasoning effort is only offered for models that advertise support.
    ...(supportsReasoningEffort
      ? [
          {
            name: 'reasoningEffort',
            label: 'Reasoning Effort',
            description: 'Controls reasoning depth for complex problems.',
            value: reasoningEffort,
            component: 'select',
          },
        ]
      : []),
    {
      name: 'submit',
      label: 'Continue →',
      component: 'button',
    },
  ]
}
2232
+
2233
// Render API Key Input Screen: masked key entry, per-provider "get your
// key" hints, loading/error status, and the Enter/Tab/Esc legend. The Tab
// hint mirrors the provider list handled in the useInput 'apiKey' branch.
if (currentScreen === 'apiKey') {
  const modelTypeText = 'this model profile'

  return (
    <Box flexDirection="column" gap={1}>
      <Box
        flexDirection="column"
        gap={1}
        borderStyle="round"
        borderColor={theme.secondaryBorder}
        paddingX={2}
        paddingY={1}
      >
        <Text bold>
          API Key Setup{' '}
          {exitState.pending
            ? `(press ${exitState.keyName} again to exit)`
            : ''}
        </Text>
        <Box flexDirection="column" gap={1}>
          <Text bold>
            Enter your {getProviderLabel(selectedProvider, 0).split(' (')[0]}{' '}
            API key for {modelTypeText}:
          </Text>
          <Box flexDirection="column" width={70}>
            <Text color={theme.secondaryText}>
              This key will be stored locally and used to access the{' '}
              {selectedProvider} API.
              <Newline />
              Your key is never sent to our servers.
              <Newline />
              <Newline />
              {selectedProvider === 'kimi' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://platform.moonshot.cn/console/api-keys
                  </Text>
                </>
              )}
              {selectedProvider === 'deepseek' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://platform.deepseek.com/api_keys
                  </Text>
                </>
              )}
              {selectedProvider === 'siliconflow' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://cloud.siliconflow.cn/i/oJWsm6io
                  </Text>
                </>
              )}
              {selectedProvider === 'qwen' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://bailian.console.aliyun.com/?tab=model#/api-key
                  </Text>
                </>
              )}
              {selectedProvider === 'glm' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://open.bigmodel.cn (API Keys section)
                  </Text>
                </>
              )}
              {selectedProvider === 'minimax' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://www.minimax.io/platform/user-center/basic-information
                  </Text>
                </>
              )}
              {selectedProvider === 'baidu-qianfan' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://console.bce.baidu.com/iam/#/iam/accesslist
                  </Text>
                </>
              )}
              {selectedProvider === 'anthropic' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    {anthropicProviderType === 'official'
                      ? 'https://console.anthropic.com/settings/keys'
                      : anthropicProviderType === 'bigdream'
                        ? 'https://api-key.info/register?aff=MSl4'
                        : anthropicProviderType === 'opendev'
                          ? 'https://api.openai-next.com/register/?aff_code=4xo7'
                          : 'your custom API provider'}
                  </Text>
                </>
              )}
              {selectedProvider === 'openai' && (
                <>
                  💡 Get your API key from:{' '}
                  <Text color={theme.suggestion}>
                    https://platform.openai.com/api-keys
                  </Text>
                </>
              )}
            </Text>
          </Box>

          {/* Masked key entry; cursor state is mirrored through
              handleCursorOffsetChange so redraws keep the caret in place. */}
          <Box>
            <TextInput
              placeholder="sk-..."
              value={apiKey}
              onChange={handleApiKeyChange}
              onSubmit={handleApiKeySubmit}
              mask="*"
              columns={500}
              cursorOffset={cursorOffset}
              onChangeCursorOffset={handleCursorOffsetChange}
              showCursor={true}
            />
          </Box>

          <Box marginTop={1}>
            <Text>
              <Text color={theme.suggestion} dimColor={!apiKey}>
                [Submit API Key]
              </Text>
              <Text>
                {' '}
                - Press Enter or click to continue with this API key
              </Text>
            </Text>
          </Box>

          {/* Transient status: model-list fetch progress and errors. */}
          {isLoadingModels && (
            <Box>
              <Text color={theme.suggestion}>
                Loading available models...
              </Text>
            </Box>
          )}
          {modelLoadError && (
            <Box>
              <Text color="red">Error: {modelLoadError}</Text>
            </Box>
          )}
          <Box marginTop={1}>
            <Text dimColor>
              Press <Text color={theme.suggestion}>Enter</Text> to continue,{' '}
              <Text color={theme.suggestion}>Tab</Text> to{' '}
              {selectedProvider === 'anthropic' ||
              selectedProvider === 'kimi' ||
              selectedProvider === 'deepseek' ||
              selectedProvider === 'qwen' ||
              selectedProvider === 'glm' ||
              selectedProvider === 'minimax' ||
              selectedProvider === 'baidu-qianfan' ||
              selectedProvider === 'siliconflow' ||
              selectedProvider === 'custom-openai'
                ? 'skip to manual model input'
                : 'skip using a key'}
              , or <Text color={theme.suggestion}>Esc</Text> to go back
            </Text>
          </Box>
        </Box>
      </Box>
    </Box>
  )
}
2408
+
2409
// Render Model Selection Screen: live-filtered model list for the chosen
// provider. modelOptions is the filtered view of availableModels (filtering
// itself happens elsewhere, driven by modelSearchQuery).
if (currentScreen === 'model') {
  const modelTypeText = 'this model profile'

  return (
    <Box flexDirection="column" gap={1}>
      <Box
        flexDirection="column"
        gap={1}
        borderStyle="round"
        borderColor={theme.secondaryBorder}
        paddingX={2}
        paddingY={1}
      >
        <Text bold>
          Model Selection{' '}
          {exitState.pending
            ? `(press ${exitState.keyName} again to exit)`
            : ''}
        </Text>
        <Box flexDirection="column" gap={1}>
          <Text bold>
            Select a model from{' '}
            {
              getProviderLabel(
                selectedProvider,
                availableModels.length,
              ).split(' (')[0]
            }{' '}
            for {modelTypeText}:
          </Text>
          <Box flexDirection="column" width={70}>
            <Text color={theme.secondaryText}>
              This model profile can be assigned to different pointers (main,
              task, reasoning, quick) for various use cases.
            </Text>
          </Box>

          {/* Search box narrows the Select options below as the user types. */}
          <Box marginY={1}>
            <Text bold>Search models:</Text>
            <TextInput
              placeholder="Type to filter models..."
              value={modelSearchQuery}
              onChange={handleModelSearchChange}
              columns={100}
              cursorOffset={modelSearchCursorOffset}
              onChangeCursorOffset={handleModelSearchCursorOffsetChange}
              showCursor={true}
              focus={true}
            />
          </Box>

          {modelOptions.length > 0 ? (
            <>
              <Select
                options={modelOptions}
                onChange={handleModelSelection}
              />
              <Text dimColor>
                Showing {modelOptions.length} of {availableModels.length}{' '}
                models
              </Text>
            </>
          ) : (
            <Box>
              {/* Distinguish "filter matched nothing" from "provider returned
                  no models at all". */}
              {availableModels.length > 0 ? (
                <Text color="yellow">
                  No models match your search. Try a different query.
                </Text>
              ) : (
                <Text color="yellow">
                  No models available for this provider.
                </Text>
              )}
            </Box>
          )}

          <Box marginTop={1}>
            <Text dimColor>
              Press <Text color={theme.suggestion}>Esc</Text> to go back to
              API key input
            </Text>
          </Box>
        </Box>
      </Box>
    </Box>
  )
}
2497
+
2498
+ if (currentScreen === 'modelParams') {
2499
+ // Define form fields
2500
+ const formFields = getFormFieldsForModelParams()
2501
+
2502
+ return (
2503
+ <Box flexDirection="column" gap={1}>
2504
+ <Box
2505
+ flexDirection="column"
2506
+ gap={1}
2507
+ borderStyle="round"
2508
+ borderColor={theme.secondaryBorder}
2509
+ paddingX={2}
2510
+ paddingY={1}
2511
+ >
2512
+ <Text bold>
2513
+ Model Parameters{' '}
2514
+ {exitState.pending
2515
+ ? `(press ${exitState.keyName} again to exit)`
2516
+ : ''}
2517
+ </Text>
2518
+ <Box flexDirection="column" gap={1}>
2519
+ <Text bold>Configure parameters for {selectedModel}:</Text>
2520
+ <Box flexDirection="column" width={70}>
2521
+ <Text color={theme.secondaryText}>
2522
+ Use <Text color={theme.suggestion}>Tab</Text> to navigate
2523
+ between fields. Press{' '}
2524
+ <Text color={theme.suggestion}>Enter</Text> to submit.
2525
+ </Text>
2526
+ </Box>
2527
+
2528
+ <Box flexDirection="column">
2529
+ {formFields.map((field, index) => (
2530
+ <Box flexDirection="column" marginY={1} key={field.name}>
2531
+ {field.component !== 'button' ? (
2532
+ <>
2533
+ <Text
2534
+ bold
2535
+ color={
2536
+ activeFieldIndex === index ? theme.success : undefined
2537
+ }
2538
+ >
2539
+ {field.label}
2540
+ </Text>
2541
+ {field.description && (
2542
+ <Text color={theme.secondaryText}>
2543
+ {field.description}
2544
+ </Text>
2545
+ )}
2546
+ </>
2547
+ ) : (
2548
+ <Text
2549
+ bold
2550
+ color={
2551
+ activeFieldIndex === index ? theme.success : undefined
2552
+ }
2553
+ >
2554
+ {field.label}
2555
+ </Text>
2556
+ )}
2557
+ <Box marginY={1}>
2558
+ {activeFieldIndex === index ? (
2559
+ field.component === 'select' ? (
2560
+ field.name === 'maxTokens' ? (
2561
+ <Select
2562
+ options={field.options || []}
2563
+ onChange={value => {
2564
+ const numValue = parseInt(value)
2565
+ setMaxTokens(numValue.toString())
2566
+ setSelectedMaxTokensPreset(numValue)
2567
+ setMaxTokensCursorOffset(
2568
+ numValue.toString().length,
2569
+ )
2570
+ // Move to next field after selection
2571
+ setTimeout(() => {
2572
+ setActiveFieldIndex(index + 1)
2573
+ }, 100)
2574
+ }}
2575
+ defaultValue={field.defaultValue}
2576
+ />
2577
+ ) : (
2578
+ <Select
2579
+ options={reasoningEffortOptions}
2580
+ onChange={value => {
2581
+ setReasoningEffort(value as ReasoningEffortOption)
2582
+ // Move to next field after selection
2583
+ setTimeout(() => {
2584
+ setActiveFieldIndex(index + 1)
2585
+ }, 100)
2586
+ }}
2587
+ defaultValue={reasoningEffort}
2588
+ />
2589
+ )
2590
+ ) : null
2591
+ ) : field.name === 'maxTokens' ? (
2592
+ <Text color={theme.secondaryText}>
2593
+ Current:{' '}
2594
+ <Text color={theme.suggestion}>
2595
+ {MAX_TOKENS_OPTIONS.find(
2596
+ opt => opt.value === parseInt(maxTokens),
2597
+ )?.label || `${maxTokens} tokens`}
2598
+ </Text>
2599
+ </Text>
2600
+ ) : field.name === 'reasoningEffort' ? (
2601
+ <Text color={theme.secondaryText}>
2602
+ Current:{' '}
2603
+ <Text color={theme.suggestion}>{reasoningEffort}</Text>
2604
+ </Text>
2605
+ ) : null}
2606
+ </Box>
2607
+ </Box>
2608
+ ))}
2609
+
2610
+ <Box marginTop={1}>
2611
+ <Text dimColor>
2612
+ Press <Text color={theme.suggestion}>Tab</Text> to navigate,{' '}
2613
+ <Text color={theme.suggestion}>Enter</Text> to continue, or{' '}
2614
+ <Text color={theme.suggestion}>Esc</Text> to go back
2615
+ </Text>
2616
+ </Box>
2617
+ </Box>
2618
+ </Box>
2619
+ </Box>
2620
+ </Box>
2621
+ )
2622
+ }
2623
+
2624
+ // Render Resource Name Input Screen
2625
+ if (currentScreen === 'resourceName') {
2626
+ return (
2627
+ <Box flexDirection="column" gap={1}>
2628
+ <Box
2629
+ flexDirection="column"
2630
+ gap={1}
2631
+ borderStyle="round"
2632
+ borderColor={theme.secondaryBorder}
2633
+ paddingX={2}
2634
+ paddingY={1}
2635
+ >
2636
+ <Text bold>
2637
+ Azure Resource Setup{' '}
2638
+ {exitState.pending
2639
+ ? `(press ${exitState.keyName} again to exit)`
2640
+ : ''}
2641
+ </Text>
2642
+ <Box flexDirection="column" gap={1}>
2643
+ <Text bold>Enter your Azure OpenAI resource name:</Text>
2644
+ <Box flexDirection="column" width={70}>
2645
+ <Text color={theme.secondaryText}>
2646
+ This is the name of your Azure OpenAI resource (without the full
2647
+ domain).
2648
+ <Newline />
2649
+ For example, if your endpoint is
2650
+ "https://myresource.openai.azure.com", enter "myresource".
2651
+ </Text>
2652
+ </Box>
2653
+
2654
+ <Box>
2655
+ <TextInput
2656
+ placeholder="myazureresource"
2657
+ value={resourceName}
2658
+ onChange={setResourceName}
2659
+ onSubmit={handleResourceNameSubmit}
2660
+ columns={100}
2661
+ cursorOffset={resourceNameCursorOffset}
2662
+ onChangeCursorOffset={setResourceNameCursorOffset}
2663
+ showCursor={true}
2664
+ />
2665
+ </Box>
2666
+
2667
+ <Box marginTop={1}>
2668
+ <Text>
2669
+ <Text color={theme.suggestion} dimColor={!resourceName}>
2670
+ [Submit Resource Name]
2671
+ </Text>
2672
+ <Text> - Press Enter or click to continue</Text>
2673
+ </Text>
2674
+ </Box>
2675
+
2676
+ <Box marginTop={1}>
2677
+ <Text dimColor>
2678
+ Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2679
+ <Text color={theme.suggestion}>Esc</Text> to go back
2680
+ </Text>
2681
+ </Box>
2682
+ </Box>
2683
+ </Box>
2684
+ </Box>
2685
+ )
2686
+ }
2687
+
2688
+ // Render Base URL Input Screen (for all providers)
2689
+ if (currentScreen === 'baseUrl') {
2690
+ const isCustomOpenAI = selectedProvider === 'custom-openai'
2691
+
2692
+ // For custom-openai, we still use the old logic with customBaseUrl
2693
+ if (isCustomOpenAI) {
2694
+ return (
2695
+ <Box flexDirection="column" gap={1}>
2696
+ <Box
2697
+ flexDirection="column"
2698
+ gap={1}
2699
+ borderStyle="round"
2700
+ borderColor={theme.secondaryBorder}
2701
+ paddingX={2}
2702
+ paddingY={1}
2703
+ >
2704
+ <Text bold>
2705
+ Custom API Server Setup{' '}
2706
+ {exitState.pending
2707
+ ? `(press ${exitState.keyName} again to exit)`
2708
+ : ''}
2709
+ </Text>
2710
+ <Box flexDirection="column" gap={1}>
2711
+ <Text bold>Enter your custom API URL:</Text>
2712
+ <Box flexDirection="column" width={70}>
2713
+ <Text color={theme.secondaryText}>
2714
+ This is the base URL for your OpenAI-compatible API.
2715
+ <Newline />
2716
+ For example: https://api.example.com/v1
2717
+ </Text>
2718
+ </Box>
2719
+
2720
+ <Box>
2721
+ <TextInput
2722
+ placeholder="https://api.example.com/v1"
2723
+ value={customBaseUrl}
2724
+ onChange={setCustomBaseUrl}
2725
+ onSubmit={handleCustomBaseUrlSubmit}
2726
+ columns={100}
2727
+ cursorOffset={customBaseUrlCursorOffset}
2728
+ onChangeCursorOffset={setCustomBaseUrlCursorOffset}
2729
+ showCursor={!isLoadingModels}
2730
+ focus={!isLoadingModels}
2731
+ />
2732
+ </Box>
2733
+
2734
+ <Box marginTop={1}>
2735
+ <Text>
2736
+ <Text
2737
+ color={
2738
+ isLoadingModels ? theme.secondaryText : theme.suggestion
2739
+ }
2740
+ >
2741
+ [Submit Base URL]
2742
+ </Text>
2743
+ <Text> - Press Enter or click to continue</Text>
2744
+ </Text>
2745
+ </Box>
2746
+
2747
+ <Box marginTop={1}>
2748
+ <Text dimColor>
2749
+ Press <Text color={theme.suggestion}>Enter</Text> to continue
2750
+ or <Text color={theme.suggestion}>Esc</Text> to go back
2751
+ </Text>
2752
+ </Box>
2753
+ </Box>
2754
+ </Box>
2755
+ </Box>
2756
+ )
2757
+ }
2758
+
2759
+ // For all other providers, use the new general provider URL configuration
2760
+ const providerName = providers[selectedProvider]?.name || selectedProvider
2761
+ const defaultUrl = providers[selectedProvider]?.baseURL || ''
2762
+
2763
+ return (
2764
+ <Box flexDirection="column" gap={1}>
2765
+ <Box
2766
+ flexDirection="column"
2767
+ gap={1}
2768
+ borderStyle="round"
2769
+ borderColor={theme.secondaryBorder}
2770
+ paddingX={2}
2771
+ paddingY={1}
2772
+ >
2773
+ <Text bold>
2774
+ {providerName} API Configuration{' '}
2775
+ {exitState.pending
2776
+ ? `(press ${exitState.keyName} again to exit)`
2777
+ : ''}
2778
+ </Text>
2779
+ <Box flexDirection="column" gap={1}>
2780
+ <Text bold>Configure the API endpoint for {providerName}:</Text>
2781
+ <Box flexDirection="column" width={70}>
2782
+ <Text color={theme.secondaryText}>
2783
+ {selectedProvider === 'ollama' ? (
2784
+ <>
2785
+ This is the URL of your Ollama server.
2786
+ <Newline />
2787
+ Default is http://localhost:11434/v1 for local Ollama
2788
+ installations.
2789
+ </>
2790
+ ) : (
2791
+ <>
2792
+ This is the base URL for the {providerName} API.
2793
+ <Newline />
2794
+ You can modify this URL or press Enter to use the default.
2795
+ </>
2796
+ )}
2797
+ </Text>
2798
+ </Box>
2799
+
2800
+ <Box>
2801
+ <TextInput
2802
+ placeholder={defaultUrl}
2803
+ value={providerBaseUrl}
2804
+ onChange={setProviderBaseUrl}
2805
+ onSubmit={handleProviderBaseUrlSubmit}
2806
+ columns={100}
2807
+ cursorOffset={providerBaseUrlCursorOffset}
2808
+ onChangeCursorOffset={setProviderBaseUrlCursorOffset}
2809
+ showCursor={!isLoadingModels}
2810
+ focus={!isLoadingModels}
2811
+ />
2812
+ </Box>
2813
+
2814
+ <Box marginTop={1}>
2815
+ <Text>
2816
+ <Text
2817
+ color={
2818
+ isLoadingModels ? theme.secondaryText : theme.suggestion
2819
+ }
2820
+ >
2821
+ [Submit Base URL]
2822
+ </Text>
2823
+ <Text> - Press Enter or click to continue</Text>
2824
+ </Text>
2825
+ </Box>
2826
+
2827
+ {isLoadingModels && (
2828
+ <Box marginTop={1}>
2829
+ <Text color={theme.success}>
2830
+ {selectedProvider === 'ollama'
2831
+ ? 'Connecting to Ollama server...'
2832
+ : `Connecting to ${providerName}...`}
2833
+ </Text>
2834
+ </Box>
2835
+ )}
2836
+
2837
+ {modelLoadError && (
2838
+ <Box marginTop={1}>
2839
+ <Text color="red">Error: {modelLoadError}</Text>
2840
+ </Box>
2841
+ )}
2842
+
2843
+ <Box marginTop={1}>
2844
+ <Text dimColor>
2845
+ Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2846
+ <Text color={theme.suggestion}>Esc</Text> to go back
2847
+ </Text>
2848
+ </Box>
2849
+ </Box>
2850
+ </Box>
2851
+ </Box>
2852
+ )
2853
+ }
2854
+
2855
+ // Render Custom Model Input Screen
2856
+ if (currentScreen === 'modelInput') {
2857
+ const modelTypeText = 'this model profile'
2858
+
2859
+ // Determine the screen title and description based on provider
2860
+ let screenTitle = 'Manual Model Setup'
2861
+ let description = 'Enter the model name manually'
2862
+ let placeholder = 'gpt-4'
2863
+ let examples = 'For example: "gpt-4", "gpt-3.5-turbo", etc.'
2864
+
2865
+ if (selectedProvider === 'azure') {
2866
+ screenTitle = 'Azure Model Setup'
2867
+ description = `Enter your Azure OpenAI deployment name for ${modelTypeText}:`
2868
+ examples = 'For example: "gpt-4", "gpt-35-turbo", etc.'
2869
+ placeholder = 'gpt-4'
2870
+ } else if (selectedProvider === 'anthropic') {
2871
+ screenTitle = 'Claude Model Setup'
2872
+ description = `Enter the Claude model name for ${modelTypeText}:`
2873
+ examples =
2874
+ 'For example: "claude-3-5-sonnet-latest", "claude-3-5-haiku-latest", etc.'
2875
+ placeholder = 'claude-3-5-sonnet-latest'
2876
+ } else if (selectedProvider === 'bigdream') {
2877
+ screenTitle = 'BigDream Model Setup'
2878
+ description = `Enter the BigDream model name for ${modelTypeText}:`
2879
+ examples =
2880
+ 'For example: "claude-3-5-sonnet-latest", "claude-3-5-haiku-latest", etc.'
2881
+ placeholder = 'claude-3-5-sonnet-latest'
2882
+ } else if (selectedProvider === 'kimi') {
2883
+ screenTitle = 'Kimi Model Setup'
2884
+ description = `Enter the Kimi model name for ${modelTypeText}:`
2885
+ examples = 'For example: "kimi-k2-0711-preview"'
2886
+ placeholder = 'kimi-k2-0711-preview'
2887
+ } else if (selectedProvider === 'deepseek') {
2888
+ screenTitle = 'DeepSeek Model Setup'
2889
+ description = `Enter the DeepSeek model name for ${modelTypeText}:`
2890
+ examples =
2891
+ 'For example: "deepseek-chat", "deepseek-coder", "deepseek-reasoner", etc.'
2892
+ placeholder = 'deepseek-chat'
2893
+ } else if (selectedProvider === 'siliconflow') {
2894
+ screenTitle = 'SiliconFlow Model Setup'
2895
+ description = `Enter the SiliconFlow model name for ${modelTypeText}:`
2896
+ examples =
2897
+ 'For example: "Qwen/Qwen2.5-72B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", etc.'
2898
+ placeholder = 'Qwen/Qwen2.5-72B-Instruct'
2899
+ } else if (selectedProvider === 'qwen') {
2900
+ screenTitle = 'Qwen Model Setup'
2901
+ description = `Enter the Qwen model name for ${modelTypeText}:`
2902
+ examples = 'For example: "qwen-plus", "qwen-turbo", "qwen-max", etc.'
2903
+ placeholder = 'qwen-plus'
2904
+ } else if (selectedProvider === 'glm') {
2905
+ screenTitle = 'GLM Model Setup'
2906
+ description = `Enter the GLM model name for ${modelTypeText}:`
2907
+ examples = 'For example: "glm-4", "glm-4v", "glm-3-turbo", etc.'
2908
+ placeholder = 'glm-4'
2909
+ } else if (selectedProvider === 'minimax') {
2910
+ screenTitle = 'MiniMax Model Setup'
2911
+ description = `Enter the MiniMax model name for ${modelTypeText}:`
2912
+ examples =
2913
+ 'For example: "abab6.5s-chat", "abab6.5g-chat", "abab5.5s-chat", etc.'
2914
+ placeholder = 'abab6.5s-chat'
2915
+ } else if (selectedProvider === 'baidu-qianfan') {
2916
+ screenTitle = 'Baidu Qianfan Model Setup'
2917
+ description = `Enter the Baidu Qianfan model name for ${modelTypeText}:`
2918
+ examples =
2919
+ 'For example: "ERNIE-4.0-8K", "ERNIE-3.5-8K", "ERNIE-Speed-128K", etc.'
2920
+ placeholder = 'ERNIE-4.0-8K'
2921
+ } else if (selectedProvider === 'custom-openai') {
2922
+ screenTitle = 'Custom API Model Setup'
2923
+ description = `Enter the model name for ${modelTypeText}:`
2924
+ examples = 'Enter the exact model name as supported by your API endpoint.'
2925
+ placeholder = 'model-name'
2926
+ }
2927
+
2928
+ return (
2929
+ <Box flexDirection="column" gap={1}>
2930
+ <Box
2931
+ flexDirection="column"
2932
+ gap={1}
2933
+ borderStyle="round"
2934
+ borderColor={theme.secondaryBorder}
2935
+ paddingX={2}
2936
+ paddingY={1}
2937
+ >
2938
+ <Text bold>
2939
+ {screenTitle}{' '}
2940
+ {exitState.pending
2941
+ ? `(press ${exitState.keyName} again to exit)`
2942
+ : ''}
2943
+ </Text>
2944
+ <Box flexDirection="column" gap={1}>
2945
+ <Text bold>{description}</Text>
2946
+ <Box flexDirection="column" width={70}>
2947
+ <Text color={theme.secondaryText}>
2948
+ {selectedProvider === 'azure'
2949
+ ? 'This is the deployment name you configured in your Azure OpenAI resource.'
2950
+ : selectedProvider === 'anthropic'
2951
+ ? 'This should be a valid Claude model identifier from Claude.'
2952
+ : selectedProvider === 'bigdream'
2953
+ ? 'This should be a valid Claude model identifier supported by BigDream.'
2954
+ : selectedProvider === 'kimi'
2955
+ ? 'This should be a valid Kimi model identifier from Moonshot AI.'
2956
+ : selectedProvider === 'deepseek'
2957
+ ? 'This should be a valid DeepSeek model identifier.'
2958
+ : selectedProvider === 'siliconflow'
2959
+ ? 'This should be a valid SiliconFlow model identifier.'
2960
+ : selectedProvider === 'qwen'
2961
+ ? 'This should be a valid Qwen model identifier from Alibaba Cloud.'
2962
+ : selectedProvider === 'glm'
2963
+ ? 'This should be a valid GLM model identifier from Zhipu AI.'
2964
+ : selectedProvider === 'minimax'
2965
+ ? 'This should be a valid MiniMax model identifier.'
2966
+ : selectedProvider === 'baidu-qianfan'
2967
+ ? 'This should be a valid Baidu Qianfan model identifier.'
2968
+ : 'This should match the model name supported by your API endpoint.'}
2969
+ <Newline />
2970
+ {examples}
2971
+ </Text>
2972
+ </Box>
2973
+
2974
+ <Box>
2975
+ <TextInput
2976
+ placeholder={placeholder}
2977
+ value={customModelName}
2978
+ onChange={setCustomModelName}
2979
+ onSubmit={handleCustomModelSubmit}
2980
+ columns={100}
2981
+ cursorOffset={customModelNameCursorOffset}
2982
+ onChangeCursorOffset={setCustomModelNameCursorOffset}
2983
+ showCursor={true}
2984
+ />
2985
+ </Box>
2986
+
2987
+ <Box marginTop={1}>
2988
+ <Text>
2989
+ <Text color={theme.suggestion} dimColor={!customModelName}>
2990
+ [Submit Model Name]
2991
+ </Text>
2992
+ <Text> - Press Enter or click to continue</Text>
2993
+ </Text>
2994
+ </Box>
2995
+
2996
+ <Box marginTop={1}>
2997
+ <Text dimColor>
2998
+ Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2999
+ <Text color={theme.suggestion}>Esc</Text> to go back
3000
+ </Text>
3001
+ </Box>
3002
+ </Box>
3003
+ </Box>
3004
+ </Box>
3005
+ )
3006
+ }
3007
+
3008
+ // Render Context Length Selection Screen
3009
+ if (currentScreen === 'contextLength') {
3010
+ const selectedOption =
3011
+ CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength) ||
3012
+ CONTEXT_LENGTH_OPTIONS[2] // Default to 128K
3013
+
3014
+ return (
3015
+ <Box flexDirection="column" gap={1}>
3016
+ <Box
3017
+ flexDirection="column"
3018
+ gap={1}
3019
+ borderStyle="round"
3020
+ borderColor={theme.secondaryBorder}
3021
+ paddingX={2}
3022
+ paddingY={1}
3023
+ >
3024
+ <Text bold>
3025
+ Context Length Configuration{' '}
3026
+ {exitState.pending
3027
+ ? `(press ${exitState.keyName} again to exit)`
3028
+ : ''}
3029
+ </Text>
3030
+ <Box flexDirection="column" gap={1}>
3031
+ <Text bold>Choose the context window length for your model:</Text>
3032
+ <Box flexDirection="column" width={70}>
3033
+ <Text color={theme.secondaryText}>
3034
+ This determines how much conversation history and context the
3035
+ model can process at once. Higher values allow for longer
3036
+ conversations but may increase costs.
3037
+ </Text>
3038
+ </Box>
3039
+
3040
+ <Box flexDirection="column" marginY={1}>
3041
+ {CONTEXT_LENGTH_OPTIONS.map((option, index) => {
3042
+ const isSelected = option.value === contextLength
3043
+ return (
3044
+ <Box key={option.value} flexDirection="row">
3045
+ <Text color={isSelected ? 'blue' : undefined}>
3046
+ {isSelected ? '→ ' : ' '}
3047
+ {option.label}
3048
+ {option.value === DEFAULT_CONTEXT_LENGTH
3049
+ ? ' (recommended)'
3050
+ : ''}
3051
+ </Text>
3052
+ </Box>
3053
+ )
3054
+ })}
3055
+ </Box>
3056
+
3057
+ <Box flexDirection="column" marginY={1}>
3058
+ <Text dimColor>
3059
+ Selected:{' '}
3060
+ <Text color={theme.suggestion}>{selectedOption.label}</Text>
3061
+ </Text>
3062
+ </Box>
3063
+ </Box>
3064
+ </Box>
3065
+
3066
+ <Box marginLeft={1}>
3067
+ <Text dimColor>
3068
+ ↑/↓ to select · Enter to continue · Esc to go back
3069
+ </Text>
3070
+ </Box>
3071
+ </Box>
3072
+ )
3073
+ }
3074
+
3075
+ // Render Connection Test Screen
3076
+ if (currentScreen === 'connectionTest') {
3077
+ const providerDisplayName = getProviderLabel(selectedProvider, 0).split(
3078
+ ' (',
3079
+ )[0]
3080
+
3081
+ return (
3082
+ <Box flexDirection="column" gap={1}>
3083
+ <Box
3084
+ flexDirection="column"
3085
+ gap={1}
3086
+ borderStyle="round"
3087
+ borderColor={theme.secondaryBorder}
3088
+ paddingX={2}
3089
+ paddingY={1}
3090
+ >
3091
+ <Text bold>
3092
+ Connection Test{' '}
3093
+ {exitState.pending
3094
+ ? `(press ${exitState.keyName} again to exit)`
3095
+ : ''}
3096
+ </Text>
3097
+ <Box flexDirection="column" gap={1}>
3098
+ <Text bold>Testing connection to {providerDisplayName}...</Text>
3099
+ <Box flexDirection="column" width={70}>
3100
+ <Text color={theme.secondaryText}>
3101
+ This will verify your configuration by sending a test request to
3102
+ the API.
3103
+ {selectedProvider === 'minimax' && (
3104
+ <>
3105
+ <Newline />
3106
+ For MiniMax, we'll test both v2 and v1 endpoints to find the
3107
+ best one.
3108
+ </>
3109
+ )}
3110
+ </Text>
3111
+ </Box>
3112
+
3113
+ {!connectionTestResult && !isTestingConnection && (
3114
+ <Box marginY={1}>
3115
+ <Text>
3116
+ <Text color={theme.suggestion}>Press Enter</Text> to start the
3117
+ connection test
3118
+ </Text>
3119
+ </Box>
3120
+ )}
3121
+
3122
+ {isTestingConnection && (
3123
+ <Box marginY={1}>
3124
+ <Text color={theme.suggestion}>🔄 Testing connection...</Text>
3125
+ </Box>
3126
+ )}
3127
+
3128
+ {connectionTestResult && (
3129
+ <Box flexDirection="column" marginY={1} paddingX={1}>
3130
+ <Text
3131
+ color={connectionTestResult.success ? theme.success : 'red'}
3132
+ >
3133
+ {connectionTestResult.message}
3134
+ </Text>
3135
+
3136
+ {connectionTestResult.endpoint && (
3137
+ <Text color={theme.secondaryText}>
3138
+ Endpoint: {connectionTestResult.endpoint}
3139
+ </Text>
3140
+ )}
3141
+
3142
+ {connectionTestResult.details && (
3143
+ <Text color={theme.secondaryText}>
3144
+ Details: {connectionTestResult.details}
3145
+ </Text>
3146
+ )}
3147
+
3148
+ {connectionTestResult.success ? (
3149
+ <Box marginTop={1}>
3150
+ <Text color={theme.success}>
3151
+ ✅ Automatically proceeding to confirmation...
3152
+ </Text>
3153
+ </Box>
3154
+ ) : (
3155
+ <Box marginTop={1}>
3156
+ <Text>
3157
+ <Text color={theme.suggestion}>Press Enter</Text> to retry
3158
+ test, or <Text color={theme.suggestion}>Esc</Text> to go
3159
+ back
3160
+ </Text>
3161
+ </Box>
3162
+ )}
3163
+ </Box>
3164
+ )}
3165
+
3166
+ <Box marginTop={1}>
3167
+ <Text dimColor>
3168
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
3169
+ context length
3170
+ </Text>
3171
+ </Box>
3172
+ </Box>
3173
+ </Box>
3174
+ </Box>
3175
+ )
3176
+ }
3177
+
3178
+ // Render Confirmation Screen
3179
+ if (currentScreen === 'confirmation') {
3180
+ // Show model profile being created
3181
+
3182
+ // Get provider display name
3183
+ const providerDisplayName = getProviderLabel(selectedProvider, 0).split(
3184
+ ' (',
3185
+ )[0]
3186
+
3187
+ // Determine if provider requires API key
3188
+ const showsApiKey = selectedProvider !== 'ollama'
3189
+
3190
+ return (
3191
+ <Box flexDirection="column" gap={1}>
3192
+ <Box
3193
+ flexDirection="column"
3194
+ gap={1}
3195
+ borderStyle="round"
3196
+ borderColor={theme.secondaryBorder}
3197
+ paddingX={2}
3198
+ paddingY={1}
3199
+ >
3200
+ <Text bold>
3201
+ Configuration Confirmation{' '}
3202
+ {exitState.pending
3203
+ ? `(press ${exitState.keyName} again to exit)`
3204
+ : ''}
3205
+ </Text>
3206
+ <Box flexDirection="column" gap={1}>
3207
+ <Text bold>Confirm your model configuration:</Text>
3208
+ <Box flexDirection="column" width={70}>
3209
+ <Text color={theme.secondaryText}>
3210
+ Please review your selections before saving.
3211
+ </Text>
3212
+ </Box>
3213
+
3214
+ {validationError && (
3215
+ <Box flexDirection="column" marginY={1} paddingX={1}>
3216
+ <Text color={theme.error} bold>
3217
+ ⚠ Configuration Error:
3218
+ </Text>
3219
+ <Text color={theme.error}>{validationError}</Text>
3220
+ </Box>
3221
+ )}
3222
+
3223
+ <Box flexDirection="column" marginY={1} paddingX={1}>
3224
+ <Text>
3225
+ <Text bold>Provider: </Text>
3226
+ <Text color={theme.suggestion}>{providerDisplayName}</Text>
3227
+ </Text>
3228
+
3229
+ {selectedProvider === 'azure' && (
3230
+ <Text>
3231
+ <Text bold>Resource Name: </Text>
3232
+ <Text color={theme.suggestion}>{resourceName}</Text>
3233
+ </Text>
3234
+ )}
3235
+
3236
+ {selectedProvider === 'ollama' && (
3237
+ <Text>
3238
+ <Text bold>Server URL: </Text>
3239
+ <Text color={theme.suggestion}>{ollamaBaseUrl}</Text>
3240
+ </Text>
3241
+ )}
3242
+
3243
+ {selectedProvider === 'custom-openai' && (
3244
+ <Text>
3245
+ <Text bold>API Base URL: </Text>
3246
+ <Text color={theme.suggestion}>{customBaseUrl}</Text>
3247
+ </Text>
3248
+ )}
3249
+
3250
+ <Text>
3251
+ <Text bold>Model: </Text>
3252
+ <Text color={theme.suggestion}>{selectedModel}</Text>
3253
+ </Text>
3254
+
3255
+ {apiKey && showsApiKey && (
3256
+ <Text>
3257
+ <Text bold>API Key: </Text>
3258
+ <Text color={theme.suggestion}>****{apiKey.slice(-4)}</Text>
3259
+ </Text>
3260
+ )}
3261
+
3262
+ {maxTokens && (
3263
+ <Text>
3264
+ <Text bold>Max Tokens: </Text>
3265
+ <Text color={theme.suggestion}>{maxTokens}</Text>
3266
+ </Text>
3267
+ )}
3268
+
3269
+ <Text>
3270
+ <Text bold>Context Length: </Text>
3271
+ <Text color={theme.suggestion}>
3272
+ {CONTEXT_LENGTH_OPTIONS.find(
3273
+ opt => opt.value === contextLength,
3274
+ )?.label || `${contextLength.toLocaleString()} tokens`}
3275
+ </Text>
3276
+ </Text>
3277
+
3278
+ {supportsReasoningEffort && (
3279
+ <Text>
3280
+ <Text bold>Reasoning Effort: </Text>
3281
+ <Text color={theme.suggestion}>{reasoningEffort}</Text>
3282
+ </Text>
3283
+ )}
3284
+ </Box>
3285
+
3286
+ <Box marginTop={1}>
3287
+ <Text dimColor>
3288
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
3289
+ model parameters or <Text color={theme.suggestion}>Enter</Text>{' '}
3290
+ to save configuration
3291
+ </Text>
3292
+ </Box>
3293
+ </Box>
3294
+ </Box>
3295
+ </Box>
3296
+ )
3297
+ }
3298
+
3299
+ // Render Anthropic Sub-Menu Selection Screen
3300
+ if (currentScreen === 'anthropicSubMenu') {
3301
+ const anthropicOptions = [
3302
+ { label: 'Official Anthropic API', value: 'official' },
3303
+ { label: 'BigDream (Community Proxy)', value: 'bigdream' },
3304
+ { label: 'OpenDev (Community Proxy)', value: 'opendev' },
3305
+ { label: 'Custom Anthropic-Compatible API', value: 'custom' },
3306
+ ]
3307
+
3308
+ return (
3309
+ <Box flexDirection="column" gap={1}>
3310
+ <Box
3311
+ flexDirection="column"
3312
+ gap={1}
3313
+ borderStyle="round"
3314
+ borderColor={theme.secondaryBorder}
3315
+ paddingX={2}
3316
+ paddingY={1}
3317
+ >
3318
+ <Text bold>
3319
+ Claude Provider Selection{' '}
3320
+ {exitState.pending
3321
+ ? `(press ${exitState.keyName} again to exit)`
3322
+ : ''}
3323
+ </Text>
3324
+ <Box flexDirection="column" gap={1}>
3325
+ <Text bold>
3326
+ Choose your Anthropic API access method for this model profile:
3327
+ </Text>
3328
+ <Box flexDirection="column" width={70}>
3329
+ <Text color={theme.secondaryText}>
3330
+ • <Text bold>Official Anthropic API:</Text> Direct access to
3331
+ Anthropic's official API
3332
+ <Newline />• <Text bold>BigDream:</Text> Community proxy
3333
+ providing Claude access
3334
+ <Newline />• <Text bold>Custom:</Text> Your own
3335
+ Anthropic-compatible API endpoint
3336
+ </Text>
3337
+ </Box>
3338
+
3339
+ <Select
3340
+ options={anthropicOptions}
3341
+ onChange={handleAnthropicProviderSelection}
3342
+ />
3343
+
3344
+ <Box marginTop={1}>
3345
+ <Text dimColor>
3346
+ Press <Text color={theme.suggestion}>Esc</Text> to go back to
3347
+ provider selection
3348
+ </Text>
3349
+ </Box>
3350
+ </Box>
3351
+ </Box>
3352
+ </Box>
3353
+ )
3354
+ }
3355
+
3356
+ // Render Provider Selection Screen
3357
+ return (
3358
+ <ScreenContainer
3359
+ title="Provider Selection"
3360
+ exitState={exitState}
3361
+ children={
3362
+ <Box flexDirection="column" gap={1}>
3363
+ <Text bold>
3364
+ Select your preferred AI provider for this model profile:
3365
+ </Text>
3366
+ <Box flexDirection="column" width={70}>
3367
+ <Text color={theme.secondaryText}>
3368
+ Choose the provider you want to use for this model profile.
3369
+ <Newline />
3370
+ This will determine which models are available to you.
3371
+ </Text>
3372
+ </Box>
3373
+
3374
+ <Select options={providerOptions} onChange={handleProviderSelection} />
3375
+
3376
+ <Box marginTop={1}>
3377
+ <Text dimColor>
3378
+ You can change this later by running{' '}
3379
+ <Text color={theme.suggestion}>/model</Text> again
3380
+ </Text>
3381
+ </Box>
3382
+ </Box>
3383
+ }
3384
+ />
3385
+ )
3386
+ }