@shareai-lab/kode 1.1.13 → 1.1.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (288)
  1. package/dist/entrypoints/cli.js +2 -1
  2. package/dist/entrypoints/cli.js.map +2 -2
  3. package/dist/index.js +5 -26
  4. package/dist/package.json +4 -1
  5. package/package.json +9 -104
  6. package/dist/test/testAdapters.js +0 -88
  7. package/dist/test/testAdapters.js.map +0 -1
  8. package/src/ProjectOnboarding.tsx +0 -198
  9. package/src/Tool.ts +0 -83
  10. package/src/commands/agents.tsx +0 -3416
  11. package/src/commands/approvedTools.ts +0 -53
  12. package/src/commands/bug.tsx +0 -20
  13. package/src/commands/clear.ts +0 -43
  14. package/src/commands/compact.ts +0 -120
  15. package/src/commands/config.tsx +0 -19
  16. package/src/commands/cost.ts +0 -18
  17. package/src/commands/ctx_viz.ts +0 -209
  18. package/src/commands/doctor.ts +0 -24
  19. package/src/commands/help.tsx +0 -19
  20. package/src/commands/init.ts +0 -37
  21. package/src/commands/listen.ts +0 -42
  22. package/src/commands/login.tsx +0 -51
  23. package/src/commands/logout.tsx +0 -40
  24. package/src/commands/mcp.ts +0 -41
  25. package/src/commands/model.tsx +0 -40
  26. package/src/commands/modelstatus.tsx +0 -20
  27. package/src/commands/onboarding.tsx +0 -34
  28. package/src/commands/pr_comments.ts +0 -59
  29. package/src/commands/refreshCommands.ts +0 -54
  30. package/src/commands/release-notes.ts +0 -34
  31. package/src/commands/resume.tsx +0 -31
  32. package/src/commands/review.ts +0 -49
  33. package/src/commands/terminalSetup.ts +0 -221
  34. package/src/commands.ts +0 -139
  35. package/src/components/ApproveApiKey.tsx +0 -93
  36. package/src/components/AsciiLogo.tsx +0 -13
  37. package/src/components/AutoUpdater.tsx +0 -148
  38. package/src/components/Bug.tsx +0 -367
  39. package/src/components/Config.tsx +0 -293
  40. package/src/components/ConsoleOAuthFlow.tsx +0 -327
  41. package/src/components/Cost.tsx +0 -23
  42. package/src/components/CostThresholdDialog.tsx +0 -46
  43. package/src/components/CustomSelect/option-map.ts +0 -42
  44. package/src/components/CustomSelect/select-option.tsx +0 -78
  45. package/src/components/CustomSelect/select.tsx +0 -152
  46. package/src/components/CustomSelect/theme.ts +0 -45
  47. package/src/components/CustomSelect/use-select-state.ts +0 -414
  48. package/src/components/CustomSelect/use-select.ts +0 -35
  49. package/src/components/FallbackToolUseRejectedMessage.tsx +0 -15
  50. package/src/components/FileEditToolUpdatedMessage.tsx +0 -66
  51. package/src/components/Help.tsx +0 -215
  52. package/src/components/HighlightedCode.tsx +0 -33
  53. package/src/components/InvalidConfigDialog.tsx +0 -113
  54. package/src/components/Link.tsx +0 -32
  55. package/src/components/LogSelector.tsx +0 -86
  56. package/src/components/Logo.tsx +0 -170
  57. package/src/components/MCPServerApprovalDialog.tsx +0 -100
  58. package/src/components/MCPServerDialogCopy.tsx +0 -25
  59. package/src/components/MCPServerMultiselectDialog.tsx +0 -109
  60. package/src/components/Message.tsx +0 -221
  61. package/src/components/MessageResponse.tsx +0 -15
  62. package/src/components/MessageSelector.tsx +0 -211
  63. package/src/components/ModeIndicator.tsx +0 -88
  64. package/src/components/ModelConfig.tsx +0 -301
  65. package/src/components/ModelListManager.tsx +0 -227
  66. package/src/components/ModelSelector.tsx +0 -3387
  67. package/src/components/ModelStatusDisplay.tsx +0 -230
  68. package/src/components/Onboarding.tsx +0 -274
  69. package/src/components/PressEnterToContinue.tsx +0 -11
  70. package/src/components/PromptInput.tsx +0 -760
  71. package/src/components/SentryErrorBoundary.ts +0 -39
  72. package/src/components/Spinner.tsx +0 -129
  73. package/src/components/StickerRequestForm.tsx +0 -16
  74. package/src/components/StructuredDiff.tsx +0 -191
  75. package/src/components/TextInput.tsx +0 -259
  76. package/src/components/TodoItem.tsx +0 -47
  77. package/src/components/TokenWarning.tsx +0 -31
  78. package/src/components/ToolUseLoader.tsx +0 -40
  79. package/src/components/TrustDialog.tsx +0 -106
  80. package/src/components/binary-feedback/BinaryFeedback.tsx +0 -63
  81. package/src/components/binary-feedback/BinaryFeedbackOption.tsx +0 -111
  82. package/src/components/binary-feedback/BinaryFeedbackView.tsx +0 -172
  83. package/src/components/binary-feedback/utils.ts +0 -220
  84. package/src/components/messages/AssistantBashOutputMessage.tsx +0 -22
  85. package/src/components/messages/AssistantLocalCommandOutputMessage.tsx +0 -49
  86. package/src/components/messages/AssistantRedactedThinkingMessage.tsx +0 -19
  87. package/src/components/messages/AssistantTextMessage.tsx +0 -144
  88. package/src/components/messages/AssistantThinkingMessage.tsx +0 -40
  89. package/src/components/messages/AssistantToolUseMessage.tsx +0 -132
  90. package/src/components/messages/TaskProgressMessage.tsx +0 -32
  91. package/src/components/messages/TaskToolMessage.tsx +0 -58
  92. package/src/components/messages/UserBashInputMessage.tsx +0 -28
  93. package/src/components/messages/UserCommandMessage.tsx +0 -30
  94. package/src/components/messages/UserKodingInputMessage.tsx +0 -28
  95. package/src/components/messages/UserPromptMessage.tsx +0 -35
  96. package/src/components/messages/UserTextMessage.tsx +0 -39
  97. package/src/components/messages/UserToolResultMessage/UserToolCanceledMessage.tsx +0 -12
  98. package/src/components/messages/UserToolResultMessage/UserToolErrorMessage.tsx +0 -36
  99. package/src/components/messages/UserToolResultMessage/UserToolRejectMessage.tsx +0 -31
  100. package/src/components/messages/UserToolResultMessage/UserToolResultMessage.tsx +0 -57
  101. package/src/components/messages/UserToolResultMessage/UserToolSuccessMessage.tsx +0 -35
  102. package/src/components/messages/UserToolResultMessage/utils.tsx +0 -56
  103. package/src/components/permissions/BashPermissionRequest/BashPermissionRequest.tsx +0 -121
  104. package/src/components/permissions/FallbackPermissionRequest.tsx +0 -153
  105. package/src/components/permissions/FileEditPermissionRequest/FileEditPermissionRequest.tsx +0 -182
  106. package/src/components/permissions/FileEditPermissionRequest/FileEditToolDiff.tsx +0 -77
  107. package/src/components/permissions/FileWritePermissionRequest/FileWritePermissionRequest.tsx +0 -164
  108. package/src/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.tsx +0 -83
  109. package/src/components/permissions/FilesystemPermissionRequest/FilesystemPermissionRequest.tsx +0 -240
  110. package/src/components/permissions/PermissionRequest.tsx +0 -101
  111. package/src/components/permissions/PermissionRequestTitle.tsx +0 -69
  112. package/src/components/permissions/hooks.ts +0 -44
  113. package/src/components/permissions/toolUseOptions.ts +0 -59
  114. package/src/components/permissions/utils.ts +0 -23
  115. package/src/constants/betas.ts +0 -5
  116. package/src/constants/claude-asterisk-ascii-art.tsx +0 -238
  117. package/src/constants/figures.ts +0 -4
  118. package/src/constants/keys.ts +0 -3
  119. package/src/constants/macros.ts +0 -11
  120. package/src/constants/modelCapabilities.ts +0 -179
  121. package/src/constants/models.ts +0 -1025
  122. package/src/constants/oauth.ts +0 -18
  123. package/src/constants/product.ts +0 -17
  124. package/src/constants/prompts.ts +0 -168
  125. package/src/constants/releaseNotes.ts +0 -7
  126. package/src/context/PermissionContext.tsx +0 -149
  127. package/src/context.ts +0 -278
  128. package/src/cost-tracker.ts +0 -84
  129. package/src/entrypoints/cli.tsx +0 -1561
  130. package/src/entrypoints/mcp.ts +0 -175
  131. package/src/history.ts +0 -25
  132. package/src/hooks/useApiKeyVerification.ts +0 -59
  133. package/src/hooks/useArrowKeyHistory.ts +0 -55
  134. package/src/hooks/useCanUseTool.ts +0 -138
  135. package/src/hooks/useCancelRequest.ts +0 -39
  136. package/src/hooks/useDoublePress.ts +0 -41
  137. package/src/hooks/useExitOnCtrlCD.ts +0 -31
  138. package/src/hooks/useInterval.ts +0 -25
  139. package/src/hooks/useLogMessages.ts +0 -16
  140. package/src/hooks/useLogStartupTime.ts +0 -12
  141. package/src/hooks/useNotifyAfterTimeout.ts +0 -65
  142. package/src/hooks/usePermissionRequestLogging.ts +0 -44
  143. package/src/hooks/useTerminalSize.ts +0 -49
  144. package/src/hooks/useTextInput.ts +0 -317
  145. package/src/hooks/useUnifiedCompletion.ts +0 -1405
  146. package/src/index.ts +0 -34
  147. package/src/messages.ts +0 -38
  148. package/src/permissions.ts +0 -268
  149. package/src/query.ts +0 -720
  150. package/src/screens/ConfigureNpmPrefix.tsx +0 -197
  151. package/src/screens/Doctor.tsx +0 -219
  152. package/src/screens/LogList.tsx +0 -68
  153. package/src/screens/REPL.tsx +0 -813
  154. package/src/screens/ResumeConversation.tsx +0 -68
  155. package/src/services/adapters/base.ts +0 -38
  156. package/src/services/adapters/chatCompletions.ts +0 -90
  157. package/src/services/adapters/responsesAPI.ts +0 -170
  158. package/src/services/browserMocks.ts +0 -66
  159. package/src/services/claude.ts +0 -2197
  160. package/src/services/customCommands.ts +0 -704
  161. package/src/services/fileFreshness.ts +0 -377
  162. package/src/services/gpt5ConnectionTest.ts +0 -340
  163. package/src/services/mcpClient.ts +0 -564
  164. package/src/services/mcpServerApproval.tsx +0 -50
  165. package/src/services/mentionProcessor.ts +0 -273
  166. package/src/services/modelAdapterFactory.ts +0 -69
  167. package/src/services/notifier.ts +0 -40
  168. package/src/services/oauth.ts +0 -357
  169. package/src/services/openai.ts +0 -1359
  170. package/src/services/responseStateManager.ts +0 -90
  171. package/src/services/sentry.ts +0 -3
  172. package/src/services/statsig.ts +0 -172
  173. package/src/services/statsigStorage.ts +0 -86
  174. package/src/services/systemReminder.ts +0 -507
  175. package/src/services/vcr.ts +0 -161
  176. package/src/test/testAdapters.ts +0 -96
  177. package/src/tools/ArchitectTool/ArchitectTool.tsx +0 -135
  178. package/src/tools/ArchitectTool/prompt.ts +0 -15
  179. package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx +0 -576
  180. package/src/tools/BashTool/BashTool.tsx +0 -243
  181. package/src/tools/BashTool/BashToolResultMessage.tsx +0 -38
  182. package/src/tools/BashTool/OutputLine.tsx +0 -49
  183. package/src/tools/BashTool/prompt.ts +0 -174
  184. package/src/tools/BashTool/utils.ts +0 -56
  185. package/src/tools/FileEditTool/FileEditTool.tsx +0 -319
  186. package/src/tools/FileEditTool/prompt.ts +0 -51
  187. package/src/tools/FileEditTool/utils.ts +0 -58
  188. package/src/tools/FileReadTool/FileReadTool.tsx +0 -404
  189. package/src/tools/FileReadTool/prompt.ts +0 -7
  190. package/src/tools/FileWriteTool/FileWriteTool.tsx +0 -301
  191. package/src/tools/FileWriteTool/prompt.ts +0 -10
  192. package/src/tools/GlobTool/GlobTool.tsx +0 -119
  193. package/src/tools/GlobTool/prompt.ts +0 -8
  194. package/src/tools/GrepTool/GrepTool.tsx +0 -147
  195. package/src/tools/GrepTool/prompt.ts +0 -11
  196. package/src/tools/MCPTool/MCPTool.tsx +0 -107
  197. package/src/tools/MCPTool/prompt.ts +0 -3
  198. package/src/tools/MemoryReadTool/MemoryReadTool.tsx +0 -127
  199. package/src/tools/MemoryReadTool/prompt.ts +0 -3
  200. package/src/tools/MemoryWriteTool/MemoryWriteTool.tsx +0 -89
  201. package/src/tools/MemoryWriteTool/prompt.ts +0 -3
  202. package/src/tools/MultiEditTool/MultiEditTool.tsx +0 -388
  203. package/src/tools/MultiEditTool/prompt.ts +0 -45
  204. package/src/tools/NotebookEditTool/NotebookEditTool.tsx +0 -298
  205. package/src/tools/NotebookEditTool/prompt.ts +0 -3
  206. package/src/tools/NotebookReadTool/NotebookReadTool.tsx +0 -258
  207. package/src/tools/NotebookReadTool/prompt.ts +0 -3
  208. package/src/tools/StickerRequestTool/StickerRequestTool.tsx +0 -107
  209. package/src/tools/StickerRequestTool/prompt.ts +0 -19
  210. package/src/tools/TaskTool/TaskTool.tsx +0 -438
  211. package/src/tools/TaskTool/constants.ts +0 -1
  212. package/src/tools/TaskTool/prompt.ts +0 -92
  213. package/src/tools/ThinkTool/ThinkTool.tsx +0 -54
  214. package/src/tools/ThinkTool/prompt.ts +0 -12
  215. package/src/tools/TodoWriteTool/TodoWriteTool.tsx +0 -313
  216. package/src/tools/TodoWriteTool/prompt.ts +0 -63
  217. package/src/tools/URLFetcherTool/URLFetcherTool.tsx +0 -178
  218. package/src/tools/URLFetcherTool/cache.ts +0 -55
  219. package/src/tools/URLFetcherTool/htmlToMarkdown.ts +0 -55
  220. package/src/tools/URLFetcherTool/prompt.ts +0 -17
  221. package/src/tools/WebSearchTool/WebSearchTool.tsx +0 -103
  222. package/src/tools/WebSearchTool/prompt.ts +0 -13
  223. package/src/tools/WebSearchTool/searchProviders.ts +0 -66
  224. package/src/tools/lsTool/lsTool.tsx +0 -272
  225. package/src/tools/lsTool/prompt.ts +0 -2
  226. package/src/tools.ts +0 -67
  227. package/src/types/PermissionMode.ts +0 -120
  228. package/src/types/RequestContext.ts +0 -72
  229. package/src/types/common.d.ts +0 -2
  230. package/src/types/conversation.ts +0 -51
  231. package/src/types/logs.ts +0 -58
  232. package/src/types/modelCapabilities.ts +0 -64
  233. package/src/types/notebook.ts +0 -87
  234. package/src/utils/Cursor.ts +0 -436
  235. package/src/utils/PersistentShell.ts +0 -552
  236. package/src/utils/advancedFuzzyMatcher.ts +0 -290
  237. package/src/utils/agentLoader.ts +0 -278
  238. package/src/utils/agentStorage.ts +0 -97
  239. package/src/utils/array.ts +0 -3
  240. package/src/utils/ask.tsx +0 -99
  241. package/src/utils/auth.ts +0 -13
  242. package/src/utils/autoCompactCore.ts +0 -223
  243. package/src/utils/autoUpdater.ts +0 -458
  244. package/src/utils/betas.ts +0 -20
  245. package/src/utils/browser.ts +0 -14
  246. package/src/utils/cleanup.ts +0 -72
  247. package/src/utils/commands.ts +0 -261
  248. package/src/utils/commonUnixCommands.ts +0 -161
  249. package/src/utils/config.ts +0 -945
  250. package/src/utils/conversationRecovery.ts +0 -55
  251. package/src/utils/debugLogger.ts +0 -1235
  252. package/src/utils/diff.ts +0 -42
  253. package/src/utils/env.ts +0 -57
  254. package/src/utils/errors.ts +0 -21
  255. package/src/utils/exampleCommands.ts +0 -109
  256. package/src/utils/execFileNoThrow.ts +0 -51
  257. package/src/utils/expertChatStorage.ts +0 -136
  258. package/src/utils/file.ts +0 -405
  259. package/src/utils/fileRecoveryCore.ts +0 -71
  260. package/src/utils/format.tsx +0 -44
  261. package/src/utils/fuzzyMatcher.ts +0 -328
  262. package/src/utils/generators.ts +0 -62
  263. package/src/utils/git.ts +0 -92
  264. package/src/utils/globalLogger.ts +0 -77
  265. package/src/utils/http.ts +0 -10
  266. package/src/utils/imagePaste.ts +0 -38
  267. package/src/utils/json.ts +0 -13
  268. package/src/utils/log.ts +0 -382
  269. package/src/utils/markdown.ts +0 -213
  270. package/src/utils/messageContextManager.ts +0 -294
  271. package/src/utils/messages.tsx +0 -945
  272. package/src/utils/model.ts +0 -914
  273. package/src/utils/permissions/filesystem.ts +0 -127
  274. package/src/utils/responseState.ts +0 -23
  275. package/src/utils/ripgrep.ts +0 -167
  276. package/src/utils/secureFile.ts +0 -564
  277. package/src/utils/sessionState.ts +0 -49
  278. package/src/utils/state.ts +0 -25
  279. package/src/utils/style.ts +0 -29
  280. package/src/utils/terminal.ts +0 -50
  281. package/src/utils/theme.ts +0 -127
  282. package/src/utils/thinking.ts +0 -144
  283. package/src/utils/todoStorage.ts +0 -431
  284. package/src/utils/tokens.ts +0 -43
  285. package/src/utils/toolExecutionController.ts +0 -163
  286. package/src/utils/unaryLogging.ts +0 -26
  287. package/src/utils/user.ts +0 -37
  288. package/src/utils/validate.ts +0 -165
@@ -1,3387 +0,0 @@
1
- import React, { useState, useEffect, useCallback, useRef } from 'react'
2
- import { Box, Text, useInput } from 'ink'
3
- import { getTheme } from '../utils/theme'
4
- import { Select } from './CustomSelect/select'
5
- import { Newline } from 'ink'
6
- import { getModelManager } from '../utils/model'
7
-
8
- // 共享的屏幕容器组件,避免重复边框
9
- function ScreenContainer({
10
- title,
11
- exitState,
12
- children,
13
- }: {
14
- title: string
15
- exitState: { pending: boolean; keyName: string }
16
- children: React.ReactNode
17
- }) {
18
- const theme = getTheme()
19
- return (
20
- <Box
21
- flexDirection="column"
22
- gap={1}
23
- borderStyle="round"
24
- borderColor={theme.secondaryBorder}
25
- paddingX={2}
26
- paddingY={1}
27
- >
28
- <Text bold>
29
- {title}{' '}
30
- {exitState.pending ? `(press ${exitState.keyName} again to exit)` : ''}
31
- </Text>
32
- {children}
33
- </Box>
34
- )
35
- }
36
- import { PRODUCT_NAME } from '../constants/product'
37
- import { useExitOnCtrlCD } from '../hooks/useExitOnCtrlCD'
38
- import {
39
- getGlobalConfig,
40
- saveGlobalConfig,
41
- ProviderType,
42
- ModelPointerType,
43
- setAllPointersToModel,
44
- setModelPointer,
45
- } from '../utils/config.js'
46
- import models, { providers } from '../constants/models'
47
- import TextInput from './TextInput'
48
- import OpenAI from 'openai'
49
- import chalk from 'chalk'
50
- import { fetchAnthropicModels, verifyApiKey } from '../services/claude'
51
- import { fetchCustomModels, getModelFeatures } from '../services/openai'
52
- import { testGPT5Connection, validateGPT5Config } from '../services/gpt5ConnectionTest'
53
- type Props = {
54
- onDone: () => void
55
- abortController?: AbortController
56
- targetPointer?: ModelPointerType // NEW: Target pointer for configuration
57
- isOnboarding?: boolean // NEW: Whether this is first-time setup
58
- onCancel?: () => void // NEW: Cancel callback (different from onDone)
59
- skipModelType?: boolean // NEW: Skip model type selection
60
- }
61
-
62
- type ModelInfo = {
63
- model: string
64
- provider: string
65
- [key: string]: any
66
- }
67
-
68
- // Define reasoning effort options
69
- type ReasoningEffortOption = 'low' | 'medium' | 'high'
70
-
71
- // Define context length options (in tokens)
72
- type ContextLengthOption = {
73
- label: string
74
- value: number
75
- }
76
-
77
- const CONTEXT_LENGTH_OPTIONS: ContextLengthOption[] = [
78
- { label: '32K tokens', value: 32000 },
79
- { label: '64K tokens', value: 64000 },
80
- { label: '128K tokens', value: 128000 },
81
- { label: '200K tokens', value: 200000 },
82
- { label: '256K tokens', value: 256000 },
83
- { label: '300K tokens', value: 300000 },
84
- { label: '512K tokens', value: 512000 },
85
- { label: '1000K tokens', value: 1000000 },
86
- { label: '2000K tokens', value: 2000000 },
87
- { label: '3000K tokens', value: 3000000 },
88
- { label: '5000K tokens', value: 5000000 },
89
- { label: '10000K tokens', value: 10000000 },
90
- ]
91
-
92
- const DEFAULT_CONTEXT_LENGTH = 128000
93
-
94
- // Define max tokens options
95
- type MaxTokensOption = {
96
- label: string
97
- value: number
98
- }
99
-
100
- const MAX_TOKENS_OPTIONS: MaxTokensOption[] = [
101
- { label: '1K tokens', value: 1024 },
102
- { label: '2K tokens', value: 2048 },
103
- { label: '4K tokens', value: 4096 },
104
- { label: '8K tokens (recommended)', value: 8192 },
105
- { label: '16K tokens', value: 16384 },
106
- { label: '32K tokens', value: 32768 },
107
- { label: '64K tokens', value: 65536 },
108
- { label: '128K tokens', value: 131072 },
109
- ]
110
-
111
- const DEFAULT_MAX_TOKENS = 8192
112
-
113
- // Custom hook to handle Escape key navigation
114
- function useEscapeNavigation(
115
- onEscape: () => void,
116
- abortController?: AbortController,
117
- ) {
118
- // Use a ref to track if we've handled the escape key
119
- const handledRef = useRef(false)
120
-
121
- useInput(
122
- (input, key) => {
123
- if (key.escape && !handledRef.current) {
124
- handledRef.current = true
125
- // Reset after a short delay to allow for multiple escapes
126
- setTimeout(() => {
127
- handledRef.current = false
128
- }, 100)
129
- onEscape()
130
- }
131
- },
132
- { isActive: true },
133
- )
134
- }
135
-
136
- function printModelConfig() {
137
- const config = getGlobalConfig()
138
- // Only show ModelProfile information - no legacy fields
139
- const modelProfiles = config.modelProfiles || []
140
- const activeProfiles = modelProfiles.filter(p => p.isActive)
141
-
142
- if (activeProfiles.length === 0) {
143
- console.log(chalk.gray(' ⎿ No active model profiles configured'))
144
- return
145
- }
146
-
147
- const profileSummary = activeProfiles
148
- .map(p => `${p.name} (${p.provider}: ${p.modelName})`)
149
- .join(' | ')
150
- console.log(chalk.gray(` ⎿ ${profileSummary}`))
151
- }
152
-
153
- export function ModelSelector({
154
- onDone: onDoneProp,
155
- abortController,
156
- targetPointer,
157
- isOnboarding = false,
158
- onCancel,
159
- skipModelType = false,
160
- }: Props): React.ReactNode {
161
- const config = getGlobalConfig()
162
- const theme = getTheme()
163
- const onDone = () => {
164
- printModelConfig()
165
- onDoneProp()
166
- }
167
- // Initialize the exit hook but don't use it for Escape key
168
- const exitState = useExitOnCtrlCD(() => process.exit(0))
169
-
170
- // Always start with provider selection in new system
171
- const getInitialScreen = (): string => {
172
- return 'provider'
173
- }
174
-
175
- // Screen navigation stack
176
- const [screenStack, setScreenStack] = useState<
177
- Array<
178
- | 'provider'
179
- | 'anthropicSubMenu'
180
- | 'apiKey'
181
- | 'resourceName'
182
- | 'baseUrl'
183
- | 'model'
184
- | 'modelInput'
185
- | 'modelParams'
186
- | 'contextLength'
187
- | 'connectionTest'
188
- | 'confirmation'
189
- >
190
- >([getInitialScreen()])
191
-
192
- // Current screen is always the last item in the stack
193
- const currentScreen = screenStack[screenStack.length - 1]
194
-
195
- // Function to navigate to a new screen
196
- const navigateTo = (
197
- screen:
198
- | 'provider'
199
- | 'anthropicSubMenu'
200
- | 'apiKey'
201
- | 'resourceName'
202
- | 'baseUrl'
203
- | 'model'
204
- | 'modelInput'
205
- | 'modelParams'
206
- | 'contextLength'
207
- | 'connectionTest'
208
- | 'confirmation',
209
- ) => {
210
- setScreenStack(prev => [...prev, screen])
211
- }
212
-
213
- // Function to go back to the previous screen
214
- const goBack = () => {
215
- if (screenStack.length > 1) {
216
- // Remove the current screen from the stack
217
- setScreenStack(prev => prev.slice(0, -1))
218
- } else {
219
- // If we're at the first screen, call onDone to exit
220
- onDone()
221
- }
222
- }
223
-
224
- // State for model configuration
225
- const [selectedProvider, setSelectedProvider] = useState<ProviderType>(
226
- config.primaryProvider ?? 'anthropic',
227
- )
228
-
229
- // State for Anthropic provider sub-menu
230
- const [anthropicProviderType, setAnthropicProviderType] = useState<
231
- 'official' | 'bigdream' | 'opendev' | 'custom'
232
- >('official')
233
- const [selectedModel, setSelectedModel] = useState<string>('')
234
- const [apiKey, setApiKey] = useState<string>('')
235
-
236
- // New state for model parameters
237
- const [maxTokens, setMaxTokens] = useState<string>(
238
- config.maxTokens?.toString() || DEFAULT_MAX_TOKENS.toString(),
239
- )
240
- const [maxTokensMode, setMaxTokensMode] = useState<'preset' | 'custom'>(
241
- 'preset',
242
- )
243
- const [selectedMaxTokensPreset, setSelectedMaxTokensPreset] =
244
- useState<number>(config.maxTokens || DEFAULT_MAX_TOKENS)
245
- const [reasoningEffort, setReasoningEffort] =
246
- useState<ReasoningEffortOption>('medium')
247
- const [supportsReasoningEffort, setSupportsReasoningEffort] =
248
- useState<boolean>(false)
249
-
250
- // Context length state (use default instead of legacy config)
251
- const [contextLength, setContextLength] = useState<number>(
252
- DEFAULT_CONTEXT_LENGTH,
253
- )
254
-
255
- // Form focus state
256
- const [activeFieldIndex, setActiveFieldIndex] = useState(0)
257
- const [maxTokensCursorOffset, setMaxTokensCursorOffset] = useState<number>(0)
258
-
259
- // UI state
260
-
261
- // Search and model loading state
262
- const [availableModels, setAvailableModels] = useState<ModelInfo[]>([])
263
- const [isLoadingModels, setIsLoadingModels] = useState(false)
264
- const [modelLoadError, setModelLoadError] = useState<string | null>(null)
265
- const [modelSearchQuery, setModelSearchQuery] = useState<string>('')
266
- const [modelSearchCursorOffset, setModelSearchCursorOffset] =
267
- useState<number>(0)
268
- const [cursorOffset, setCursorOffset] = useState<number>(0)
269
- const [apiKeyEdited, setApiKeyEdited] = useState<boolean>(false)
270
-
271
- // Retry logic state
272
- const [fetchRetryCount, setFetchRetryCount] = useState<number>(0)
273
- const [isRetrying, setIsRetrying] = useState<boolean>(false)
274
-
275
- // Connection test state
276
- const [isTestingConnection, setIsTestingConnection] = useState<boolean>(false)
277
- const [connectionTestResult, setConnectionTestResult] = useState<{
278
- success: boolean
279
- message: string
280
- endpoint?: string
281
- details?: string
282
- } | null>(null)
283
-
284
- // Validation error state for duplicate model detection
285
- const [validationError, setValidationError] = useState<string | null>(null)
286
-
287
- // State for Azure-specific configuration
288
- const [resourceName, setResourceName] = useState<string>('')
289
- const [resourceNameCursorOffset, setResourceNameCursorOffset] =
290
- useState<number>(0)
291
- const [customModelName, setCustomModelName] = useState<string>('')
292
- const [customModelNameCursorOffset, setCustomModelNameCursorOffset] =
293
- useState<number>(0)
294
-
295
- // State for Ollama-specific configuration
296
- const [ollamaBaseUrl, setOllamaBaseUrl] = useState<string>(
297
- 'http://localhost:11434/v1',
298
- )
299
- const [ollamaBaseUrlCursorOffset, setOllamaBaseUrlCursorOffset] =
300
- useState<number>(0)
301
-
302
- // State for custom OpenAI-compatible API configuration
303
- const [customBaseUrl, setCustomBaseUrl] = useState<string>('')
304
- const [customBaseUrlCursorOffset, setCustomBaseUrlCursorOffset] =
305
- useState<number>(0)
306
-
307
- // State for provider base URL configuration (used for all providers)
308
- const [providerBaseUrl, setProviderBaseUrl] = useState<string>('')
309
- const [providerBaseUrlCursorOffset, setProviderBaseUrlCursorOffset] =
310
- useState<number>(0)
311
-
312
- // Reasoning effort options
313
- const reasoningEffortOptions = [
314
- { label: 'Low - Faster responses, less thorough reasoning', value: 'low' },
315
- { label: 'Medium - Balanced speed and reasoning depth', value: 'medium' },
316
- {
317
- label: 'High - Slower responses, more thorough reasoning',
318
- value: 'high',
319
- },
320
- ]
321
-
322
- // Get available providers from models.ts, excluding community Claude providers (now in Anthropic sub-menu)
323
- const availableProviders = Object.keys(providers).filter(
324
- provider => provider !== 'bigdream' && provider !== 'opendev',
325
- )
326
-
327
- // Create provider options with nice labels
328
- const providerOptions = availableProviders.map(provider => {
329
- const modelCount = models[provider]?.length || 0
330
- const label = getProviderLabel(provider, modelCount)
331
- return {
332
- label,
333
- value: provider,
334
- }
335
- })
336
-
337
- useEffect(() => {
338
- if (!apiKeyEdited && selectedProvider) {
339
- if (process.env[selectedProvider.toUpperCase() + '_API_KEY']) {
340
- setApiKey(
341
- process.env[selectedProvider.toUpperCase() + '_API_KEY'] as string,
342
- )
343
- } else {
344
- setApiKey('')
345
- }
346
- }
347
- }, [selectedProvider, apiKey, apiKeyEdited])
348
-
349
- // Ensure contextLength is always set to a valid option when contextLength screen is displayed
350
- useEffect(() => {
351
- if (
352
- currentScreen === 'contextLength' &&
353
- !CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength)
354
- ) {
355
- setContextLength(DEFAULT_CONTEXT_LENGTH)
356
- }
357
- }, [currentScreen, contextLength])
358
-
359
- // Create a set of model names from our constants/models.ts for the current provider
360
- const ourModelNames = new Set(
361
- (models[selectedProvider as keyof typeof models] || []).map(
362
- (model: any) => model.model,
363
- ),
364
- )
365
-
366
- // Create model options from available models, filtered by search query
367
- const filteredModels = modelSearchQuery
368
- ? availableModels.filter(model =>
369
- model.model?.toLowerCase().includes(modelSearchQuery.toLowerCase()),
370
- )
371
- : availableModels
372
-
373
- // Sort models with priority for specific keywords
374
- const sortModelsByPriority = (models: ModelInfo[]) => {
375
- const priorityKeywords = [
376
- 'claude',
377
- 'kimi',
378
- 'deepseek',
379
- 'minimax',
380
- 'o3',
381
- 'gpt',
382
- 'qwen',
383
- ]
384
-
385
- return models.sort((a, b) => {
386
- // Add safety checks for undefined model names
387
- const aModelLower = a.model?.toLowerCase() || ''
388
- const bModelLower = b.model?.toLowerCase() || ''
389
-
390
- // Check if models contain priority keywords
391
- const aHasPriority = priorityKeywords.some(keyword =>
392
- aModelLower.includes(keyword),
393
- )
394
- const bHasPriority = priorityKeywords.some(keyword =>
395
- bModelLower.includes(keyword),
396
- )
397
-
398
- // If one has priority and the other doesn't, prioritize the one with keywords
399
- if (aHasPriority && !bHasPriority) return -1
400
- if (!aHasPriority && bHasPriority) return 1
401
-
402
- // If both have priority or neither has priority, sort alphabetically
403
- return a.model.localeCompare(b.model)
404
- })
405
- }
406
-
407
- const sortedFilteredModels = sortModelsByPriority(filteredModels)
408
-
409
- const modelOptions = sortedFilteredModels.map(model => {
410
- // Check if this model is in our constants/models.ts list
411
- const isInOurModels = ourModelNames.has(model.model)
412
-
413
- return {
414
- label: `${model.model}${getModelDetails(model)}`,
415
- value: model.model,
416
- }
417
- })
418
-
419
- function getModelDetails(model: ModelInfo): string {
420
- const details = []
421
-
422
- if (model.max_tokens) {
423
- details.push(`${formatNumber(model.max_tokens)} tokens`)
424
- }
425
-
426
- if (model.supports_vision) {
427
- details.push('vision')
428
- }
429
-
430
- if (model.supports_function_calling) {
431
- details.push('tools')
432
- }
433
-
434
- return details.length > 0 ? ` (${details.join(', ')})` : ''
435
- }
436
-
437
- function formatNumber(num: number): string {
438
- if (num >= 1000000) {
439
- return `${(num / 1000000).toFixed(1)}M`
440
- } else if (num >= 1000) {
441
- return `${(num / 1000).toFixed(0)}K`
442
- }
443
- return num.toString()
444
- }
445
-
446
- function getProviderLabel(provider: string, modelCount: number): string {
447
- // Use provider names from the providers object if available
448
- if (providers[provider]) {
449
- return `${providers[provider].name} ${providers[provider].status === 'wip' ? '(WIP)' : ''} (${modelCount} models)`
450
- }
451
- return `${provider}`
452
- }
453
-
454
- function handleProviderSelection(provider: string) {
455
- const providerType = provider as ProviderType
456
- setSelectedProvider(providerType)
457
-
458
- if (provider === 'custom') {
459
- // For custom provider, save and exit
460
- saveConfiguration(providerType, selectedModel || '')
461
- onDone()
462
- } else if (provider === 'anthropic') {
463
- // For Anthropic provider, go to sub-menu to choose between official, community proxies, or custom
464
- navigateTo('anthropicSubMenu')
465
- } else {
466
- // For all other providers, go to base URL configuration first
467
- // Initialize with the default base URL for the provider
468
- const defaultBaseUrl = providers[providerType]?.baseURL || ''
469
- setProviderBaseUrl(defaultBaseUrl)
470
- navigateTo('baseUrl')
471
- }
472
- }
473
-
474
- // Local implementation of fetchAnthropicModels for UI
475
- async function fetchAnthropicModels(baseURL: string, apiKey: string) {
476
- try {
477
- const response = await fetch(`${baseURL}/v1/models`, {
478
- method: 'GET',
479
- headers: {
480
- 'x-api-key': apiKey,
481
- 'anthropic-version': '2023-06-01',
482
- 'Content-Type': 'application/json',
483
- 'Authorization': `Bearer ${apiKey}`
484
- },
485
- })
486
-
487
- if (!response.ok) {
488
- if (response.status === 401) {
489
- throw new Error(
490
- 'Invalid API key. Please check your API key and try again.',
491
- )
492
- } else if (response.status === 403) {
493
- throw new Error('API key does not have permission to access models.')
494
- } else if (response.status === 404) {
495
- throw new Error(
496
- 'API endpoint not found. This provider may not support model listing.',
497
- )
498
- } else if (response.status === 429) {
499
- throw new Error(
500
- 'Too many requests. Please wait a moment and try again.',
501
- )
502
- } else if (response.status >= 500) {
503
- throw new Error(
504
- 'API service is temporarily unavailable. Please try again later.',
505
- )
506
- } else {
507
- throw new Error(`Unable to connect to API (${response.status}).`)
508
- }
509
- }
510
-
511
- const data = await response.json()
512
-
513
- // Handle different response formats
514
- let models = []
515
- if (data && data.data && Array.isArray(data.data)) {
516
- models = data.data
517
- } else if (Array.isArray(data)) {
518
- models = data
519
- } else if (data && data.models && Array.isArray(data.models)) {
520
- models = data.models
521
- } else {
522
- throw new Error('API returned unexpected response format.')
523
- }
524
-
525
- return models
526
- } catch (error) {
527
- if (
528
- error instanceof Error &&
529
- (error.message.includes('API key') ||
530
- error.message.includes('API endpoint') ||
531
- error.message.includes('API service') ||
532
- error.message.includes('response format'))
533
- ) {
534
- throw error
535
- }
536
-
537
- if (error instanceof Error && error.message.includes('fetch')) {
538
- throw new Error(
539
- 'Unable to connect to the API. Please check the base URL and your internet connection.',
540
- )
541
- }
542
-
543
- throw new Error(
544
- 'Failed to fetch models from API. Please check your configuration and try again.',
545
- )
546
- }
547
- }
548
-
549
- // 通用的Anthropic兼容模型获取函数,实现三层降级策略
550
- async function fetchAnthropicCompatibleModelsWithFallback(
551
- baseURL: string,
552
- provider: string,
553
- apiKeyUrl: string,
554
- ) {
555
- let lastError: Error | null = null
556
-
557
- // 第一层:尝试使用 Anthropic 风格的 API
558
- try {
559
- const models = await fetchAnthropicModels(baseURL, apiKey)
560
- return models.map((model: any) => ({
561
- model: model.modelName || model.id || model.name || model.model || 'unknown',
562
- provider: provider,
563
- max_tokens: model.max_tokens || 8192,
564
- supports_vision: model.supports_vision || true,
565
- supports_function_calling: model.supports_function_calling || true,
566
- supports_reasoning_effort: false,
567
- }))
568
- } catch (error) {
569
- lastError = error as Error
570
- console.log(
571
- `Anthropic API failed for ${provider}, trying OpenAI format:`,
572
- error,
573
- )
574
- }
575
-
576
- // 第二层:尝试使用 OpenAI 风格的 API
577
- try {
578
- const models = await fetchCustomModels(baseURL, apiKey)
579
- return models.map((model: any) => ({
580
- model: model.modelName || model.id || model.name || model.model || 'unknown',
581
- provider: provider,
582
- max_tokens: model.max_tokens || 8192,
583
- supports_vision: model.supports_vision || false,
584
- supports_function_calling: model.supports_function_calling || true,
585
- supports_reasoning_effort: false,
586
- }))
587
- } catch (error) {
588
- lastError = error as Error
589
- console.log(
590
- `OpenAI API failed for ${provider}, falling back to manual input:`,
591
- error,
592
- )
593
- }
594
-
595
- // 第三层:抛出错误,触发手动输入模式
596
- let errorMessage = `Failed to fetch ${provider} models using both Anthropic and OpenAI API formats`
597
-
598
- if (lastError) {
599
- errorMessage = lastError.message
600
- }
601
-
602
- // 添加有用的建议
603
- if (errorMessage.includes('API key')) {
604
- errorMessage += `\n\n💡 Tip: Get your API key from ${apiKeyUrl}`
605
- } else if (errorMessage.includes('permission')) {
606
- errorMessage += `\n\n💡 Tip: Make sure your API key has access to the ${provider} API`
607
- } else if (errorMessage.includes('connection')) {
608
- errorMessage += '\n\n💡 Tip: Check your internet connection and try again'
609
- }
610
-
611
- setModelLoadError(errorMessage)
612
- throw new Error(errorMessage)
613
- }
614
-
615
- // 统一处理所有Anthropic兼容提供商的模型获取
616
- async function fetchAnthropicCompatibleProviderModels() {
617
- // 根据anthropicProviderType确定默认baseURL和API key获取地址
618
- let defaultBaseURL: string
619
- let apiKeyUrl: string
620
- let actualProvider: string
621
-
622
- switch (anthropicProviderType) {
623
- case 'official':
624
- defaultBaseURL = 'https://api.anthropic.com'
625
- apiKeyUrl = 'https://console.anthropic.com/settings/keys'
626
- actualProvider = 'anthropic'
627
- break
628
- case 'bigdream':
629
- defaultBaseURL = 'https://api-key.info'
630
- apiKeyUrl = 'https://api-key.info/register?aff=MSl4'
631
- actualProvider = 'bigdream'
632
- break
633
- case 'opendev':
634
- defaultBaseURL = 'https://api.openai-next.com'
635
- apiKeyUrl = 'https://api.openai-next.com/register/?aff_code=4xo7'
636
- actualProvider = 'opendev'
637
- break
638
- case 'custom':
639
- defaultBaseURL = providerBaseUrl
640
- apiKeyUrl = 'your custom API provider'
641
- actualProvider = 'anthropic'
642
- break
643
- default:
644
- throw new Error(
645
- `Unsupported Anthropic provider type: ${anthropicProviderType}`,
646
- )
647
- }
648
-
649
- const baseURL =
650
- anthropicProviderType === 'custom'
651
- ? providerBaseUrl
652
- : providerBaseUrl || defaultBaseURL
653
- return await fetchAnthropicCompatibleModelsWithFallback(
654
- baseURL,
655
- actualProvider,
656
- apiKeyUrl,
657
- )
658
- }
659
-
660
- // Remove duplicate function definitions - using unified fetchAnthropicCompatibleProviderModels instead
661
-
662
- async function fetchKimiModels() {
663
- try {
664
- const baseURL = providerBaseUrl || 'https://api.moonshot.cn/v1'
665
- const models = await fetchCustomModels(baseURL, apiKey)
666
-
667
- const kimiModels = models.map((model: any) => ({
668
- model: model.modelName || model.id || model.name || model.model || 'unknown',
669
- provider: 'kimi',
670
- max_tokens: model.max_tokens || 8192,
671
- supports_vision: false, // Default to false, could be enhanced
672
- supports_function_calling: true,
673
- supports_reasoning_effort: false,
674
- }))
675
-
676
- return kimiModels
677
- } catch (error) {
678
- let errorMessage = 'Failed to fetch Kimi models'
679
-
680
- if (error instanceof Error) {
681
- errorMessage = error.message
682
- }
683
-
684
- // Add helpful suggestions based on error type
685
- if (errorMessage.includes('API key')) {
686
- errorMessage +=
687
- '\n\n💡 Tip: Get your API key from https://platform.moonshot.cn/console/api-keys'
688
- } else if (errorMessage.includes('permission')) {
689
- errorMessage +=
690
- '\n\n💡 Tip: Make sure your API key has access to the Kimi API'
691
- } else if (errorMessage.includes('connection')) {
692
- errorMessage +=
693
- '\n\n💡 Tip: Check your internet connection and try again'
694
- }
695
-
696
- setModelLoadError(errorMessage)
697
- throw error
698
- }
699
- }
700
-
701
- async function fetchDeepSeekModels() {
702
- try {
703
- const baseURL = providerBaseUrl || 'https://api.deepseek.com'
704
- const models = await fetchCustomModels(baseURL, apiKey)
705
-
706
- const deepseekModels = models.map((model: any) => ({
707
- model: model.modelName || model.id || model.name || model.model || 'unknown',
708
- provider: 'deepseek',
709
- max_tokens: model.max_tokens || 8192,
710
- supports_vision: false, // Default to false, could be enhanced
711
- supports_function_calling: true,
712
- supports_reasoning_effort: false,
713
- }))
714
-
715
- return deepseekModels
716
- } catch (error) {
717
- let errorMessage = 'Failed to fetch DeepSeek models'
718
-
719
- if (error instanceof Error) {
720
- errorMessage = error.message
721
- }
722
-
723
- // Add helpful suggestions based on error type
724
- if (errorMessage.includes('API key')) {
725
- errorMessage +=
726
- '\n\n💡 Tip: Get your API key from https://platform.deepseek.com/api_keys'
727
- } else if (errorMessage.includes('permission')) {
728
- errorMessage +=
729
- '\n\n💡 Tip: Make sure your API key has access to the DeepSeek API'
730
- } else if (errorMessage.includes('connection')) {
731
- errorMessage +=
732
- '\n\n💡 Tip: Check your internet connection and try again'
733
- }
734
-
735
- setModelLoadError(errorMessage)
736
- throw error
737
- }
738
- }
739
-
740
- async function fetchSiliconFlowModels() {
741
- try {
742
- const baseURL = providerBaseUrl || 'https://api.siliconflow.cn/v1'
743
- const models = await fetchCustomModels(baseURL, apiKey)
744
-
745
- const siliconflowModels = models.map((model: any) => ({
746
- model: model.modelName || model.id || model.name || model.model || 'unknown',
747
- provider: 'siliconflow',
748
- max_tokens: model.max_tokens || 8192,
749
- supports_vision: false, // Default to false, could be enhanced
750
- supports_function_calling: true,
751
- supports_reasoning_effort: false,
752
- }))
753
-
754
- return siliconflowModels
755
- } catch (error) {
756
- let errorMessage = 'Failed to fetch SiliconFlow models'
757
-
758
- if (error instanceof Error) {
759
- errorMessage = error.message
760
- }
761
-
762
- // Add helpful suggestions based on error type
763
- if (errorMessage.includes('API key')) {
764
- errorMessage +=
765
- '\n\n💡 Tip: Get your API key from https://cloud.siliconflow.cn/i/oJWsm6io'
766
- } else if (errorMessage.includes('permission')) {
767
- errorMessage +=
768
- '\n\n💡 Tip: Make sure your API key has access to the SiliconFlow API'
769
- } else if (errorMessage.includes('connection')) {
770
- errorMessage +=
771
- '\n\n💡 Tip: Check your internet connection and try again'
772
- }
773
-
774
- setModelLoadError(errorMessage)
775
- throw error
776
- }
777
- }
778
-
779
- async function fetchQwenModels() {
780
- try {
781
- const baseURL =
782
- providerBaseUrl || 'https://dashscope.aliyuncs.com/compatible-mode/v1'
783
- const models = await fetchCustomModels(baseURL, apiKey)
784
-
785
- const qwenModels = models.map((model: any) => ({
786
- model: model.modelName || model.id || model.name || model.model || 'unknown',
787
- provider: 'qwen',
788
- max_tokens: model.max_tokens || 8192,
789
- supports_vision: false,
790
- supports_function_calling: true,
791
- supports_reasoning_effort: false,
792
- }))
793
-
794
- return qwenModels
795
- } catch (error) {
796
- let errorMessage = 'Failed to fetch Qwen models'
797
-
798
- if (error instanceof Error) {
799
- errorMessage = error.message
800
- }
801
-
802
- if (errorMessage.includes('API key')) {
803
- errorMessage +=
804
- '\n\n💡 Tip: Get your API key from https://bailian.console.aliyun.com/?tab=model#/api-key'
805
- } else if (errorMessage.includes('permission')) {
806
- errorMessage +=
807
- '\n\n💡 Tip: Make sure your API key has access to the Qwen API'
808
- } else if (errorMessage.includes('connection')) {
809
- errorMessage +=
810
- '\n\n💡 Tip: Check your internet connection and try again'
811
- }
812
-
813
- setModelLoadError(errorMessage)
814
- throw error
815
- }
816
- }
817
-
818
- async function fetchGLMModels() {
819
- try {
820
- const baseURL = providerBaseUrl || 'https://open.bigmodel.cn/api/paas/v4'
821
- const models = await fetchCustomModels(baseURL, apiKey)
822
-
823
- const glmModels = models.map((model: any) => ({
824
- model: model.modelName || model.id || model.name || model.model || 'unknown',
825
- provider: 'glm',
826
- max_tokens: model.max_tokens || 8192,
827
- supports_vision: false,
828
- supports_function_calling: true,
829
- supports_reasoning_effort: false,
830
- }))
831
-
832
- return glmModels
833
- } catch (error) {
834
- let errorMessage = 'Failed to fetch GLM models'
835
-
836
- if (error instanceof Error) {
837
- errorMessage = error.message
838
- }
839
-
840
- if (errorMessage.includes('API key')) {
841
- errorMessage +=
842
- '\n\n💡 Tip: Get your API key from https://open.bigmodel.cn (API Keys section)'
843
- } else if (errorMessage.includes('permission')) {
844
- errorMessage +=
845
- '\n\n💡 Tip: Make sure your API key has access to the GLM API'
846
- } else if (errorMessage.includes('connection')) {
847
- errorMessage +=
848
- '\n\n💡 Tip: Check your internet connection and try again'
849
- }
850
-
851
- setModelLoadError(errorMessage)
852
- throw error
853
- }
854
- }
855
-
856
- async function fetchMinimaxModels() {
857
- try {
858
- const baseURL = providerBaseUrl || 'https://api.minimaxi.com/v1'
859
- const models = await fetchCustomModels(baseURL, apiKey)
860
-
861
- const minimaxModels = models.map((model: any) => ({
862
- model: model.modelName || model.id || model.name || model.model || 'unknown',
863
- provider: 'minimax',
864
- max_tokens: model.max_tokens || 8192,
865
- supports_vision: false,
866
- supports_function_calling: true,
867
- supports_reasoning_effort: false,
868
- }))
869
-
870
- return minimaxModels
871
- } catch (error) {
872
- let errorMessage = 'Failed to fetch MiniMax models'
873
-
874
- if (error instanceof Error) {
875
- errorMessage = error.message
876
- }
877
-
878
- if (errorMessage.includes('API key')) {
879
- errorMessage +=
880
- '\n\n💡 Tip: Get your API key from https://www.minimax.io/platform/user-center/basic-information'
881
- } else if (errorMessage.includes('permission')) {
882
- errorMessage +=
883
- '\n\n💡 Tip: Make sure your API key has access to the MiniMax API'
884
- } else if (errorMessage.includes('connection')) {
885
- errorMessage +=
886
- '\n\n💡 Tip: Check your internet connection and try again'
887
- }
888
-
889
- setModelLoadError(errorMessage)
890
- throw error
891
- }
892
- }
893
-
894
- async function fetchBaiduQianfanModels() {
895
- try {
896
- const baseURL = providerBaseUrl || 'https://qianfan.baidubce.com/v2'
897
- const models = await fetchCustomModels(baseURL, apiKey)
898
-
899
- const baiduModels = models.map((model: any) => ({
900
- model: model.modelName || model.id || model.name || model.model || 'unknown',
901
- provider: 'baidu-qianfan',
902
- max_tokens: model.max_tokens || 8192,
903
- supports_vision: false,
904
- supports_function_calling: true,
905
- supports_reasoning_effort: false,
906
- }))
907
-
908
- return baiduModels
909
- } catch (error) {
910
- let errorMessage = 'Failed to fetch Baidu Qianfan models'
911
-
912
- if (error instanceof Error) {
913
- errorMessage = error.message
914
- }
915
-
916
- if (errorMessage.includes('API key')) {
917
- errorMessage +=
918
- '\n\n💡 Tip: Get your API key from https://console.bce.baidu.com/iam/#/iam/accesslist'
919
- } else if (errorMessage.includes('permission')) {
920
- errorMessage +=
921
- '\n\n💡 Tip: Make sure your API key has access to the Baidu Qianfan API'
922
- } else if (errorMessage.includes('connection')) {
923
- errorMessage +=
924
- '\n\n💡 Tip: Check your internet connection and try again'
925
- }
926
-
927
- setModelLoadError(errorMessage)
928
- throw error
929
- }
930
- }
931
-
932
- async function fetchCustomOpenAIModels() {
933
- try {
934
- const models = await fetchCustomModels(customBaseUrl, apiKey)
935
-
936
- const customModels = models.map((model: any) => ({
937
- model: model.modelName || model.id || model.name || model.model || 'unknown',
938
- provider: 'custom-openai',
939
- max_tokens: model.max_tokens || 4096,
940
- supports_vision: false, // Default to false, could be enhanced
941
- supports_function_calling: true,
942
- supports_reasoning_effort: false,
943
- }))
944
-
945
- return customModels
946
- } catch (error) {
947
- let errorMessage = 'Failed to fetch custom API models'
948
-
949
- if (error instanceof Error) {
950
- errorMessage = error.message
951
- }
952
-
953
- // Add helpful suggestions based on error type
954
- if (errorMessage.includes('API key')) {
955
- errorMessage +=
956
- '\n\n💡 Tip: Check that your API key is valid for this endpoint'
957
- } else if (errorMessage.includes('endpoint not found')) {
958
- errorMessage +=
959
- '\n\n💡 Tip: Make sure the base URL ends with /v1 and supports OpenAI-compatible API'
960
- } else if (errorMessage.includes('connect')) {
961
- errorMessage +=
962
- '\n\n💡 Tip: Verify the base URL is correct and accessible'
963
- } else if (errorMessage.includes('response format')) {
964
- errorMessage +=
965
- '\n\n💡 Tip: This API may not be fully OpenAI-compatible'
966
- }
967
-
968
- setModelLoadError(errorMessage)
969
- throw error
970
- }
971
- }
972
-
973
- async function fetchGeminiModels() {
974
- try {
975
- const response = await fetch(
976
- `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`,
977
- )
978
-
979
- if (!response.ok) {
980
- const errorData = await response.json()
981
- throw new Error(
982
- errorData.error?.message || `API error: ${response.status}`,
983
- )
984
- }
985
-
986
- const { models } = await response.json()
987
-
988
- const geminiModels = models
989
- .filter((model: any) =>
990
- model.supportedGenerationMethods.includes('generateContent'),
991
- )
992
- .map((model: any) => ({
993
- model: model.name.replace('models/', ''),
994
- provider: 'gemini',
995
- max_tokens: model.outputTokenLimit,
996
- supports_vision:
997
- model.supportedGenerationMethods.includes('generateContent'),
998
- supports_function_calling:
999
- model.supportedGenerationMethods.includes('generateContent'),
1000
- }))
1001
-
1002
- return geminiModels
1003
- } catch (error) {
1004
- setModelLoadError(
1005
- error instanceof Error ? error.message : 'Unknown error',
1006
- )
1007
- throw error
1008
- }
1009
- }
1010
-
1011
- async function fetchOllamaModels() {
1012
- try {
1013
- const response = await fetch(`${ollamaBaseUrl}/models`)
1014
-
1015
- if (!response.ok) {
1016
- throw new Error(`HTTP error ${response.status}: ${response.statusText}`)
1017
- }
1018
-
1019
- const responseData = await response.json()
1020
-
1021
- // Properly handle Ollama API response format
1022
- // Ollama API can return models in different formats based on version
1023
- let models = []
1024
-
1025
- // Check if data field exists (newer Ollama versions)
1026
- if (responseData.data && Array.isArray(responseData.data)) {
1027
- models = responseData.data
1028
- }
1029
- // Check if models array is directly at the root (older Ollama versions)
1030
- else if (Array.isArray(responseData.models)) {
1031
- models = responseData.models
1032
- }
1033
- // If response is already an array
1034
- else if (Array.isArray(responseData)) {
1035
- models = responseData
1036
- } else {
1037
- throw new Error(
1038
- 'Invalid response from Ollama API: missing models array',
1039
- )
1040
- }
1041
-
1042
- // Transform Ollama models to our format
1043
- const ollamaModels = models.map((model: any) => ({
1044
- model:
1045
- model.name ??
1046
- model.modelName ??
1047
- (typeof model === 'string' ? model : ''),
1048
- provider: 'ollama',
1049
- max_tokens: 4096, // Default value
1050
- supports_vision: false,
1051
- supports_function_calling: true,
1052
- supports_reasoning_effort: false,
1053
- }))
1054
-
1055
- // Filter out models with empty names
1056
- const validModels = ollamaModels.filter(model => model.model)
1057
-
1058
- setAvailableModels(validModels)
1059
-
1060
- // Only navigate if we have models
1061
- if (validModels.length > 0) {
1062
- navigateTo('model')
1063
- } else {
1064
- setModelLoadError('No models found in your Ollama installation')
1065
- }
1066
-
1067
- return validModels
1068
- } catch (error) {
1069
- const errorMessage =
1070
- error instanceof Error ? error.message : String(error)
1071
-
1072
- if (errorMessage.includes('fetch')) {
1073
- setModelLoadError(
1074
- `Could not connect to Ollama server at ${ollamaBaseUrl}. Make sure Ollama is running and the URL is correct.`,
1075
- )
1076
- } else {
1077
- setModelLoadError(`Error loading Ollama models: ${errorMessage}`)
1078
- }
1079
-
1080
- console.error('Error fetching Ollama models:', error)
1081
- return []
1082
- }
1083
- }
1084
-
1085
- async function fetchModelsWithRetry() {
1086
- const MAX_RETRIES = 2
1087
- let lastError: Error | null = null
1088
-
1089
- for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
1090
- setFetchRetryCount(attempt)
1091
- setIsRetrying(attempt > 1)
1092
-
1093
- if (attempt > 1) {
1094
- // Show retry message
1095
- setModelLoadError(
1096
- `Attempt ${attempt}/${MAX_RETRIES}: Retrying model discovery...`,
1097
- )
1098
- // Wait 1 second before retrying
1099
- await new Promise(resolve => setTimeout(resolve, 1000))
1100
- }
1101
-
1102
- try {
1103
- const models = await fetchModels()
1104
- // Success! Reset retry state and return models
1105
- setFetchRetryCount(0)
1106
- setIsRetrying(false)
1107
- setModelLoadError(null)
1108
- return models
1109
- } catch (error) {
1110
- lastError = error instanceof Error ? error : new Error(String(error))
1111
- console.log(`Model fetch attempt ${attempt} failed:`, lastError.message)
1112
-
1113
- if (attempt === MAX_RETRIES) {
1114
- // Final attempt failed, break to handle fallback
1115
- break
1116
- }
1117
- }
1118
- }
1119
-
1120
- // All retries failed, handle fallback to manual input
1121
- setIsRetrying(false)
1122
- const errorMessage = lastError?.message || 'Unknown error'
1123
-
1124
- // Check if provider supports manual input fallback
1125
- const supportsManualInput = [
1126
- 'anthropic',
1127
- 'kimi',
1128
- 'deepseek',
1129
- 'siliconflow',
1130
- 'qwen',
1131
- 'glm',
1132
- 'minimax',
1133
- 'baidu-qianfan',
1134
- 'custom-openai',
1135
- ].includes(selectedProvider)
1136
-
1137
- if (supportsManualInput) {
1138
- setModelLoadError(
1139
- `Failed to auto-discover models after ${MAX_RETRIES} attempts: ${errorMessage}\n\n⚡ Automatically switching to manual model configuration...`,
1140
- )
1141
-
1142
- // Automatically switch to manual input after 2 seconds
1143
- setTimeout(() => {
1144
- setModelLoadError(null)
1145
- navigateTo('modelInput')
1146
- }, 2000)
1147
- } else {
1148
- setModelLoadError(
1149
- `Failed to load models after ${MAX_RETRIES} attempts: ${errorMessage}`,
1150
- )
1151
- }
1152
-
1153
- return []
1154
- }
1155
-
1156
- async function fetchModels() {
1157
- setIsLoadingModels(true)
1158
- setModelLoadError(null)
1159
-
1160
- try {
1161
- // For Anthropic provider (including official and community proxies via sub-menu), use the same logic
1162
- if (selectedProvider === 'anthropic') {
1163
- const anthropicModels = await fetchAnthropicCompatibleProviderModels()
1164
- setAvailableModels(anthropicModels)
1165
- navigateTo('model')
1166
- return anthropicModels
1167
- }
1168
-
1169
- // For custom OpenAI-compatible APIs, use the fetchCustomOpenAIModels function
1170
- if (selectedProvider === 'custom-openai') {
1171
- const customModels = await fetchCustomOpenAIModels()
1172
- setAvailableModels(customModels)
1173
- navigateTo('model')
1174
- return customModels
1175
- }
1176
-
1177
- // For Gemini, use the separate fetchGeminiModels function
1178
- if (selectedProvider === 'gemini') {
1179
- const geminiModels = await fetchGeminiModels()
1180
- setAvailableModels(geminiModels)
1181
- navigateTo('model')
1182
- return geminiModels
1183
- }
1184
-
1185
- // For Kimi, use the fetchKimiModels function
1186
- if (selectedProvider === 'kimi') {
1187
- const kimiModels = await fetchKimiModels()
1188
- setAvailableModels(kimiModels)
1189
- navigateTo('model')
1190
- return kimiModels
1191
- }
1192
-
1193
- // For DeepSeek, use the fetchDeepSeekModels function
1194
- if (selectedProvider === 'deepseek') {
1195
- const deepseekModels = await fetchDeepSeekModels()
1196
- setAvailableModels(deepseekModels)
1197
- navigateTo('model')
1198
- return deepseekModels
1199
- }
1200
-
1201
- // For SiliconFlow, use the fetchSiliconFlowModels function
1202
- if (selectedProvider === 'siliconflow') {
1203
- const siliconflowModels = await fetchSiliconFlowModels()
1204
- setAvailableModels(siliconflowModels)
1205
- navigateTo('model')
1206
- return siliconflowModels
1207
- }
1208
-
1209
- // For Qwen, use the fetchQwenModels function
1210
- if (selectedProvider === 'qwen') {
1211
- const qwenModels = await fetchQwenModels()
1212
- setAvailableModels(qwenModels)
1213
- navigateTo('model')
1214
- return qwenModels
1215
- }
1216
-
1217
- // For GLM, use the fetchGLMModels function
1218
- if (selectedProvider === 'glm') {
1219
- const glmModels = await fetchGLMModels()
1220
- setAvailableModels(glmModels)
1221
- navigateTo('model')
1222
- return glmModels
1223
- }
1224
-
1225
- // For Baidu Qianfan, use the fetchBaiduQianfanModels function
1226
- if (selectedProvider === 'baidu-qianfan') {
1227
- const baiduModels = await fetchBaiduQianfanModels()
1228
- setAvailableModels(baiduModels)
1229
- navigateTo('model')
1230
- return baiduModels
1231
- }
1232
-
1233
- // For Azure, skip model fetching and go directly to model input
1234
- if (selectedProvider === 'azure') {
1235
- navigateTo('modelInput')
1236
- return []
1237
- }
1238
-
1239
- // For all other providers, use the OpenAI client
1240
- let baseURL = providerBaseUrl || providers[selectedProvider]?.baseURL
1241
-
1242
- // For custom-openai provider, use the custom base URL
1243
- if (selectedProvider === 'custom-openai') {
1244
- baseURL = customBaseUrl
1245
- }
1246
-
1247
- const openai = new OpenAI({
1248
- apiKey: apiKey || 'dummy-key-for-ollama', // Ollama doesn't need a real key
1249
- baseURL: baseURL,
1250
- dangerouslyAllowBrowser: true,
1251
- })
1252
-
1253
- // Fetch the models
1254
- const response = await openai.models.list()
1255
-
1256
- // Transform the response into our ModelInfo format
1257
- const fetchedModels = []
1258
- for (const model of response.data) {
1259
- const modelName = (model as any).modelName || (model as any).id || (model as any).name || (model as any).model || 'unknown'
1260
- const modelInfo = models[selectedProvider as keyof typeof models]?.find(
1261
- m => m.model === modelName,
1262
- )
1263
- fetchedModels.push({
1264
- model: modelName,
1265
- provider: selectedProvider,
1266
- max_tokens: modelInfo?.max_output_tokens,
1267
- supports_vision: modelInfo?.supports_vision || false,
1268
- supports_function_calling:
1269
- modelInfo?.supports_function_calling || false,
1270
- supports_reasoning_effort:
1271
- modelInfo?.supports_reasoning_effort || false,
1272
- })
1273
- }
1274
-
1275
- setAvailableModels(fetchedModels)
1276
-
1277
- // Navigate to model selection screen if models were loaded successfully
1278
- navigateTo('model')
1279
-
1280
- return fetchedModels
1281
- } catch (error) {
1282
- // Log for debugging
1283
- console.error('Error fetching models:', error)
1284
-
1285
- // Re-throw the error so that fetchModelsWithRetry can handle it properly
1286
- throw error
1287
- } finally {
1288
- setIsLoadingModels(false)
1289
- }
1290
- }
1291
-
1292
- function handleApiKeySubmit(key: string) {
1293
- setApiKey(key)
1294
-
1295
- // For Azure, go to resource name input next
1296
- if (selectedProvider === 'azure') {
1297
- navigateTo('resourceName')
1298
- return
1299
- }
1300
-
1301
- // Fetch models with the provided API key
1302
- fetchModelsWithRetry().catch(error => {
1303
- // The retry logic in fetchModelsWithRetry already handles the error display
1304
- // This catch is just to prevent unhandled promise rejection
1305
- console.error('Final error after retries:', error)
1306
- })
1307
- }
1308
-
1309
- function handleResourceNameSubmit(name: string) {
1310
- setResourceName(name)
1311
- navigateTo('modelInput')
1312
- }
1313
-
1314
- function handleOllamaBaseUrlSubmit(url: string) {
1315
- setOllamaBaseUrl(url)
1316
- setIsLoadingModels(true)
1317
- setModelLoadError(null)
1318
-
1319
- // Use the dedicated Ollama model fetch function
1320
- fetchOllamaModels().finally(() => {
1321
- setIsLoadingModels(false)
1322
- })
1323
- }
1324
-
1325
- function handleCustomBaseUrlSubmit(url: string) {
1326
- // Automatically remove trailing slash from baseURL
1327
- const cleanUrl = url.replace(/\/+$/, '')
1328
- setCustomBaseUrl(cleanUrl)
1329
- // After setting custom base URL, go to API key input
1330
- navigateTo('apiKey')
1331
- }
1332
-
1333
- function handleProviderBaseUrlSubmit(url: string) {
1334
- // Automatically remove trailing slash from baseURL
1335
- const cleanUrl = url.replace(/\/+$/, '')
1336
- setProviderBaseUrl(cleanUrl)
1337
-
1338
- // For Ollama, handle differently - it tries to fetch models immediately
1339
- if (selectedProvider === 'ollama') {
1340
- setOllamaBaseUrl(cleanUrl)
1341
- setIsLoadingModels(true)
1342
- setModelLoadError(null)
1343
-
1344
- // Use the dedicated Ollama model fetch function
1345
- fetchOllamaModels().finally(() => {
1346
- setIsLoadingModels(false)
1347
- })
1348
- } else {
1349
- // For all other providers, go to API key input next
1350
- navigateTo('apiKey')
1351
- }
1352
- }
1353
-
1354
- function handleAnthropicProviderSelection(
1355
- providerType: 'official' | 'bigdream' | 'custom',
1356
- ) {
1357
- setAnthropicProviderType(providerType)
1358
-
1359
- if (providerType === 'custom') {
1360
- // For custom Anthropic provider, go to base URL configuration
1361
- setProviderBaseUrl('')
1362
- navigateTo('baseUrl')
1363
- } else {
1364
- // For official/community proxy providers, set default base URL and go to API key
1365
- const defaultUrls = {
1366
- official: 'https://api.anthropic.com',
1367
- bigdream: 'https://api-key.info',
1368
- opendev: 'https://api.openai-next.com',
1369
- }
1370
- setProviderBaseUrl(defaultUrls[providerType])
1371
- navigateTo('apiKey')
1372
- }
1373
- }
1374
-
1375
- function handleCustomModelSubmit(model: string) {
1376
- setCustomModelName(model)
1377
- setSelectedModel(model)
1378
-
1379
- // No model info available, so set default values
1380
- setSupportsReasoningEffort(false)
1381
- setReasoningEffort(null)
1382
-
1383
- // Use default max tokens for manually entered models
1384
- setMaxTokensMode('preset')
1385
- setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS)
1386
- setMaxTokens(DEFAULT_MAX_TOKENS.toString())
1387
- setMaxTokensCursorOffset(DEFAULT_MAX_TOKENS.toString().length)
1388
-
1389
- // Go to model parameters screen
1390
- navigateTo('modelParams')
1391
- // Reset active field index
1392
- setActiveFieldIndex(0)
1393
- }
1394
-
1395
- function handleModelSelection(model: string) {
1396
- setSelectedModel(model)
1397
-
1398
- // Check if the selected model supports reasoning_effort
1399
- const modelInfo = availableModels.find(m => m.model === model)
1400
- setSupportsReasoningEffort(modelInfo?.supports_reasoning_effort || false)
1401
-
1402
- if (!modelInfo?.supports_reasoning_effort) {
1403
- setReasoningEffort(null)
1404
- }
1405
-
1406
- // Set max tokens based on model info or default
1407
- if (modelInfo?.max_tokens) {
1408
- const modelMaxTokens = modelInfo.max_tokens
1409
- // Check if the model's max tokens matches any of our presets
1410
- const matchingPreset = MAX_TOKENS_OPTIONS.find(
1411
- option => option.value === modelMaxTokens,
1412
- )
1413
-
1414
- if (matchingPreset) {
1415
- setMaxTokensMode('preset')
1416
- setSelectedMaxTokensPreset(modelMaxTokens)
1417
- setMaxTokens(modelMaxTokens.toString())
1418
- } else {
1419
- setMaxTokensMode('custom')
1420
- setMaxTokens(modelMaxTokens.toString())
1421
- }
1422
- setMaxTokensCursorOffset(modelMaxTokens.toString().length)
1423
- } else {
1424
- // No model-specific max tokens, use default
1425
- setMaxTokensMode('preset')
1426
- setSelectedMaxTokensPreset(DEFAULT_MAX_TOKENS)
1427
- setMaxTokens(DEFAULT_MAX_TOKENS.toString())
1428
- setMaxTokensCursorOffset(DEFAULT_MAX_TOKENS.toString().length)
1429
- }
1430
-
1431
- // Go to model parameters screen
1432
- navigateTo('modelParams')
1433
- // Reset active field index
1434
- setActiveFieldIndex(0)
1435
- }
1436
-
1437
- const handleModelParamsSubmit = () => {
1438
- // Values are already in state, no need to extract from form
1439
- // Ensure contextLength is set to a valid option before navigating
1440
- if (!CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength)) {
1441
- setContextLength(DEFAULT_CONTEXT_LENGTH)
1442
- }
1443
- // Navigate to context length screen
1444
- navigateTo('contextLength')
1445
- }
1446
-
1447
- async function testConnection(): Promise<{
1448
- success: boolean
1449
- message: string
1450
- endpoint?: string
1451
- details?: string
1452
- }> {
1453
- setIsTestingConnection(true)
1454
- setConnectionTestResult(null)
1455
-
1456
- try {
1457
- // Determine the base URL to test
1458
- let testBaseURL =
1459
- providerBaseUrl || providers[selectedProvider]?.baseURL || ''
1460
-
1461
- if (selectedProvider === 'azure') {
1462
- testBaseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${selectedModel}`
1463
- } else if (selectedProvider === 'custom-openai') {
1464
- testBaseURL = customBaseUrl
1465
- }
1466
-
1467
- // For OpenAI-compatible providers, try multiple endpoints in order of preference
1468
- const isOpenAICompatible = [
1469
- 'minimax',
1470
- 'kimi',
1471
- 'deepseek',
1472
- 'siliconflow',
1473
- 'qwen',
1474
- 'glm',
1475
- 'baidu-qianfan',
1476
- 'openai',
1477
- 'mistral',
1478
- 'xai',
1479
- 'groq',
1480
- 'custom-openai',
1481
- ].includes(selectedProvider)
1482
-
1483
- if (isOpenAICompatible) {
1484
- // 🔥 Use specialized GPT-5 connection test for GPT-5 models
1485
- const isGPT5 = selectedModel?.toLowerCase().includes('gpt-5')
1486
-
1487
- if (isGPT5) {
1488
- console.log(`🚀 Using specialized GPT-5 connection test for model: ${selectedModel}`)
1489
-
1490
- // Validate configuration first
1491
- const configValidation = validateGPT5Config({
1492
- model: selectedModel,
1493
- apiKey: apiKey,
1494
- baseURL: testBaseURL,
1495
- maxTokens: parseInt(maxTokens) || 8192,
1496
- provider: selectedProvider,
1497
- })
1498
-
1499
- if (!configValidation.valid) {
1500
- return {
1501
- success: false,
1502
- message: '❌ GPT-5 configuration validation failed',
1503
- details: configValidation.errors.join('\n'),
1504
- }
1505
- }
1506
-
1507
- // Use specialized GPT-5 test service
1508
- const gpt5Result = await testGPT5Connection({
1509
- model: selectedModel,
1510
- apiKey: apiKey,
1511
- baseURL: testBaseURL,
1512
- maxTokens: parseInt(maxTokens) || 8192,
1513
- provider: selectedProvider,
1514
- })
1515
-
1516
- return gpt5Result
1517
- }
1518
-
1519
- // For non-GPT-5 OpenAI-compatible models, use existing logic
1520
- const endpointsToTry = []
1521
-
1522
- if (selectedProvider === 'minimax') {
1523
- endpointsToTry.push(
1524
- {
1525
- path: '/text/chatcompletion_v2',
1526
- name: 'MiniMax v2 (recommended)',
1527
- },
1528
- { path: '/chat/completions', name: 'Standard OpenAI' },
1529
- )
1530
- } else {
1531
- endpointsToTry.push({
1532
- path: '/chat/completions',
1533
- name: 'Standard OpenAI',
1534
- })
1535
- }
1536
-
1537
- let lastError = null
1538
- for (const endpoint of endpointsToTry) {
1539
- try {
1540
- const testResult = await testChatEndpoint(
1541
- testBaseURL,
1542
- endpoint.path,
1543
- endpoint.name,
1544
- )
1545
-
1546
- if (testResult.success) {
1547
- return testResult
1548
- }
1549
- lastError = testResult
1550
- } catch (error) {
1551
- lastError = {
1552
- success: false,
1553
- message: `Failed to test ${endpoint.name}`,
1554
- endpoint: endpoint.path,
1555
- details: error instanceof Error ? error.message : String(error),
1556
- }
1557
- }
1558
- }
1559
-
1560
- return (
1561
- lastError || {
1562
- success: false,
1563
- message: 'All endpoints failed',
1564
- details: 'No endpoints could be reached',
1565
- }
1566
- )
1567
- } else {
1568
- // For non-OpenAI providers (like Anthropic, Gemini), use different test approach
1569
- return await testProviderSpecificEndpoint(testBaseURL)
1570
- }
1571
- } catch (error) {
1572
- return {
1573
- success: false,
1574
- message: 'Connection test failed',
1575
- details: error instanceof Error ? error.message : String(error),
1576
- }
1577
- } finally {
1578
- setIsTestingConnection(false)
1579
- }
1580
- }
1581
-
1582
- async function testChatEndpoint(
1583
- baseURL: string,
1584
- endpointPath: string,
1585
- endpointName: string,
1586
- ): Promise<{
1587
- success: boolean
1588
- message: string
1589
- endpoint?: string
1590
- details?: string
1591
- }> {
1592
- const testURL = `${baseURL.replace(/\/+$/, '')}${endpointPath}`
1593
-
1594
- // Create a test message that expects a specific response
1595
- const testPayload: any = {
1596
- model: selectedModel,
1597
- messages: [
1598
- {
1599
- role: 'user',
1600
- content:
1601
- 'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.',
1602
- },
1603
- ],
1604
- max_tokens: Math.max(parseInt(maxTokens) || 8192, 8192), // Ensure minimum 8192 tokens for connection test
1605
- temperature: 0,
1606
- stream: false,
1607
- }
1608
-
1609
- // GPT-5 parameter compatibility fix
1610
- if (selectedModel && selectedModel.toLowerCase().includes('gpt-5')) {
1611
- console.log(`Applying GPT-5 parameter fix for model: ${selectedModel}`)
1612
-
1613
- // GPT-5 requires max_completion_tokens instead of max_tokens
1614
- if (testPayload.max_tokens) {
1615
- testPayload.max_completion_tokens = testPayload.max_tokens
1616
- delete testPayload.max_tokens
1617
- console.log(`Transformed max_tokens → max_completion_tokens: ${testPayload.max_completion_tokens}`)
1618
- }
1619
-
1620
- // GPT-5 temperature handling - ensure it's 1 or undefined
1621
- if (testPayload.temperature !== undefined && testPayload.temperature !== 1) {
1622
- console.log(`Adjusting temperature from ${testPayload.temperature} to 1 for GPT-5`)
1623
- testPayload.temperature = 1
1624
- }
1625
- }
1626
-
1627
- const headers: Record<string, string> = {
1628
- 'Content-Type': 'application/json',
1629
- }
1630
-
1631
- // Add authorization headers
1632
- if (selectedProvider === 'azure') {
1633
- headers['api-key'] = apiKey
1634
- } else {
1635
- headers['Authorization'] = `Bearer ${apiKey}`
1636
- }
1637
-
1638
- try {
1639
- const response = await fetch(testURL, {
1640
- method: 'POST',
1641
- headers,
1642
- body: JSON.stringify(testPayload),
1643
- })
1644
-
1645
- if (response.ok) {
1646
- const data = await response.json()
1647
- console.log(
1648
- '[DEBUG] Connection test response:',
1649
- JSON.stringify(data, null, 2),
1650
- )
1651
-
1652
- // Check if we got a valid response with content
1653
- let responseContent = ''
1654
-
1655
- if (data.choices && data.choices.length > 0) {
1656
- responseContent = data.choices[0]?.message?.content || ''
1657
- } else if (data.reply) {
1658
- // Handle MiniMax format
1659
- responseContent = data.reply
1660
- } else if (data.output) {
1661
- // Handle other formats
1662
- responseContent = data.output?.text || data.output || ''
1663
- }
1664
-
1665
- console.log('[DEBUG] Extracted response content:', responseContent)
1666
-
1667
- // Check if response contains "YES" (case insensitive)
1668
- const containsYes = responseContent.toLowerCase().includes('yes')
1669
-
1670
- if (containsYes) {
1671
- return {
1672
- success: true,
1673
- message: `✅ Connection test passed with ${endpointName}`,
1674
- endpoint: endpointPath,
1675
- details: `Model responded correctly: "${responseContent.trim()}"`,
1676
- }
1677
- } else {
1678
- return {
1679
- success: false,
1680
- message: `⚠️ ${endpointName} connected but model response unexpected`,
1681
- endpoint: endpointPath,
1682
- details: `Expected "YES" but got: "${responseContent.trim() || '(empty response)'}"`,
1683
- }
1684
- }
1685
- } else {
1686
- const errorData = await response.json().catch(() => null)
1687
- const errorMessage =
1688
- errorData?.error?.message || errorData?.message || response.statusText
1689
-
1690
- return {
1691
- success: false,
1692
- message: `❌ ${endpointName} failed (${response.status})`,
1693
- endpoint: endpointPath,
1694
- details: `Error: ${errorMessage}`,
1695
- }
1696
- }
1697
- } catch (error) {
1698
- return {
1699
- success: false,
1700
- message: `❌ ${endpointName} connection failed`,
1701
- endpoint: endpointPath,
1702
- details: error instanceof Error ? error.message : String(error),
1703
- }
1704
- }
1705
- }
1706
-
1707
- async function testResponsesEndpoint(
1708
- baseURL: string,
1709
- endpointPath: string,
1710
- endpointName: string,
1711
- ): Promise<{
1712
- success: boolean
1713
- message: string
1714
- endpoint?: string
1715
- details?: string
1716
- }> {
1717
- const testURL = `${baseURL.replace(/\/+$/, '')}${endpointPath}`
1718
-
1719
- // 🔧 Enhanced GPT-5 Responses API test payload
1720
- const testPayload: any = {
1721
- model: selectedModel,
1722
- input: [
1723
- {
1724
- role: 'user',
1725
- content:
1726
- 'Please respond with exactly "YES" (in capital letters) to confirm this connection is working.',
1727
- },
1728
- ],
1729
- max_completion_tokens: Math.max(parseInt(maxTokens) || 8192, 8192),
1730
- temperature: 1, // GPT-5 only supports temperature=1
1731
- // 🚀 Add reasoning configuration for better GPT-5 performance
1732
- reasoning: {
1733
- effort: 'low', // Fast response for connection test
1734
- },
1735
- }
1736
-
1737
- console.log(`🔧 Testing GPT-5 Responses API for model: ${selectedModel}`)
1738
- console.log(`🔧 Test URL: ${testURL}`)
1739
- console.log(`🔧 Test payload:`, JSON.stringify(testPayload, null, 2))
1740
-
1741
- const headers: Record<string, string> = {
1742
- 'Content-Type': 'application/json',
1743
- 'Authorization': `Bearer ${apiKey}`,
1744
- }
1745
-
1746
- try {
1747
- const response = await fetch(testURL, {
1748
- method: 'POST',
1749
- headers,
1750
- body: JSON.stringify(testPayload),
1751
- })
1752
-
1753
- if (response.ok) {
1754
- const data = await response.json()
1755
- console.log(
1756
- '[DEBUG] Responses API connection test response:',
1757
- JSON.stringify(data, null, 2),
1758
- )
1759
-
1760
- // Extract content from Responses API format
1761
- let responseContent = ''
1762
-
1763
- if (data.output_text) {
1764
- responseContent = data.output_text
1765
- } else if (data.output) {
1766
- responseContent = typeof data.output === 'string' ? data.output : data.output.text || ''
1767
- }
1768
-
1769
- console.log('[DEBUG] Extracted response content:', responseContent)
1770
-
1771
- // Check if response contains "YES" (case insensitive)
1772
- const containsYes = responseContent.toLowerCase().includes('yes')
1773
-
1774
- if (containsYes) {
1775
- return {
1776
- success: true,
1777
- message: `✅ Connection test passed with ${endpointName}`,
1778
- endpoint: endpointPath,
1779
- details: `GPT-5 responded correctly via Responses API: "${responseContent.trim()}"`,
1780
- }
1781
- } else {
1782
- return {
1783
- success: false,
1784
- message: `⚠️ ${endpointName} connected but model response unexpected`,
1785
- endpoint: endpointPath,
1786
- details: `Expected "YES" but got: "${responseContent.trim() || '(empty response)'}"`,
1787
- }
1788
- }
1789
- } else {
1790
- // 🔧 Enhanced error handling with detailed debugging
1791
- const errorData = await response.json().catch(() => null)
1792
- const errorMessage =
1793
- errorData?.error?.message || errorData?.message || response.statusText
1794
-
1795
- console.log(`🚨 GPT-5 Responses API Error (${response.status}):`, errorData)
1796
-
1797
- // 🔧 Provide specific guidance for common GPT-5 errors
1798
- let details = `Responses API Error: ${errorMessage}`
1799
- if (response.status === 400 && errorMessage.includes('max_tokens')) {
1800
- details += '\n🔧 Note: This appears to be a parameter compatibility issue. The fallback to Chat Completions should handle this.'
1801
- } else if (response.status === 404) {
1802
- details += '\n🔧 Note: Responses API endpoint may not be available for this model or provider.'
1803
- } else if (response.status === 401) {
1804
- details += '\n🔧 Note: API key authentication failed.'
1805
- }
1806
-
1807
- return {
1808
- success: false,
1809
- message: `❌ ${endpointName} failed (${response.status})`,
1810
- endpoint: endpointPath,
1811
- details: details,
1812
- }
1813
- }
1814
- } catch (error) {
1815
- return {
1816
- success: false,
1817
- message: `❌ ${endpointName} connection failed`,
1818
- endpoint: endpointPath,
1819
- details: error instanceof Error ? error.message : String(error),
1820
- }
1821
- }
1822
- }
1823
-
1824
- async function testProviderSpecificEndpoint(baseURL: string): Promise<{
1825
- success: boolean
1826
- message: string
1827
- endpoint?: string
1828
- details?: string
1829
- }> {
1830
- // For Anthropic and Anthropic-compatible providers, use the official SDK for testing
1831
- if (selectedProvider === 'anthropic' || selectedProvider === 'bigdream') {
1832
- try {
1833
- console.log(
1834
- `[DEBUG] Testing ${selectedProvider} connection using official Anthropic SDK...`,
1835
- )
1836
-
1837
- // Determine the baseURL for testing
1838
- let testBaseURL: string | undefined = undefined
1839
- if (selectedProvider === 'bigdream') {
1840
- testBaseURL = baseURL || 'https://api-key.info'
1841
- } else if (selectedProvider === 'anthropic') {
1842
- // For anthropic, use user-provided baseURL if available, otherwise undefined (official API)
1843
- testBaseURL =
1844
- baseURL && baseURL !== 'https://api.anthropic.com'
1845
- ? baseURL
1846
- : undefined
1847
- }
1848
-
1849
- // Use the verifyApiKey function which uses the official Anthropic SDK
1850
- const isValid = await verifyApiKey(apiKey, testBaseURL, selectedProvider)
1851
-
1852
- if (isValid) {
1853
- return {
1854
- success: true,
1855
- message: `✅ ${selectedProvider} connection test passed`,
1856
- endpoint: '/messages',
1857
- details: 'API key verified using official Anthropic SDK',
1858
- }
1859
- } else {
1860
- return {
1861
- success: false,
1862
- message: `❌ ${selectedProvider} API key verification failed`,
1863
- endpoint: '/messages',
1864
- details:
1865
- 'Invalid API key. Please check your API key and try again.',
1866
- }
1867
- }
1868
- } catch (error) {
1869
- console.log(`[DEBUG] ${selectedProvider} connection test error:`, error)
1870
- return {
1871
- success: false,
1872
- message: `❌ ${selectedProvider} connection failed`,
1873
- endpoint: '/messages',
1874
- details: error instanceof Error ? error.message : String(error),
1875
- }
1876
- }
1877
- }
1878
-
1879
- // For other providers, return a placeholder success (we can extend this later)
1880
- return {
1881
- success: true,
1882
- message: `✅ Configuration saved for ${selectedProvider}`,
1883
- details: 'Provider-specific testing not implemented yet',
1884
- }
1885
- }
1886
-
1887
- async function handleConnectionTest() {
1888
- const result = await testConnection()
1889
- setConnectionTestResult(result)
1890
-
1891
- if (result.success) {
1892
- // Auto-advance to confirmation after a short delay
1893
- setTimeout(() => {
1894
- navigateTo('confirmation')
1895
- }, 2000)
1896
- }
1897
- }
1898
-
1899
- const handleContextLengthSubmit = () => {
1900
- // Context length value is already in state
1901
- // Navigate to connection test screen
1902
- navigateTo('connectionTest')
1903
- }
1904
-
1905
- async function saveConfiguration(
1906
- provider: ProviderType,
1907
- model: string,
1908
- ): Promise<string | null> {
1909
- let baseURL = providerBaseUrl || providers[provider]?.baseURL || ''
1910
- let actualProvider = provider
1911
-
1912
- // For Anthropic provider, determine the actual provider based on sub-menu selection
1913
- if (provider === 'anthropic') {
1914
- switch (anthropicProviderType) {
1915
- case 'official':
1916
- actualProvider = 'anthropic'
1917
- baseURL = baseURL || 'https://api.anthropic.com'
1918
- break
1919
- case 'bigdream':
1920
- actualProvider = 'bigdream'
1921
- baseURL = baseURL || 'https://api-key.info'
1922
- break
1923
- case 'custom':
1924
- actualProvider = 'anthropic' // Use anthropic for custom endpoints
1925
- // baseURL is already set from user input
1926
- break
1927
- }
1928
- }
1929
-
1930
- // For Azure, construct the baseURL using the resource name
1931
- if (provider === 'azure') {
1932
- baseURL = `https://${resourceName}.openai.azure.com/openai/deployments/${model}`
1933
- }
1934
- // For custom OpenAI-compatible API, use the custom base URL
1935
- else if (provider === 'custom-openai') {
1936
- baseURL = customBaseUrl
1937
- }
1938
-
1939
- try {
1940
- // Use ModelManager's addModel method for duplicate validation
1941
- const modelManager = getModelManager()
1942
-
1943
- const modelConfig = {
1944
- name: `${actualProvider} ${model}`,
1945
- provider: actualProvider,
1946
- modelName: model,
1947
- baseURL: baseURL,
1948
- apiKey: apiKey || '',
1949
- maxTokens: parseInt(maxTokens) || DEFAULT_MAX_TOKENS,
1950
- contextLength: contextLength || DEFAULT_CONTEXT_LENGTH,
1951
- reasoningEffort,
1952
- }
1953
-
1954
- // addModel method will throw error if duplicate exists
1955
- return await modelManager.addModel(modelConfig)
1956
- } catch (error) {
1957
- // Validation failed - show error to user
1958
- setValidationError(
1959
- error instanceof Error ? error.message : 'Failed to add model',
1960
- )
1961
- return null
1962
- }
1963
- }
1964
-
1965
- async function handleConfirmation() {
1966
- // Clear any previous validation errors
1967
- setValidationError(null)
1968
-
1969
- // Save the configuration and exit
1970
- const modelId = await saveConfiguration(selectedProvider, selectedModel)
1971
-
1972
- // If validation failed (modelId is null), don't proceed
1973
- if (!modelId) {
1974
- return // Error is already set in saveConfiguration
1975
- }
1976
-
1977
- // Handle model pointer assignment for new system
1978
- if (modelId && (isOnboarding || targetPointer)) {
1979
- if (isOnboarding) {
1980
- // First-time setup: set all pointers to this model
1981
- setAllPointersToModel(modelId)
1982
- } else if (targetPointer) {
1983
- // Specific pointer configuration: only set target pointer
1984
- setModelPointer(targetPointer, modelId)
1985
- }
1986
- }
1987
-
1988
- onDone()
1989
- }
1990
-
1991
- // Handle back navigation based on current screen
1992
- const handleBack = () => {
1993
- if (currentScreen === 'provider') {
1994
- // If we're at the first screen, exit
1995
- if (onCancel) {
1996
- onCancel()
1997
- } else {
1998
- onDone()
1999
- }
2000
- } else {
2001
- // Remove the current screen from the stack
2002
- setScreenStack(prev => prev.slice(0, -1))
2003
- }
2004
- }
2005
-
2006
- // Use escape navigation hook
2007
- useEscapeNavigation(handleBack, abortController)
2008
-
2009
- // Handle cursor offset changes
2010
- function handleCursorOffsetChange(offset: number) {
2011
- setCursorOffset(offset)
2012
- }
2013
-
2014
- // Handle API key changes
2015
- function handleApiKeyChange(value: string) {
2016
- setApiKeyEdited(true)
2017
- setApiKey(value)
2018
- }
2019
-
2020
- // Handle model search query changes
2021
- function handleModelSearchChange(value: string) {
2022
- setModelSearchQuery(value)
2023
- // Update cursor position to end of text when typing
2024
- setModelSearchCursorOffset(value.length)
2025
- }
2026
-
2027
- // Handle model search cursor offset changes
2028
- function handleModelSearchCursorOffsetChange(offset: number) {
2029
- setModelSearchCursorOffset(offset)
2030
- }
2031
-
2032
- // Handle input for Resource Name screen
2033
- useInput((input, key) => {
2034
- // Handle API key submission on Enter
2035
- if (currentScreen === 'apiKey' && key.return) {
2036
- if (apiKey) {
2037
- handleApiKeySubmit(apiKey)
2038
- }
2039
- return
2040
- }
2041
-
2042
- if (currentScreen === 'apiKey' && key.tab) {
2043
- // For providers that support manual model input, skip to manual model input
2044
- if (
2045
- selectedProvider === 'anthropic' ||
2046
- selectedProvider === 'kimi' ||
2047
- selectedProvider === 'deepseek' ||
2048
- selectedProvider === 'qwen' ||
2049
- selectedProvider === 'glm' ||
2050
- selectedProvider === 'minimax' ||
2051
- selectedProvider === 'baidu-qianfan' ||
2052
- selectedProvider === 'siliconflow' ||
2053
- selectedProvider === 'custom-openai'
2054
- ) {
2055
- navigateTo('modelInput')
2056
- return
2057
- }
2058
-
2059
- // For other providers, try to fetch models without API key
2060
- fetchModelsWithRetry().catch(error => {
2061
- // The retry logic in fetchModelsWithRetry already handles the error display
2062
- // This catch is just to prevent unhandled promise rejection
2063
- console.error('Final error after retries:', error)
2064
- })
2065
- return
2066
- }
2067
-
2068
- // Handle Resource Name submission on Enter
2069
- if (currentScreen === 'resourceName' && key.return) {
2070
- if (resourceName) {
2071
- handleResourceNameSubmit(resourceName)
2072
- }
2073
- return
2074
- }
2075
-
2076
- // Handle Base URL submission on Enter
2077
- if (currentScreen === 'baseUrl' && key.return) {
2078
- if (selectedProvider === 'custom-openai') {
2079
- handleCustomBaseUrlSubmit(customBaseUrl)
2080
- } else {
2081
- // For all other providers (including ollama), use the general handler
2082
- handleProviderBaseUrlSubmit(providerBaseUrl)
2083
- }
2084
- return
2085
- }
2086
-
2087
- // Handle Custom Model Name submission on Enter
2088
- if (currentScreen === 'modelInput' && key.return) {
2089
- if (customModelName) {
2090
- handleCustomModelSubmit(customModelName)
2091
- }
2092
- return
2093
- }
2094
-
2095
- // Handle confirmation on Enter
2096
- if (currentScreen === 'confirmation' && key.return) {
2097
- handleConfirmation().catch(error => {
2098
- console.error('Error in handleConfirmation:', error)
2099
- setValidationError(
2100
- error instanceof Error ? error.message : 'Unexpected error occurred',
2101
- )
2102
- })
2103
- return
2104
- }
2105
-
2106
- // Handle connection test
2107
- if (currentScreen === 'connectionTest') {
2108
- if (key.return) {
2109
- if (!isTestingConnection && !connectionTestResult) {
2110
- handleConnectionTest()
2111
- } else if (connectionTestResult && connectionTestResult.success) {
2112
- navigateTo('confirmation')
2113
- } else if (connectionTestResult && !connectionTestResult.success) {
2114
- // Retry the test
2115
- handleConnectionTest()
2116
- }
2117
- return
2118
- }
2119
- }
2120
-
2121
- // Handle context length selection
2122
- if (currentScreen === 'contextLength') {
2123
- if (key.return) {
2124
- handleContextLengthSubmit()
2125
- return
2126
- }
2127
-
2128
- if (key.upArrow) {
2129
- const currentIndex = CONTEXT_LENGTH_OPTIONS.findIndex(
2130
- opt => opt.value === contextLength,
2131
- )
2132
- const newIndex =
2133
- currentIndex > 0
2134
- ? currentIndex - 1
2135
- : currentIndex === -1
2136
- ? CONTEXT_LENGTH_OPTIONS.findIndex(
2137
- opt => opt.value === DEFAULT_CONTEXT_LENGTH,
2138
- ) || 0
2139
- : CONTEXT_LENGTH_OPTIONS.length - 1
2140
- setContextLength(CONTEXT_LENGTH_OPTIONS[newIndex].value)
2141
- return
2142
- }
2143
-
2144
- if (key.downArrow) {
2145
- const currentIndex = CONTEXT_LENGTH_OPTIONS.findIndex(
2146
- opt => opt.value === contextLength,
2147
- )
2148
- const newIndex =
2149
- currentIndex === -1
2150
- ? CONTEXT_LENGTH_OPTIONS.findIndex(
2151
- opt => opt.value === DEFAULT_CONTEXT_LENGTH,
2152
- ) || 0
2153
- : (currentIndex + 1) % CONTEXT_LENGTH_OPTIONS.length
2154
- setContextLength(CONTEXT_LENGTH_OPTIONS[newIndex].value)
2155
- return
2156
- }
2157
- }
2158
-
2159
- // Handle paste event (Ctrl+V or Cmd+V)
2160
- if (
2161
- currentScreen === 'apiKey' &&
2162
- ((key.ctrl && input === 'v') || (key.meta && input === 'v'))
2163
- ) {
2164
- // We can't directly access clipboard in terminal, but we can show a message
2165
- setModelLoadError(
2166
- "Please use your terminal's paste functionality or type the API key manually",
2167
- )
2168
- return
2169
- }
2170
-
2171
- // Handle Tab key for form navigation in model params screen
2172
- if (currentScreen === 'modelParams' && key.tab) {
2173
- const formFields = getFormFieldsForModelParams()
2174
- // Move to next field
2175
- setActiveFieldIndex(current => (current + 1) % formFields.length)
2176
- return
2177
- }
2178
-
2179
- // Handle Enter key for form submission in model params screen
2180
- if (currentScreen === 'modelParams' && key.return) {
2181
- const formFields = getFormFieldsForModelParams()
2182
- const currentField = formFields[activeFieldIndex]
2183
-
2184
- if (
2185
- currentField.name === 'submit' ||
2186
- activeFieldIndex === formFields.length - 1
2187
- ) {
2188
- // If on the Continue button, submit the form
2189
- handleModelParamsSubmit()
2190
- } else if (currentField.component === 'select') {
2191
- // For select fields, move to the next field (since selection should be handled by Select component)
2192
- setActiveFieldIndex(current =>
2193
- Math.min(current + 1, formFields.length - 1),
2194
- )
2195
- }
2196
- return
2197
- }
2198
- })
2199
-
2200
- // Helper function to get form fields for model params
2201
- function getFormFieldsForModelParams() {
2202
- return [
2203
- {
2204
- name: 'maxTokens',
2205
- label: 'Maximum Tokens',
2206
- description: 'Select the maximum number of tokens to generate.',
2207
- value: parseInt(maxTokens),
2208
- component: 'select',
2209
- options: MAX_TOKENS_OPTIONS.map(option => ({
2210
- label: option.label,
2211
- value: option.value.toString(),
2212
- })),
2213
- defaultValue: maxTokens,
2214
- },
2215
- ...(supportsReasoningEffort
2216
- ? [
2217
- {
2218
- name: 'reasoningEffort',
2219
- label: 'Reasoning Effort',
2220
- description: 'Controls reasoning depth for complex problems.',
2221
- value: reasoningEffort,
2222
- component: 'select',
2223
- },
2224
- ]
2225
- : []),
2226
- {
2227
- name: 'submit',
2228
- label: 'Continue →',
2229
- component: 'button',
2230
- },
2231
- ]
2232
- }
2233
-
2234
- // Render API Key Input Screen
2235
- if (currentScreen === 'apiKey') {
2236
- const modelTypeText = 'this model profile'
2237
-
2238
- return (
2239
- <Box flexDirection="column" gap={1}>
2240
- <Box
2241
- flexDirection="column"
2242
- gap={1}
2243
- borderStyle="round"
2244
- borderColor={theme.secondaryBorder}
2245
- paddingX={2}
2246
- paddingY={1}
2247
- >
2248
- <Text bold>
2249
- API Key Setup{' '}
2250
- {exitState.pending
2251
- ? `(press ${exitState.keyName} again to exit)`
2252
- : ''}
2253
- </Text>
2254
- <Box flexDirection="column" gap={1}>
2255
- <Text bold>
2256
- Enter your {getProviderLabel(selectedProvider, 0).split(' (')[0]}{' '}
2257
- API key for {modelTypeText}:
2258
- </Text>
2259
- <Box flexDirection="column" width={70}>
2260
- <Text color={theme.secondaryText}>
2261
- This key will be stored locally and used to access the{' '}
2262
- {selectedProvider} API.
2263
- <Newline />
2264
- Your key is never sent to our servers.
2265
- <Newline />
2266
- <Newline />
2267
- {selectedProvider === 'kimi' && (
2268
- <>
2269
- 💡 Get your API key from:{' '}
2270
- <Text color={theme.suggestion}>
2271
- https://platform.moonshot.cn/console/api-keys
2272
- </Text>
2273
- </>
2274
- )}
2275
- {selectedProvider === 'deepseek' && (
2276
- <>
2277
- 💡 Get your API key from:{' '}
2278
- <Text color={theme.suggestion}>
2279
- https://platform.deepseek.com/api_keys
2280
- </Text>
2281
- </>
2282
- )}
2283
- {selectedProvider === 'siliconflow' && (
2284
- <>
2285
- 💡 Get your API key from:{' '}
2286
- <Text color={theme.suggestion}>
2287
- https://cloud.siliconflow.cn/i/oJWsm6io
2288
- </Text>
2289
- </>
2290
- )}
2291
- {selectedProvider === 'qwen' && (
2292
- <>
2293
- 💡 Get your API key from:{' '}
2294
- <Text color={theme.suggestion}>
2295
- https://bailian.console.aliyun.com/?tab=model#/api-key
2296
- </Text>
2297
- </>
2298
- )}
2299
- {selectedProvider === 'glm' && (
2300
- <>
2301
- 💡 Get your API key from:{' '}
2302
- <Text color={theme.suggestion}>
2303
- https://open.bigmodel.cn (API Keys section)
2304
- </Text>
2305
- </>
2306
- )}
2307
- {selectedProvider === 'minimax' && (
2308
- <>
2309
- 💡 Get your API key from:{' '}
2310
- <Text color={theme.suggestion}>
2311
- https://www.minimax.io/platform/user-center/basic-information
2312
- </Text>
2313
- </>
2314
- )}
2315
- {selectedProvider === 'baidu-qianfan' && (
2316
- <>
2317
- 💡 Get your API key from:{' '}
2318
- <Text color={theme.suggestion}>
2319
- https://console.bce.baidu.com/iam/#/iam/accesslist
2320
- </Text>
2321
- </>
2322
- )}
2323
- {selectedProvider === 'anthropic' && (
2324
- <>
2325
- 💡 Get your API key from:{' '}
2326
- <Text color={theme.suggestion}>
2327
- {anthropicProviderType === 'official'
2328
- ? 'https://console.anthropic.com/settings/keys'
2329
- : anthropicProviderType === 'bigdream'
2330
- ? 'https://api-key.info/register?aff=MSl4'
2331
- : anthropicProviderType === 'opendev'
2332
- ? 'https://api.openai-next.com/register/?aff_code=4xo7'
2333
- : 'your custom API provider'}
2334
- </Text>
2335
- </>
2336
- )}
2337
- {selectedProvider === 'openai' && (
2338
- <>
2339
- 💡 Get your API key from:{' '}
2340
- <Text color={theme.suggestion}>
2341
- https://platform.openai.com/api-keys
2342
- </Text>
2343
- </>
2344
- )}
2345
- </Text>
2346
- </Box>
2347
-
2348
- <Box>
2349
- <TextInput
2350
- placeholder="sk-..."
2351
- value={apiKey}
2352
- onChange={handleApiKeyChange}
2353
- onSubmit={handleApiKeySubmit}
2354
- mask="*"
2355
- columns={500}
2356
- cursorOffset={cursorOffset}
2357
- onChangeCursorOffset={handleCursorOffsetChange}
2358
- showCursor={true}
2359
- />
2360
- </Box>
2361
-
2362
- <Box marginTop={1}>
2363
- <Text>
2364
- <Text color={theme.suggestion} dimColor={!apiKey}>
2365
- [Submit API Key]
2366
- </Text>
2367
- <Text>
2368
- {' '}
2369
- - Press Enter or click to continue with this API key
2370
- </Text>
2371
- </Text>
2372
- </Box>
2373
-
2374
- {isLoadingModels && (
2375
- <Box>
2376
- <Text color={theme.suggestion}>
2377
- Loading available models...
2378
- </Text>
2379
- </Box>
2380
- )}
2381
- {modelLoadError && (
2382
- <Box>
2383
- <Text color="red">Error: {modelLoadError}</Text>
2384
- </Box>
2385
- )}
2386
- <Box marginTop={1}>
2387
- <Text dimColor>
2388
- Press <Text color={theme.suggestion}>Enter</Text> to continue,{' '}
2389
- <Text color={theme.suggestion}>Tab</Text> to{' '}
2390
- {selectedProvider === 'anthropic' ||
2391
- selectedProvider === 'kimi' ||
2392
- selectedProvider === 'deepseek' ||
2393
- selectedProvider === 'qwen' ||
2394
- selectedProvider === 'glm' ||
2395
- selectedProvider === 'minimax' ||
2396
- selectedProvider === 'baidu-qianfan' ||
2397
- selectedProvider === 'siliconflow' ||
2398
- selectedProvider === 'custom-openai'
2399
- ? 'skip to manual model input'
2400
- : 'skip using a key'}
2401
- , or <Text color={theme.suggestion}>Esc</Text> to go back
2402
- </Text>
2403
- </Box>
2404
- </Box>
2405
- </Box>
2406
- </Box>
2407
- )
2408
- }
2409
-
2410
- // Render Model Selection Screen
2411
- if (currentScreen === 'model') {
2412
- const modelTypeText = 'this model profile'
2413
-
2414
- return (
2415
- <Box flexDirection="column" gap={1}>
2416
- <Box
2417
- flexDirection="column"
2418
- gap={1}
2419
- borderStyle="round"
2420
- borderColor={theme.secondaryBorder}
2421
- paddingX={2}
2422
- paddingY={1}
2423
- >
2424
- <Text bold>
2425
- Model Selection{' '}
2426
- {exitState.pending
2427
- ? `(press ${exitState.keyName} again to exit)`
2428
- : ''}
2429
- </Text>
2430
- <Box flexDirection="column" gap={1}>
2431
- <Text bold>
2432
- Select a model from{' '}
2433
- {
2434
- getProviderLabel(
2435
- selectedProvider,
2436
- availableModels.length,
2437
- ).split(' (')[0]
2438
- }{' '}
2439
- for {modelTypeText}:
2440
- </Text>
2441
- <Box flexDirection="column" width={70}>
2442
- <Text color={theme.secondaryText}>
2443
- This model profile can be assigned to different pointers (main,
2444
- task, reasoning, quick) for various use cases.
2445
- </Text>
2446
- </Box>
2447
-
2448
- <Box marginY={1}>
2449
- <Text bold>Search models:</Text>
2450
- <TextInput
2451
- placeholder="Type to filter models..."
2452
- value={modelSearchQuery}
2453
- onChange={handleModelSearchChange}
2454
- columns={100}
2455
- cursorOffset={modelSearchCursorOffset}
2456
- onChangeCursorOffset={handleModelSearchCursorOffsetChange}
2457
- showCursor={true}
2458
- focus={true}
2459
- />
2460
- </Box>
2461
-
2462
- {modelOptions.length > 0 ? (
2463
- <>
2464
- <Select
2465
- options={modelOptions}
2466
- onChange={handleModelSelection}
2467
- />
2468
- <Text dimColor>
2469
- Showing {modelOptions.length} of {availableModels.length}{' '}
2470
- models
2471
- </Text>
2472
- </>
2473
- ) : (
2474
- <Box>
2475
- {availableModels.length > 0 ? (
2476
- <Text color="yellow">
2477
- No models match your search. Try a different query.
2478
- </Text>
2479
- ) : (
2480
- <Text color="yellow">
2481
- No models available for this provider.
2482
- </Text>
2483
- )}
2484
- </Box>
2485
- )}
2486
-
2487
- <Box marginTop={1}>
2488
- <Text dimColor>
2489
- Press <Text color={theme.suggestion}>Esc</Text> to go back to
2490
- API key input
2491
- </Text>
2492
- </Box>
2493
- </Box>
2494
- </Box>
2495
- </Box>
2496
- )
2497
- }
2498
-
2499
- if (currentScreen === 'modelParams') {
2500
- // Define form fields
2501
- const formFields = getFormFieldsForModelParams()
2502
-
2503
- return (
2504
- <Box flexDirection="column" gap={1}>
2505
- <Box
2506
- flexDirection="column"
2507
- gap={1}
2508
- borderStyle="round"
2509
- borderColor={theme.secondaryBorder}
2510
- paddingX={2}
2511
- paddingY={1}
2512
- >
2513
- <Text bold>
2514
- Model Parameters{' '}
2515
- {exitState.pending
2516
- ? `(press ${exitState.keyName} again to exit)`
2517
- : ''}
2518
- </Text>
2519
- <Box flexDirection="column" gap={1}>
2520
- <Text bold>Configure parameters for {selectedModel}:</Text>
2521
- <Box flexDirection="column" width={70}>
2522
- <Text color={theme.secondaryText}>
2523
- Use <Text color={theme.suggestion}>Tab</Text> to navigate
2524
- between fields. Press{' '}
2525
- <Text color={theme.suggestion}>Enter</Text> to submit.
2526
- </Text>
2527
- </Box>
2528
-
2529
- <Box flexDirection="column">
2530
- {formFields.map((field, index) => (
2531
- <Box flexDirection="column" marginY={1} key={field.name}>
2532
- {field.component !== 'button' ? (
2533
- <>
2534
- <Text
2535
- bold
2536
- color={
2537
- activeFieldIndex === index ? theme.success : undefined
2538
- }
2539
- >
2540
- {field.label}
2541
- </Text>
2542
- {field.description && (
2543
- <Text color={theme.secondaryText}>
2544
- {field.description}
2545
- </Text>
2546
- )}
2547
- </>
2548
- ) : (
2549
- <Text
2550
- bold
2551
- color={
2552
- activeFieldIndex === index ? theme.success : undefined
2553
- }
2554
- >
2555
- {field.label}
2556
- </Text>
2557
- )}
2558
- <Box marginY={1}>
2559
- {activeFieldIndex === index ? (
2560
- field.component === 'select' ? (
2561
- field.name === 'maxTokens' ? (
2562
- <Select
2563
- options={field.options || []}
2564
- onChange={value => {
2565
- const numValue = parseInt(value)
2566
- setMaxTokens(numValue.toString())
2567
- setSelectedMaxTokensPreset(numValue)
2568
- setMaxTokensCursorOffset(
2569
- numValue.toString().length,
2570
- )
2571
- // Move to next field after selection
2572
- setTimeout(() => {
2573
- setActiveFieldIndex(index + 1)
2574
- }, 100)
2575
- }}
2576
- defaultValue={field.defaultValue}
2577
- />
2578
- ) : (
2579
- <Select
2580
- options={reasoningEffortOptions}
2581
- onChange={value => {
2582
- setReasoningEffort(value as ReasoningEffortOption)
2583
- // Move to next field after selection
2584
- setTimeout(() => {
2585
- setActiveFieldIndex(index + 1)
2586
- }, 100)
2587
- }}
2588
- defaultValue={reasoningEffort}
2589
- />
2590
- )
2591
- ) : null
2592
- ) : field.name === 'maxTokens' ? (
2593
- <Text color={theme.secondaryText}>
2594
- Current:{' '}
2595
- <Text color={theme.suggestion}>
2596
- {MAX_TOKENS_OPTIONS.find(
2597
- opt => opt.value === parseInt(maxTokens),
2598
- )?.label || `${maxTokens} tokens`}
2599
- </Text>
2600
- </Text>
2601
- ) : field.name === 'reasoningEffort' ? (
2602
- <Text color={theme.secondaryText}>
2603
- Current:{' '}
2604
- <Text color={theme.suggestion}>{reasoningEffort}</Text>
2605
- </Text>
2606
- ) : null}
2607
- </Box>
2608
- </Box>
2609
- ))}
2610
-
2611
- <Box marginTop={1}>
2612
- <Text dimColor>
2613
- Press <Text color={theme.suggestion}>Tab</Text> to navigate,{' '}
2614
- <Text color={theme.suggestion}>Enter</Text> to continue, or{' '}
2615
- <Text color={theme.suggestion}>Esc</Text> to go back
2616
- </Text>
2617
- </Box>
2618
- </Box>
2619
- </Box>
2620
- </Box>
2621
- </Box>
2622
- )
2623
- }
2624
-
2625
- // Render Resource Name Input Screen
2626
- if (currentScreen === 'resourceName') {
2627
- return (
2628
- <Box flexDirection="column" gap={1}>
2629
- <Box
2630
- flexDirection="column"
2631
- gap={1}
2632
- borderStyle="round"
2633
- borderColor={theme.secondaryBorder}
2634
- paddingX={2}
2635
- paddingY={1}
2636
- >
2637
- <Text bold>
2638
- Azure Resource Setup{' '}
2639
- {exitState.pending
2640
- ? `(press ${exitState.keyName} again to exit)`
2641
- : ''}
2642
- </Text>
2643
- <Box flexDirection="column" gap={1}>
2644
- <Text bold>Enter your Azure OpenAI resource name:</Text>
2645
- <Box flexDirection="column" width={70}>
2646
- <Text color={theme.secondaryText}>
2647
- This is the name of your Azure OpenAI resource (without the full
2648
- domain).
2649
- <Newline />
2650
- For example, if your endpoint is
2651
- "https://myresource.openai.azure.com", enter "myresource".
2652
- </Text>
2653
- </Box>
2654
-
2655
- <Box>
2656
- <TextInput
2657
- placeholder="myazureresource"
2658
- value={resourceName}
2659
- onChange={setResourceName}
2660
- onSubmit={handleResourceNameSubmit}
2661
- columns={100}
2662
- cursorOffset={resourceNameCursorOffset}
2663
- onChangeCursorOffset={setResourceNameCursorOffset}
2664
- showCursor={true}
2665
- />
2666
- </Box>
2667
-
2668
- <Box marginTop={1}>
2669
- <Text>
2670
- <Text color={theme.suggestion} dimColor={!resourceName}>
2671
- [Submit Resource Name]
2672
- </Text>
2673
- <Text> - Press Enter or click to continue</Text>
2674
- </Text>
2675
- </Box>
2676
-
2677
- <Box marginTop={1}>
2678
- <Text dimColor>
2679
- Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2680
- <Text color={theme.suggestion}>Esc</Text> to go back
2681
- </Text>
2682
- </Box>
2683
- </Box>
2684
- </Box>
2685
- </Box>
2686
- )
2687
- }
2688
-
2689
- // Render Base URL Input Screen (for all providers)
2690
- if (currentScreen === 'baseUrl') {
2691
- const isCustomOpenAI = selectedProvider === 'custom-openai'
2692
-
2693
- // For custom-openai, we still use the old logic with customBaseUrl
2694
- if (isCustomOpenAI) {
2695
- return (
2696
- <Box flexDirection="column" gap={1}>
2697
- <Box
2698
- flexDirection="column"
2699
- gap={1}
2700
- borderStyle="round"
2701
- borderColor={theme.secondaryBorder}
2702
- paddingX={2}
2703
- paddingY={1}
2704
- >
2705
- <Text bold>
2706
- Custom API Server Setup{' '}
2707
- {exitState.pending
2708
- ? `(press ${exitState.keyName} again to exit)`
2709
- : ''}
2710
- </Text>
2711
- <Box flexDirection="column" gap={1}>
2712
- <Text bold>Enter your custom API URL:</Text>
2713
- <Box flexDirection="column" width={70}>
2714
- <Text color={theme.secondaryText}>
2715
- This is the base URL for your OpenAI-compatible API.
2716
- <Newline />
2717
- For example: https://api.example.com/v1
2718
- </Text>
2719
- </Box>
2720
-
2721
- <Box>
2722
- <TextInput
2723
- placeholder="https://api.example.com/v1"
2724
- value={customBaseUrl}
2725
- onChange={setCustomBaseUrl}
2726
- onSubmit={handleCustomBaseUrlSubmit}
2727
- columns={100}
2728
- cursorOffset={customBaseUrlCursorOffset}
2729
- onChangeCursorOffset={setCustomBaseUrlCursorOffset}
2730
- showCursor={!isLoadingModels}
2731
- focus={!isLoadingModels}
2732
- />
2733
- </Box>
2734
-
2735
- <Box marginTop={1}>
2736
- <Text>
2737
- <Text
2738
- color={
2739
- isLoadingModels ? theme.secondaryText : theme.suggestion
2740
- }
2741
- >
2742
- [Submit Base URL]
2743
- </Text>
2744
- <Text> - Press Enter or click to continue</Text>
2745
- </Text>
2746
- </Box>
2747
-
2748
- <Box marginTop={1}>
2749
- <Text dimColor>
2750
- Press <Text color={theme.suggestion}>Enter</Text> to continue
2751
- or <Text color={theme.suggestion}>Esc</Text> to go back
2752
- </Text>
2753
- </Box>
2754
- </Box>
2755
- </Box>
2756
- </Box>
2757
- )
2758
- }
2759
-
2760
- // For all other providers, use the new general provider URL configuration
2761
- const providerName = providers[selectedProvider]?.name || selectedProvider
2762
- const defaultUrl = providers[selectedProvider]?.baseURL || ''
2763
-
2764
- return (
2765
- <Box flexDirection="column" gap={1}>
2766
- <Box
2767
- flexDirection="column"
2768
- gap={1}
2769
- borderStyle="round"
2770
- borderColor={theme.secondaryBorder}
2771
- paddingX={2}
2772
- paddingY={1}
2773
- >
2774
- <Text bold>
2775
- {providerName} API Configuration{' '}
2776
- {exitState.pending
2777
- ? `(press ${exitState.keyName} again to exit)`
2778
- : ''}
2779
- </Text>
2780
- <Box flexDirection="column" gap={1}>
2781
- <Text bold>Configure the API endpoint for {providerName}:</Text>
2782
- <Box flexDirection="column" width={70}>
2783
- <Text color={theme.secondaryText}>
2784
- {selectedProvider === 'ollama' ? (
2785
- <>
2786
- This is the URL of your Ollama server.
2787
- <Newline />
2788
- Default is http://localhost:11434/v1 for local Ollama
2789
- installations.
2790
- </>
2791
- ) : (
2792
- <>
2793
- This is the base URL for the {providerName} API.
2794
- <Newline />
2795
- You can modify this URL or press Enter to use the default.
2796
- </>
2797
- )}
2798
- </Text>
2799
- </Box>
2800
-
2801
- <Box>
2802
- <TextInput
2803
- placeholder={defaultUrl}
2804
- value={providerBaseUrl}
2805
- onChange={setProviderBaseUrl}
2806
- onSubmit={handleProviderBaseUrlSubmit}
2807
- columns={100}
2808
- cursorOffset={providerBaseUrlCursorOffset}
2809
- onChangeCursorOffset={setProviderBaseUrlCursorOffset}
2810
- showCursor={!isLoadingModels}
2811
- focus={!isLoadingModels}
2812
- />
2813
- </Box>
2814
-
2815
- <Box marginTop={1}>
2816
- <Text>
2817
- <Text
2818
- color={
2819
- isLoadingModels ? theme.secondaryText : theme.suggestion
2820
- }
2821
- >
2822
- [Submit Base URL]
2823
- </Text>
2824
- <Text> - Press Enter or click to continue</Text>
2825
- </Text>
2826
- </Box>
2827
-
2828
- {isLoadingModels && (
2829
- <Box marginTop={1}>
2830
- <Text color={theme.success}>
2831
- {selectedProvider === 'ollama'
2832
- ? 'Connecting to Ollama server...'
2833
- : `Connecting to ${providerName}...`}
2834
- </Text>
2835
- </Box>
2836
- )}
2837
-
2838
- {modelLoadError && (
2839
- <Box marginTop={1}>
2840
- <Text color="red">Error: {modelLoadError}</Text>
2841
- </Box>
2842
- )}
2843
-
2844
- <Box marginTop={1}>
2845
- <Text dimColor>
2846
- Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
2847
- <Text color={theme.suggestion}>Esc</Text> to go back
2848
- </Text>
2849
- </Box>
2850
- </Box>
2851
- </Box>
2852
- </Box>
2853
- )
2854
- }
2855
-
2856
- // Render Custom Model Input Screen
2857
- if (currentScreen === 'modelInput') {
2858
- const modelTypeText = 'this model profile'
2859
-
2860
- // Determine the screen title and description based on provider
2861
- let screenTitle = 'Manual Model Setup'
2862
- let description = 'Enter the model name manually'
2863
- let placeholder = 'gpt-4'
2864
- let examples = 'For example: "gpt-4", "gpt-3.5-turbo", etc.'
2865
-
2866
- if (selectedProvider === 'azure') {
2867
- screenTitle = 'Azure Model Setup'
2868
- description = `Enter your Azure OpenAI deployment name for ${modelTypeText}:`
2869
- examples = 'For example: "gpt-4", "gpt-35-turbo", etc.'
2870
- placeholder = 'gpt-4'
2871
- } else if (selectedProvider === 'anthropic') {
2872
- screenTitle = 'Claude Model Setup'
2873
- description = `Enter the Claude model name for ${modelTypeText}:`
2874
- examples =
2875
- 'For example: "claude-3-5-sonnet-latest", "claude-3-5-haiku-latest", etc.'
2876
- placeholder = 'claude-3-5-sonnet-latest'
2877
- } else if (selectedProvider === 'bigdream') {
2878
- screenTitle = 'BigDream Model Setup'
2879
- description = `Enter the BigDream model name for ${modelTypeText}:`
2880
- examples =
2881
- 'For example: "claude-3-5-sonnet-latest", "claude-3-5-haiku-latest", etc.'
2882
- placeholder = 'claude-3-5-sonnet-latest'
2883
- } else if (selectedProvider === 'kimi') {
2884
- screenTitle = 'Kimi Model Setup'
2885
- description = `Enter the Kimi model name for ${modelTypeText}:`
2886
- examples = 'For example: "kimi-k2-0711-preview"'
2887
- placeholder = 'kimi-k2-0711-preview'
2888
- } else if (selectedProvider === 'deepseek') {
2889
- screenTitle = 'DeepSeek Model Setup'
2890
- description = `Enter the DeepSeek model name for ${modelTypeText}:`
2891
- examples =
2892
- 'For example: "deepseek-chat", "deepseek-coder", "deepseek-reasoner", etc.'
2893
- placeholder = 'deepseek-chat'
2894
- } else if (selectedProvider === 'siliconflow') {
2895
- screenTitle = 'SiliconFlow Model Setup'
2896
- description = `Enter the SiliconFlow model name for ${modelTypeText}:`
2897
- examples =
2898
- 'For example: "Qwen/Qwen2.5-72B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", etc.'
2899
- placeholder = 'Qwen/Qwen2.5-72B-Instruct'
2900
- } else if (selectedProvider === 'qwen') {
2901
- screenTitle = 'Qwen Model Setup'
2902
- description = `Enter the Qwen model name for ${modelTypeText}:`
2903
- examples = 'For example: "qwen-plus", "qwen-turbo", "qwen-max", etc.'
2904
- placeholder = 'qwen-plus'
2905
- } else if (selectedProvider === 'glm') {
2906
- screenTitle = 'GLM Model Setup'
2907
- description = `Enter the GLM model name for ${modelTypeText}:`
2908
- examples = 'For example: "glm-4", "glm-4v", "glm-3-turbo", etc.'
2909
- placeholder = 'glm-4'
2910
- } else if (selectedProvider === 'minimax') {
2911
- screenTitle = 'MiniMax Model Setup'
2912
- description = `Enter the MiniMax model name for ${modelTypeText}:`
2913
- examples =
2914
- 'For example: "abab6.5s-chat", "abab6.5g-chat", "abab5.5s-chat", etc.'
2915
- placeholder = 'abab6.5s-chat'
2916
- } else if (selectedProvider === 'baidu-qianfan') {
2917
- screenTitle = 'Baidu Qianfan Model Setup'
2918
- description = `Enter the Baidu Qianfan model name for ${modelTypeText}:`
2919
- examples =
2920
- 'For example: "ERNIE-4.0-8K", "ERNIE-3.5-8K", "ERNIE-Speed-128K", etc.'
2921
- placeholder = 'ERNIE-4.0-8K'
2922
- } else if (selectedProvider === 'custom-openai') {
2923
- screenTitle = 'Custom API Model Setup'
2924
- description = `Enter the model name for ${modelTypeText}:`
2925
- examples = 'Enter the exact model name as supported by your API endpoint.'
2926
- placeholder = 'model-name'
2927
- }
2928
-
2929
- return (
2930
- <Box flexDirection="column" gap={1}>
2931
- <Box
2932
- flexDirection="column"
2933
- gap={1}
2934
- borderStyle="round"
2935
- borderColor={theme.secondaryBorder}
2936
- paddingX={2}
2937
- paddingY={1}
2938
- >
2939
- <Text bold>
2940
- {screenTitle}{' '}
2941
- {exitState.pending
2942
- ? `(press ${exitState.keyName} again to exit)`
2943
- : ''}
2944
- </Text>
2945
- <Box flexDirection="column" gap={1}>
2946
- <Text bold>{description}</Text>
2947
- <Box flexDirection="column" width={70}>
2948
- <Text color={theme.secondaryText}>
2949
- {selectedProvider === 'azure'
2950
- ? 'This is the deployment name you configured in your Azure OpenAI resource.'
2951
- : selectedProvider === 'anthropic'
2952
- ? 'This should be a valid Claude model identifier from Claude.'
2953
- : selectedProvider === 'bigdream'
2954
- ? 'This should be a valid Claude model identifier supported by BigDream.'
2955
- : selectedProvider === 'kimi'
2956
- ? 'This should be a valid Kimi model identifier from Moonshot AI.'
2957
- : selectedProvider === 'deepseek'
2958
- ? 'This should be a valid DeepSeek model identifier.'
2959
- : selectedProvider === 'siliconflow'
2960
- ? 'This should be a valid SiliconFlow model identifier.'
2961
- : selectedProvider === 'qwen'
2962
- ? 'This should be a valid Qwen model identifier from Alibaba Cloud.'
2963
- : selectedProvider === 'glm'
2964
- ? 'This should be a valid GLM model identifier from Zhipu AI.'
2965
- : selectedProvider === 'minimax'
2966
- ? 'This should be a valid MiniMax model identifier.'
2967
- : selectedProvider === 'baidu-qianfan'
2968
- ? 'This should be a valid Baidu Qianfan model identifier.'
2969
- : 'This should match the model name supported by your API endpoint.'}
2970
- <Newline />
2971
- {examples}
2972
- </Text>
2973
- </Box>
2974
-
2975
- <Box>
2976
- <TextInput
2977
- placeholder={placeholder}
2978
- value={customModelName}
2979
- onChange={setCustomModelName}
2980
- onSubmit={handleCustomModelSubmit}
2981
- columns={100}
2982
- cursorOffset={customModelNameCursorOffset}
2983
- onChangeCursorOffset={setCustomModelNameCursorOffset}
2984
- showCursor={true}
2985
- />
2986
- </Box>
2987
-
2988
- <Box marginTop={1}>
2989
- <Text>
2990
- <Text color={theme.suggestion} dimColor={!customModelName}>
2991
- [Submit Model Name]
2992
- </Text>
2993
- <Text> - Press Enter or click to continue</Text>
2994
- </Text>
2995
- </Box>
2996
-
2997
- <Box marginTop={1}>
2998
- <Text dimColor>
2999
- Press <Text color={theme.suggestion}>Enter</Text> to continue or{' '}
3000
- <Text color={theme.suggestion}>Esc</Text> to go back
3001
- </Text>
3002
- </Box>
3003
- </Box>
3004
- </Box>
3005
- </Box>
3006
- )
3007
- }
3008
-
3009
- // Render Context Length Selection Screen
3010
- if (currentScreen === 'contextLength') {
3011
- const selectedOption =
3012
- CONTEXT_LENGTH_OPTIONS.find(opt => opt.value === contextLength) ||
3013
- CONTEXT_LENGTH_OPTIONS[2] // Default to 128K
3014
-
3015
- return (
3016
- <Box flexDirection="column" gap={1}>
3017
- <Box
3018
- flexDirection="column"
3019
- gap={1}
3020
- borderStyle="round"
3021
- borderColor={theme.secondaryBorder}
3022
- paddingX={2}
3023
- paddingY={1}
3024
- >
3025
- <Text bold>
3026
- Context Length Configuration{' '}
3027
- {exitState.pending
3028
- ? `(press ${exitState.keyName} again to exit)`
3029
- : ''}
3030
- </Text>
3031
- <Box flexDirection="column" gap={1}>
3032
- <Text bold>Choose the context window length for your model:</Text>
3033
- <Box flexDirection="column" width={70}>
3034
- <Text color={theme.secondaryText}>
3035
- This determines how much conversation history and context the
3036
- model can process at once. Higher values allow for longer
3037
- conversations but may increase costs.
3038
- </Text>
3039
- </Box>
3040
-
3041
- <Box flexDirection="column" marginY={1}>
3042
- {CONTEXT_LENGTH_OPTIONS.map((option, index) => {
3043
- const isSelected = option.value === contextLength
3044
- return (
3045
- <Box key={option.value} flexDirection="row">
3046
- <Text color={isSelected ? 'blue' : undefined}>
3047
- {isSelected ? '→ ' : ' '}
3048
- {option.label}
3049
- {option.value === DEFAULT_CONTEXT_LENGTH
3050
- ? ' (recommended)'
3051
- : ''}
3052
- </Text>
3053
- </Box>
3054
- )
3055
- })}
3056
- </Box>
3057
-
3058
- <Box flexDirection="column" marginY={1}>
3059
- <Text dimColor>
3060
- Selected:{' '}
3061
- <Text color={theme.suggestion}>{selectedOption.label}</Text>
3062
- </Text>
3063
- </Box>
3064
- </Box>
3065
- </Box>
3066
-
3067
- <Box marginLeft={1}>
3068
- <Text dimColor>
3069
- ↑/↓ to select · Enter to continue · Esc to go back
3070
- </Text>
3071
- </Box>
3072
- </Box>
3073
- )
3074
- }
3075
-
3076
- // Render Connection Test Screen
3077
- if (currentScreen === 'connectionTest') {
3078
- const providerDisplayName = getProviderLabel(selectedProvider, 0).split(
3079
- ' (',
3080
- )[0]
3081
-
3082
- return (
3083
- <Box flexDirection="column" gap={1}>
3084
- <Box
3085
- flexDirection="column"
3086
- gap={1}
3087
- borderStyle="round"
3088
- borderColor={theme.secondaryBorder}
3089
- paddingX={2}
3090
- paddingY={1}
3091
- >
3092
- <Text bold>
3093
- Connection Test{' '}
3094
- {exitState.pending
3095
- ? `(press ${exitState.keyName} again to exit)`
3096
- : ''}
3097
- </Text>
3098
- <Box flexDirection="column" gap={1}>
3099
- <Text bold>Testing connection to {providerDisplayName}...</Text>
3100
- <Box flexDirection="column" width={70}>
3101
- <Text color={theme.secondaryText}>
3102
- This will verify your configuration by sending a test request to
3103
- the API.
3104
- {selectedProvider === 'minimax' && (
3105
- <>
3106
- <Newline />
3107
- For MiniMax, we'll test both v2 and v1 endpoints to find the
3108
- best one.
3109
- </>
3110
- )}
3111
- </Text>
3112
- </Box>
3113
-
3114
- {!connectionTestResult && !isTestingConnection && (
3115
- <Box marginY={1}>
3116
- <Text>
3117
- <Text color={theme.suggestion}>Press Enter</Text> to start the
3118
- connection test
3119
- </Text>
3120
- </Box>
3121
- )}
3122
-
3123
- {isTestingConnection && (
3124
- <Box marginY={1}>
3125
- <Text color={theme.suggestion}>🔄 Testing connection...</Text>
3126
- </Box>
3127
- )}
3128
-
3129
- {connectionTestResult && (
3130
- <Box flexDirection="column" marginY={1} paddingX={1}>
3131
- <Text
3132
- color={connectionTestResult.success ? theme.success : 'red'}
3133
- >
3134
- {connectionTestResult.message}
3135
- </Text>
3136
-
3137
- {connectionTestResult.endpoint && (
3138
- <Text color={theme.secondaryText}>
3139
- Endpoint: {connectionTestResult.endpoint}
3140
- </Text>
3141
- )}
3142
-
3143
- {connectionTestResult.details && (
3144
- <Text color={theme.secondaryText}>
3145
- Details: {connectionTestResult.details}
3146
- </Text>
3147
- )}
3148
-
3149
- {connectionTestResult.success ? (
3150
- <Box marginTop={1}>
3151
- <Text color={theme.success}>
3152
- ✅ Automatically proceeding to confirmation...
3153
- </Text>
3154
- </Box>
3155
- ) : (
3156
- <Box marginTop={1}>
3157
- <Text>
3158
- <Text color={theme.suggestion}>Press Enter</Text> to retry
3159
- test, or <Text color={theme.suggestion}>Esc</Text> to go
3160
- back
3161
- </Text>
3162
- </Box>
3163
- )}
3164
- </Box>
3165
- )}
3166
-
3167
- <Box marginTop={1}>
3168
- <Text dimColor>
3169
- Press <Text color={theme.suggestion}>Esc</Text> to go back to
3170
- context length
3171
- </Text>
3172
- </Box>
3173
- </Box>
3174
- </Box>
3175
- </Box>
3176
- )
3177
- }
3178
-
3179
- // Render Confirmation Screen
3180
- if (currentScreen === 'confirmation') {
3181
- // Show model profile being created
3182
-
3183
- // Get provider display name
3184
- const providerDisplayName = getProviderLabel(selectedProvider, 0).split(
3185
- ' (',
3186
- )[0]
3187
-
3188
- // Determine if provider requires API key
3189
- const showsApiKey = selectedProvider !== 'ollama'
3190
-
3191
- return (
3192
- <Box flexDirection="column" gap={1}>
3193
- <Box
3194
- flexDirection="column"
3195
- gap={1}
3196
- borderStyle="round"
3197
- borderColor={theme.secondaryBorder}
3198
- paddingX={2}
3199
- paddingY={1}
3200
- >
3201
- <Text bold>
3202
- Configuration Confirmation{' '}
3203
- {exitState.pending
3204
- ? `(press ${exitState.keyName} again to exit)`
3205
- : ''}
3206
- </Text>
3207
- <Box flexDirection="column" gap={1}>
3208
- <Text bold>Confirm your model configuration:</Text>
3209
- <Box flexDirection="column" width={70}>
3210
- <Text color={theme.secondaryText}>
3211
- Please review your selections before saving.
3212
- </Text>
3213
- </Box>
3214
-
3215
- {validationError && (
3216
- <Box flexDirection="column" marginY={1} paddingX={1}>
3217
- <Text color={theme.error} bold>
3218
- ⚠ Configuration Error:
3219
- </Text>
3220
- <Text color={theme.error}>{validationError}</Text>
3221
- </Box>
3222
- )}
3223
-
3224
- <Box flexDirection="column" marginY={1} paddingX={1}>
3225
- <Text>
3226
- <Text bold>Provider: </Text>
3227
- <Text color={theme.suggestion}>{providerDisplayName}</Text>
3228
- </Text>
3229
-
3230
- {selectedProvider === 'azure' && (
3231
- <Text>
3232
- <Text bold>Resource Name: </Text>
3233
- <Text color={theme.suggestion}>{resourceName}</Text>
3234
- </Text>
3235
- )}
3236
-
3237
- {selectedProvider === 'ollama' && (
3238
- <Text>
3239
- <Text bold>Server URL: </Text>
3240
- <Text color={theme.suggestion}>{ollamaBaseUrl}</Text>
3241
- </Text>
3242
- )}
3243
-
3244
- {selectedProvider === 'custom-openai' && (
3245
- <Text>
3246
- <Text bold>API Base URL: </Text>
3247
- <Text color={theme.suggestion}>{customBaseUrl}</Text>
3248
- </Text>
3249
- )}
3250
-
3251
- <Text>
3252
- <Text bold>Model: </Text>
3253
- <Text color={theme.suggestion}>{selectedModel}</Text>
3254
- </Text>
3255
-
3256
- {apiKey && showsApiKey && (
3257
- <Text>
3258
- <Text bold>API Key: </Text>
3259
- <Text color={theme.suggestion}>****{apiKey.slice(-4)}</Text>
3260
- </Text>
3261
- )}
3262
-
3263
- {maxTokens && (
3264
- <Text>
3265
- <Text bold>Max Tokens: </Text>
3266
- <Text color={theme.suggestion}>{maxTokens}</Text>
3267
- </Text>
3268
- )}
3269
-
3270
- <Text>
3271
- <Text bold>Context Length: </Text>
3272
- <Text color={theme.suggestion}>
3273
- {CONTEXT_LENGTH_OPTIONS.find(
3274
- opt => opt.value === contextLength,
3275
- )?.label || `${contextLength.toLocaleString()} tokens`}
3276
- </Text>
3277
- </Text>
3278
-
3279
- {supportsReasoningEffort && (
3280
- <Text>
3281
- <Text bold>Reasoning Effort: </Text>
3282
- <Text color={theme.suggestion}>{reasoningEffort}</Text>
3283
- </Text>
3284
- )}
3285
- </Box>
3286
-
3287
- <Box marginTop={1}>
3288
- <Text dimColor>
3289
- Press <Text color={theme.suggestion}>Esc</Text> to go back to
3290
- model parameters or <Text color={theme.suggestion}>Enter</Text>{' '}
3291
- to save configuration
3292
- </Text>
3293
- </Box>
3294
- </Box>
3295
- </Box>
3296
- </Box>
3297
- )
3298
- }
3299
-
3300
- // Render Anthropic Sub-Menu Selection Screen
3301
- if (currentScreen === 'anthropicSubMenu') {
3302
- const anthropicOptions = [
3303
- { label: 'Official Anthropic API', value: 'official' },
3304
- { label: 'BigDream (Community Proxy)', value: 'bigdream' },
3305
- { label: 'OpenDev (Community Proxy)', value: 'opendev' },
3306
- { label: 'Custom Anthropic-Compatible API', value: 'custom' },
3307
- ]
3308
-
3309
- return (
3310
- <Box flexDirection="column" gap={1}>
3311
- <Box
3312
- flexDirection="column"
3313
- gap={1}
3314
- borderStyle="round"
3315
- borderColor={theme.secondaryBorder}
3316
- paddingX={2}
3317
- paddingY={1}
3318
- >
3319
- <Text bold>
3320
- Claude Provider Selection{' '}
3321
- {exitState.pending
3322
- ? `(press ${exitState.keyName} again to exit)`
3323
- : ''}
3324
- </Text>
3325
- <Box flexDirection="column" gap={1}>
3326
- <Text bold>
3327
- Choose your Anthropic API access method for this model profile:
3328
- </Text>
3329
- <Box flexDirection="column" width={70}>
3330
- <Text color={theme.secondaryText}>
3331
- • <Text bold>Official Anthropic API:</Text> Direct access to
3332
- Anthropic's official API
3333
- <Newline />• <Text bold>BigDream:</Text> Community proxy
3334
- providing Claude access
3335
- <Newline />• <Text bold>Custom:</Text> Your own
3336
- Anthropic-compatible API endpoint
3337
- </Text>
3338
- </Box>
3339
-
3340
- <Select
3341
- options={anthropicOptions}
3342
- onChange={handleAnthropicProviderSelection}
3343
- />
3344
-
3345
- <Box marginTop={1}>
3346
- <Text dimColor>
3347
- Press <Text color={theme.suggestion}>Esc</Text> to go back to
3348
- provider selection
3349
- </Text>
3350
- </Box>
3351
- </Box>
3352
- </Box>
3353
- </Box>
3354
- )
3355
- }
3356
-
3357
- // Render Provider Selection Screen
3358
- return (
3359
- <ScreenContainer
3360
- title="Provider Selection"
3361
- exitState={exitState}
3362
- children={
3363
- <Box flexDirection="column" gap={1}>
3364
- <Text bold>
3365
- Select your preferred AI provider for this model profile:
3366
- </Text>
3367
- <Box flexDirection="column" width={70}>
3368
- <Text color={theme.secondaryText}>
3369
- Choose the provider you want to use for this model profile.
3370
- <Newline />
3371
- This will determine which models are available to you.
3372
- </Text>
3373
- </Box>
3374
-
3375
- <Select options={providerOptions} onChange={handleProviderSelection} />
3376
-
3377
- <Box marginTop={1}>
3378
- <Text dimColor>
3379
- You can change this later by running{' '}
3380
- <Text color={theme.suggestion}>/model</Text> again
3381
- </Text>
3382
- </Box>
3383
- </Box>
3384
- }
3385
- />
3386
- )
3387
- }