@shareai-lab/kode 1.0.69 → 1.0.71
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +205 -72
- package/README.zh-CN.md +246 -0
- package/cli.js +62 -0
- package/package.json +45 -25
- package/scripts/postinstall.js +56 -0
- package/src/ProjectOnboarding.tsx +180 -0
- package/src/Tool.ts +53 -0
- package/src/commands/approvedTools.ts +53 -0
- package/src/commands/bug.tsx +20 -0
- package/src/commands/clear.ts +43 -0
- package/src/commands/compact.ts +120 -0
- package/src/commands/config.tsx +19 -0
- package/src/commands/cost.ts +18 -0
- package/src/commands/ctx_viz.ts +209 -0
- package/src/commands/doctor.ts +24 -0
- package/src/commands/help.tsx +19 -0
- package/src/commands/init.ts +37 -0
- package/src/commands/listen.ts +42 -0
- package/src/commands/login.tsx +51 -0
- package/src/commands/logout.tsx +40 -0
- package/src/commands/mcp.ts +41 -0
- package/src/commands/model.tsx +40 -0
- package/src/commands/modelstatus.tsx +20 -0
- package/src/commands/onboarding.tsx +34 -0
- package/src/commands/pr_comments.ts +59 -0
- package/src/commands/refreshCommands.ts +54 -0
- package/src/commands/release-notes.ts +34 -0
- package/src/commands/resume.tsx +30 -0
- package/src/commands/review.ts +49 -0
- package/src/commands/terminalSetup.ts +221 -0
- package/src/commands.ts +136 -0
- package/src/components/ApproveApiKey.tsx +93 -0
- package/src/components/AsciiLogo.tsx +13 -0
- package/src/components/AutoUpdater.tsx +148 -0
- package/src/components/Bug.tsx +367 -0
- package/src/components/Config.tsx +289 -0
- package/src/components/ConsoleOAuthFlow.tsx +326 -0
- package/src/components/Cost.tsx +23 -0
- package/src/components/CostThresholdDialog.tsx +46 -0
- package/src/components/CustomSelect/option-map.ts +42 -0
- package/src/components/CustomSelect/select-option.tsx +52 -0
- package/src/components/CustomSelect/select.tsx +143 -0
- package/src/components/CustomSelect/use-select-state.ts +414 -0
- package/src/components/CustomSelect/use-select.ts +35 -0
- package/src/components/FallbackToolUseRejectedMessage.tsx +15 -0
- package/src/components/FileEditToolUpdatedMessage.tsx +66 -0
- package/src/components/Help.tsx +215 -0
- package/src/components/HighlightedCode.tsx +33 -0
- package/src/components/InvalidConfigDialog.tsx +113 -0
- package/src/components/Link.tsx +32 -0
- package/src/components/LogSelector.tsx +86 -0
- package/src/components/Logo.tsx +145 -0
- package/src/components/MCPServerApprovalDialog.tsx +100 -0
- package/src/components/MCPServerDialogCopy.tsx +25 -0
- package/src/components/MCPServerMultiselectDialog.tsx +109 -0
- package/src/components/Message.tsx +219 -0
- package/src/components/MessageResponse.tsx +15 -0
- package/src/components/MessageSelector.tsx +211 -0
- package/src/components/ModeIndicator.tsx +88 -0
- package/src/components/ModelConfig.tsx +301 -0
- package/src/components/ModelListManager.tsx +223 -0
- package/src/components/ModelSelector.tsx +3208 -0
- package/src/components/ModelStatusDisplay.tsx +228 -0
- package/src/components/Onboarding.tsx +274 -0
- package/src/components/PressEnterToContinue.tsx +11 -0
- package/src/components/PromptInput.tsx +710 -0
- package/src/components/SentryErrorBoundary.ts +33 -0
- package/src/components/Spinner.tsx +129 -0
- package/src/components/StructuredDiff.tsx +184 -0
- package/src/components/TextInput.tsx +246 -0
- package/src/components/TokenWarning.tsx +31 -0
- package/src/components/ToolUseLoader.tsx +40 -0
- package/src/components/TrustDialog.tsx +106 -0
- package/src/components/binary-feedback/BinaryFeedback.tsx +63 -0
- package/src/components/binary-feedback/BinaryFeedbackOption.tsx +111 -0
- package/src/components/binary-feedback/BinaryFeedbackView.tsx +172 -0
- package/src/components/binary-feedback/utils.ts +220 -0
- package/src/components/messages/AssistantBashOutputMessage.tsx +22 -0
- package/src/components/messages/AssistantLocalCommandOutputMessage.tsx +45 -0
- package/src/components/messages/AssistantRedactedThinkingMessage.tsx +19 -0
- package/src/components/messages/AssistantTextMessage.tsx +144 -0
- package/src/components/messages/AssistantThinkingMessage.tsx +40 -0
- package/src/components/messages/AssistantToolUseMessage.tsx +123 -0
- package/src/components/messages/UserBashInputMessage.tsx +28 -0
- package/src/components/messages/UserCommandMessage.tsx +30 -0
- package/src/components/messages/UserKodingInputMessage.tsx +28 -0
- package/src/components/messages/UserPromptMessage.tsx +35 -0
- package/src/components/messages/UserTextMessage.tsx +39 -0
- package/src/components/messages/UserToolResultMessage/UserToolCanceledMessage.tsx +12 -0
- package/src/components/messages/UserToolResultMessage/UserToolErrorMessage.tsx +36 -0
- package/src/components/messages/UserToolResultMessage/UserToolRejectMessage.tsx +31 -0
- package/src/components/messages/UserToolResultMessage/UserToolResultMessage.tsx +57 -0
- package/src/components/messages/UserToolResultMessage/UserToolSuccessMessage.tsx +35 -0
- package/src/components/messages/UserToolResultMessage/utils.tsx +56 -0
- package/src/components/permissions/BashPermissionRequest/BashPermissionRequest.tsx +121 -0
- package/src/components/permissions/FallbackPermissionRequest.tsx +155 -0
- package/src/components/permissions/FileEditPermissionRequest/FileEditPermissionRequest.tsx +182 -0
- package/src/components/permissions/FileEditPermissionRequest/FileEditToolDiff.tsx +75 -0
- package/src/components/permissions/FileWritePermissionRequest/FileWritePermissionRequest.tsx +164 -0
- package/src/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.tsx +81 -0
- package/src/components/permissions/FilesystemPermissionRequest/FilesystemPermissionRequest.tsx +242 -0
- package/src/components/permissions/PermissionRequest.tsx +103 -0
- package/src/components/permissions/PermissionRequestTitle.tsx +69 -0
- package/src/components/permissions/hooks.ts +44 -0
- package/src/components/permissions/toolUseOptions.ts +59 -0
- package/src/components/permissions/utils.ts +23 -0
- package/src/constants/betas.ts +5 -0
- package/src/constants/claude-asterisk-ascii-art.tsx +238 -0
- package/src/constants/figures.ts +4 -0
- package/src/constants/keys.ts +3 -0
- package/src/constants/macros.ts +6 -0
- package/src/constants/models.ts +935 -0
- package/src/constants/oauth.ts +18 -0
- package/src/constants/product.ts +17 -0
- package/src/constants/prompts.ts +177 -0
- package/src/constants/releaseNotes.ts +7 -0
- package/src/context/PermissionContext.tsx +149 -0
- package/src/context.ts +278 -0
- package/src/cost-tracker.ts +84 -0
- package/src/entrypoints/cli.tsx +1498 -0
- package/src/entrypoints/mcp.ts +176 -0
- package/src/history.ts +25 -0
- package/src/hooks/useApiKeyVerification.ts +59 -0
- package/src/hooks/useArrowKeyHistory.ts +55 -0
- package/src/hooks/useCanUseTool.ts +138 -0
- package/src/hooks/useCancelRequest.ts +39 -0
- package/src/hooks/useDoublePress.ts +42 -0
- package/src/hooks/useExitOnCtrlCD.ts +31 -0
- package/src/hooks/useInterval.ts +25 -0
- package/src/hooks/useLogMessages.ts +16 -0
- package/src/hooks/useLogStartupTime.ts +12 -0
- package/src/hooks/useNotifyAfterTimeout.ts +65 -0
- package/src/hooks/usePermissionRequestLogging.ts +44 -0
- package/src/hooks/useSlashCommandTypeahead.ts +137 -0
- package/src/hooks/useTerminalSize.ts +49 -0
- package/src/hooks/useTextInput.ts +315 -0
- package/src/messages.ts +37 -0
- package/src/permissions.ts +268 -0
- package/src/query.ts +704 -0
- package/src/screens/ConfigureNpmPrefix.tsx +197 -0
- package/src/screens/Doctor.tsx +219 -0
- package/src/screens/LogList.tsx +68 -0
- package/src/screens/REPL.tsx +792 -0
- package/src/screens/ResumeConversation.tsx +68 -0
- package/src/services/browserMocks.ts +66 -0
- package/src/services/claude.ts +1947 -0
- package/src/services/customCommands.ts +683 -0
- package/src/services/fileFreshness.ts +377 -0
- package/src/services/mcpClient.ts +564 -0
- package/src/services/mcpServerApproval.tsx +50 -0
- package/src/services/notifier.ts +40 -0
- package/src/services/oauth.ts +357 -0
- package/src/services/openai.ts +796 -0
- package/src/services/sentry.ts +3 -0
- package/src/services/statsig.ts +171 -0
- package/src/services/statsigStorage.ts +86 -0
- package/src/services/systemReminder.ts +406 -0
- package/src/services/vcr.ts +161 -0
- package/src/tools/ArchitectTool/ArchitectTool.tsx +122 -0
- package/src/tools/ArchitectTool/prompt.ts +15 -0
- package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx +505 -0
- package/src/tools/BashTool/BashTool.tsx +270 -0
- package/src/tools/BashTool/BashToolResultMessage.tsx +38 -0
- package/src/tools/BashTool/OutputLine.tsx +48 -0
- package/src/tools/BashTool/prompt.ts +174 -0
- package/src/tools/BashTool/utils.ts +56 -0
- package/src/tools/FileEditTool/FileEditTool.tsx +316 -0
- package/src/tools/FileEditTool/prompt.ts +51 -0
- package/src/tools/FileEditTool/utils.ts +58 -0
- package/src/tools/FileReadTool/FileReadTool.tsx +371 -0
- package/src/tools/FileReadTool/prompt.ts +7 -0
- package/src/tools/FileWriteTool/FileWriteTool.tsx +297 -0
- package/src/tools/FileWriteTool/prompt.ts +10 -0
- package/src/tools/GlobTool/GlobTool.tsx +119 -0
- package/src/tools/GlobTool/prompt.ts +8 -0
- package/src/tools/GrepTool/GrepTool.tsx +147 -0
- package/src/tools/GrepTool/prompt.ts +11 -0
- package/src/tools/MCPTool/MCPTool.tsx +106 -0
- package/src/tools/MCPTool/prompt.ts +3 -0
- package/src/tools/MemoryReadTool/MemoryReadTool.tsx +127 -0
- package/src/tools/MemoryReadTool/prompt.ts +3 -0
- package/src/tools/MemoryWriteTool/MemoryWriteTool.tsx +89 -0
- package/src/tools/MemoryWriteTool/prompt.ts +3 -0
- package/src/tools/MultiEditTool/MultiEditTool.tsx +366 -0
- package/src/tools/MultiEditTool/prompt.ts +45 -0
- package/src/tools/NotebookEditTool/NotebookEditTool.tsx +298 -0
- package/src/tools/NotebookEditTool/prompt.ts +3 -0
- package/src/tools/NotebookReadTool/NotebookReadTool.tsx +266 -0
- package/src/tools/NotebookReadTool/prompt.ts +3 -0
- package/src/tools/StickerRequestTool/StickerRequestTool.tsx +93 -0
- package/src/tools/StickerRequestTool/prompt.ts +19 -0
- package/src/tools/TaskTool/TaskTool.tsx +382 -0
- package/src/tools/TaskTool/constants.ts +1 -0
- package/src/tools/TaskTool/prompt.ts +56 -0
- package/src/tools/ThinkTool/ThinkTool.tsx +56 -0
- package/src/tools/ThinkTool/prompt.ts +12 -0
- package/src/tools/TodoWriteTool/TodoWriteTool.tsx +289 -0
- package/src/tools/TodoWriteTool/prompt.ts +63 -0
- package/src/tools/lsTool/lsTool.tsx +269 -0
- package/src/tools/lsTool/prompt.ts +2 -0
- package/src/tools.ts +63 -0
- package/src/types/PermissionMode.ts +120 -0
- package/src/types/RequestContext.ts +72 -0
- package/src/utils/Cursor.ts +436 -0
- package/src/utils/PersistentShell.ts +373 -0
- package/src/utils/agentStorage.ts +97 -0
- package/src/utils/array.ts +3 -0
- package/src/utils/ask.tsx +98 -0
- package/src/utils/auth.ts +13 -0
- package/src/utils/autoCompactCore.ts +223 -0
- package/src/utils/autoUpdater.ts +318 -0
- package/src/utils/betas.ts +20 -0
- package/src/utils/browser.ts +14 -0
- package/src/utils/cleanup.ts +72 -0
- package/src/utils/commands.ts +261 -0
- package/src/utils/config.ts +771 -0
- package/src/utils/conversationRecovery.ts +54 -0
- package/src/utils/debugLogger.ts +1123 -0
- package/src/utils/diff.ts +42 -0
- package/src/utils/env.ts +57 -0
- package/src/utils/errors.ts +21 -0
- package/src/utils/exampleCommands.ts +108 -0
- package/src/utils/execFileNoThrow.ts +51 -0
- package/src/utils/expertChatStorage.ts +136 -0
- package/src/utils/file.ts +402 -0
- package/src/utils/fileRecoveryCore.ts +71 -0
- package/src/utils/format.tsx +44 -0
- package/src/utils/generators.ts +62 -0
- package/src/utils/git.ts +92 -0
- package/src/utils/globalLogger.ts +77 -0
- package/src/utils/http.ts +10 -0
- package/src/utils/imagePaste.ts +38 -0
- package/src/utils/json.ts +13 -0
- package/src/utils/log.ts +382 -0
- package/src/utils/markdown.ts +213 -0
- package/src/utils/messageContextManager.ts +289 -0
- package/src/utils/messages.tsx +938 -0
- package/src/utils/model.ts +836 -0
- package/src/utils/permissions/filesystem.ts +118 -0
- package/src/utils/ripgrep.ts +167 -0
- package/src/utils/sessionState.ts +49 -0
- package/src/utils/state.ts +25 -0
- package/src/utils/style.ts +29 -0
- package/src/utils/terminal.ts +49 -0
- package/src/utils/theme.ts +122 -0
- package/src/utils/thinking.ts +144 -0
- package/src/utils/todoStorage.ts +431 -0
- package/src/utils/tokens.ts +43 -0
- package/src/utils/toolExecutionController.ts +163 -0
- package/src/utils/unaryLogging.ts +26 -0
- package/src/utils/user.ts +37 -0
- package/src/utils/validate.ts +165 -0
- package/cli.mjs +0 -1803
@@ -0,0 +1,796 @@
+import { OpenAI } from 'openai'
+import { getGlobalConfig, GlobalConfig } from '../utils/config'
+import { ProxyAgent, fetch, Response } from 'undici'
+import { setSessionState, getSessionState } from '../utils/sessionState'
+import { logEvent } from '../services/statsig'
+import { debug as debugLogger } from '../utils/debugLogger'
+
+// Helper function to calculate retry delay with exponential backoff
+function getRetryDelay(attempt: number, retryAfter?: string | null): number {
+  // If server suggests a retry-after time, use it
+  if (retryAfter) {
+    const retryAfterMs = parseInt(retryAfter) * 1000
+    if (!isNaN(retryAfterMs) && retryAfterMs > 0) {
+      return Math.min(retryAfterMs, 60000) // Cap at 60 seconds
+    }
+  }
+
+  // Exponential backoff: base delay of 1 second, doubling each attempt
+  const baseDelay = 1000
+  const maxDelay = 32000 // Cap at 32 seconds
+  const delay = baseDelay * Math.pow(2, attempt - 1)
+
+  // Add some jitter to avoid thundering herd
+  const jitter = Math.random() * 0.1 * delay
+
+  return Math.min(delay + jitter, maxDelay)
+}
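With the defaults above the nominal backoff schedule is roughly 1s, 2s, 4s, 8s, 16s, 32s (plus up to 10% jitter), and a server-supplied retry-after value takes precedence, capped at 60s. A minimal sketch of that behavior, assuming getRetryDelay were exported for testing (it is module-private here):

// Sketch only: exercise the backoff schedule.
for (let attempt = 1; attempt <= 6; attempt++) {
  console.log(`attempt ${attempt}: ~${Math.round(getRetryDelay(attempt) / 1000)}s`)
}
console.log(getRetryDelay(3, '120')) // 60000 — retry-after wins, capped at 60s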
+
+// Helper function to create an abortable delay
+function abortableDelay(delayMs: number, signal?: AbortSignal): Promise<void> {
+  return new Promise((resolve, reject) => {
+    // Check if already aborted
+    if (signal?.aborted) {
+      reject(new Error('Request was aborted'))
+      return
+    }
+
+    const timeoutId = setTimeout(() => {
+      resolve()
+    }, delayMs)
+
+    // If signal is provided, listen for abort event
+    if (signal) {
+      const abortHandler = () => {
+        clearTimeout(timeoutId)
+        reject(new Error('Request was aborted'))
+      }
+      signal.addEventListener('abort', abortHandler, { once: true })
+    }
+  })
+}
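A minimal usage sketch for abortableDelay (hypothetical caller, not part of the package):

// Sketch only: the delay resolves normally unless the signal aborts first.
async function demoDelay() {
  const controller = new AbortController()
  setTimeout(() => controller.abort(), 500) // cancel after 500ms
  try {
    await abortableDelay(5000, controller.signal)
  } catch (err) {
    console.log((err as Error).message) // 'Request was aborted'
  }
}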
+
+enum ModelErrorType {
+  MaxLength = '1024',
+  MaxCompletionTokens = 'max_completion_tokens',
+  StreamOptions = 'stream_options',
+  Citations = 'citations',
+  RateLimit = 'rate_limit',
+}
+
+function getModelErrorKey(
+  baseURL: string,
+  model: string,
+  type: ModelErrorType,
+): string {
+  return `${baseURL}:${model}:${type}`
+}
+
+function hasModelError(
+  baseURL: string,
+  model: string,
+  type: ModelErrorType,
+): boolean {
+  return !!getSessionState('modelErrors')[
+    getModelErrorKey(baseURL, model, type)
+  ]
+}
+
+function setModelError(
+  baseURL: string,
+  model: string,
+  type: ModelErrorType,
+  error: string,
+) {
+  setSessionState('modelErrors', {
+    [getModelErrorKey(baseURL, model, type)]: error,
+  })
+}
+
+// More flexible error detection system
+type ErrorDetector = (errMsg: string) => boolean
+type ErrorFixer = (
+  opts: OpenAI.ChatCompletionCreateParams,
+) => Promise<void> | void
+interface ErrorHandler {
+  type: ModelErrorType
+  detect: ErrorDetector
+  fix: ErrorFixer
+}
+
+// Standard error handlers
+const ERROR_HANDLERS: ErrorHandler[] = [
+  {
+    type: ModelErrorType.MaxLength,
+    detect: errMsg =>
+      errMsg.includes('Expected a string with maximum length 1024'),
+    fix: async opts => {
+      const toolDescriptions = {}
+      for (const tool of opts.tools || []) {
+        if (tool.function.description.length <= 1024) continue
+        let str = ''
+        let remainder = ''
+        for (let line of tool.function.description.split('\n')) {
+          if (str.length + line.length < 1024) {
+            str += line + '\n'
+          } else {
+            remainder += line + '\n'
+          }
+        }
+        logEvent('truncated_tool_description', {
+          name: tool.function.name,
+          original_length: String(tool.function.description.length),
+          truncated_length: String(str.length),
+          remainder_length: String(remainder.length),
+        })
+        tool.function.description = str
+        toolDescriptions[tool.function.name] = remainder
+      }
+      if (Object.keys(toolDescriptions).length > 0) {
+        let content = '<additional-tool-usage-instructions>\n\n'
+        for (const [name, description] of Object.entries(toolDescriptions)) {
+          content += `<${name}>\n${description}\n</${name}>\n\n`
+        }
+        content += '</additional-tool-usage-instructions>'
+
+        for (let i = opts.messages.length - 1; i >= 0; i--) {
+          if (opts.messages[i].role === 'system') {
+            opts.messages.splice(i + 1, 0, {
+              role: 'system',
+              content,
+            })
+            break
+          }
+        }
+      }
+    },
+  },
+  {
+    type: ModelErrorType.MaxCompletionTokens,
+    detect: errMsg => errMsg.includes("Use 'max_completion_tokens'"),
+    fix: async opts => {
+      opts.max_completion_tokens = opts.max_tokens
+      delete opts.max_tokens
+    },
+  },
+  {
+    type: ModelErrorType.StreamOptions,
+    detect: errMsg => errMsg.includes('stream_options'),
+    fix: async opts => {
+      delete opts.stream_options
+    },
+  },
+  {
+    type: ModelErrorType.Citations,
+    detect: errMsg =>
+      errMsg.includes('Extra inputs are not permitted') &&
+      errMsg.includes('citations'),
+    fix: async opts => {
+      if (!opts.messages) return
+
+      for (const message of opts.messages) {
+        if (!message) continue
+
+        if (Array.isArray(message.content)) {
+          for (const item of message.content) {
+            // Convert to unknown first to safely access properties
+            if (item && typeof item === 'object') {
+              const itemObj = item as unknown as Record<string, unknown>
+              if ('citations' in itemObj) {
+                delete itemObj.citations
+              }
+            }
+          }
+        } else if (message.content && typeof message.content === 'object') {
+          // Convert to unknown first to safely access properties
+          const contentObj = message.content as unknown as Record<
+            string,
+            unknown
+          >
+          if ('citations' in contentObj) {
+            delete contentObj.citations
+          }
+        }
+      }
+    },
+  },
+]
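Each entry pairs a substring match on the provider's error message with an in-place fix of the request. A sketch of how the max_completion_tokens handler rewrites a request (hypothetical driver and error string; the real dispatch lives in applyModelErrorFixes below):

// Sketch only: run the matching fixer against a request.
async function demoFix() {
  const opts: OpenAI.ChatCompletionCreateParams = {
    model: 'o1-mini',
    messages: [{ role: 'user', content: 'hi' }],
    max_tokens: 1024,
  }
  const errMsg = "Unsupported parameter. Use 'max_completion_tokens' instead." // hypothetical
  for (const handler of ERROR_HANDLERS) {
    if (handler.detect(errMsg)) {
      await handler.fix(opts) // moves max_tokens -> max_completion_tokens
    }
  }
}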
+
+// Rate limit specific detection
+function isRateLimitError(errMsg: string): boolean {
+  if (!errMsg) return false
+  const lowerMsg = errMsg.toLowerCase()
+  return (
+    lowerMsg.includes('rate limit') ||
+    lowerMsg.includes('too many requests') ||
+    lowerMsg.includes('429')
+  )
+}
+
+// Model-specific feature flags - can be extended with more properties as needed
+interface ModelFeatures {
+  usesMaxCompletionTokens: boolean
+}
+
+// Map of model identifiers to their specific features
+const MODEL_FEATURES: Record<string, ModelFeatures> = {
+  // OpenAI thinking models
+  o1: { usesMaxCompletionTokens: true },
+  'o1-preview': { usesMaxCompletionTokens: true },
+  'o1-mini': { usesMaxCompletionTokens: true },
+  'o1-pro': { usesMaxCompletionTokens: true },
+  'o3-mini': { usesMaxCompletionTokens: true },
+}
+
+// Helper to get model features based on model ID/name
+function getModelFeatures(modelName: string): ModelFeatures {
+  // Check for exact matches first
+  if (MODEL_FEATURES[modelName]) {
+    return MODEL_FEATURES[modelName]
+  }
+
+  // Check for partial matches (e.g., if modelName contains a known model ID)
+  for (const [key, features] of Object.entries(MODEL_FEATURES)) {
+    if (modelName.includes(key)) {
+      return features
+    }
+  }
+
+  // Default features for unknown models
+  return { usesMaxCompletionTokens: false }
+}
+
+// Apply model-specific parameter transformations based on model features
+function applyModelSpecificTransformations(
+  opts: OpenAI.ChatCompletionCreateParams,
+): void {
+  if (!opts.model || typeof opts.model !== 'string') {
+    return
+  }
+
+  const features = getModelFeatures(opts.model)
+
+  // Apply transformations based on features
+  if (
+    features.usesMaxCompletionTokens &&
+    'max_tokens' in opts &&
+    !('max_completion_tokens' in opts)
+  ) {
+    opts.max_completion_tokens = opts.max_tokens
+    delete opts.max_tokens
+  }
+
+  // Add more transformations here as needed
+}
+
+async function applyModelErrorFixes(
+  opts: OpenAI.ChatCompletionCreateParams,
+  baseURL: string,
+) {
+  for (const handler of ERROR_HANDLERS) {
+    if (hasModelError(baseURL, opts.model, handler.type)) {
+      await handler.fix(opts)
+      return
+    }
+  }
+}
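A sketch of the proactive transformation path, assuming an o1-family model name (request values hypothetical):

// Sketch only: o1-family models take max_completion_tokens, not max_tokens.
const demoOpts: OpenAI.ChatCompletionCreateParams = {
  model: 'o1-preview',
  messages: [{ role: 'user', content: 'hello' }],
  max_tokens: 512,
}
applyModelSpecificTransformations(demoOpts)
// demoOpts.max_tokens is gone; demoOpts.max_completion_tokens === 512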
+
+// Helper function to try different endpoints for OpenAI-compatible providers
+async function tryWithEndpointFallback(
+  baseURL: string,
+  opts: OpenAI.ChatCompletionCreateParams,
+  headers: Record<string, string>,
+  provider: string,
+  proxy: any,
+  signal?: AbortSignal, // 🔧 Add AbortSignal support
+): Promise<{ response: Response; endpoint: string }> {
+  const endpointsToTry = []
+
+  if (provider === 'minimax') {
+    endpointsToTry.push('/text/chatcompletion_v2', '/chat/completions')
+  } else {
+    endpointsToTry.push('/chat/completions')
+  }
+
+  let lastError = null
+
+  for (const endpoint of endpointsToTry) {
+    try {
+      const response = await fetch(`${baseURL}${endpoint}`, {
+        method: 'POST',
+        headers,
+        body: JSON.stringify(opts.stream ? { ...opts, stream: true } : opts),
+        dispatcher: proxy,
+        signal: signal, // 🔧 Connect AbortSignal to fetch call
+      })
+
+      // If successful, return immediately
+      if (response.ok) {
+        return { response, endpoint }
+      }
+
+      // If it's a 404, try the next endpoint
+      if (response.status === 404 && endpointsToTry.length > 1) {
+        console.log(
+          `Endpoint ${endpoint} returned 404, trying next endpoint...`,
+        )
+        continue
+      }
+
+      // For other error codes, return this response (don't try fallback)
+      return { response, endpoint }
+    } catch (error) {
+      lastError = error
+      // Network errors might be temporary, try next endpoint
+      if (endpointsToTry.indexOf(endpoint) < endpointsToTry.length - 1) {
+        console.log(`Network error on ${endpoint}, trying next endpoint...`)
+        continue
+      }
+    }
+  }
+
+  // If we get here, all endpoints failed
+  throw lastError || new Error('All endpoints failed')
+}
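A sketch of the fallback order, assuming a minimax profile (all values hypothetical; call from an async context):

// Sketch only: minimax tries /text/chatcompletion_v2 first, then /chat/completions on 404.
const { response, endpoint } = await tryWithEndpointFallback(
  'https://api.minimax.example/v1', // hypothetical base URL
  { model: 'abab6.5-chat', messages: [{ role: 'user', content: 'hi' }] },
  { 'Content-Type': 'application/json', Authorization: 'Bearer <key>' },
  'minimax',
  undefined, // no proxy
)
console.log(endpoint) // whichever endpoint answered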
+
+export async function getCompletionWithProfile(
+  modelProfile: any,
+  opts: OpenAI.ChatCompletionCreateParams,
+  attempt: number = 0,
+  maxAttempts: number = 10,
+  signal?: AbortSignal, // 🔧 CRITICAL FIX: Add AbortSignal support
+): Promise<OpenAI.ChatCompletion | AsyncIterable<OpenAI.ChatCompletionChunk>> {
+  if (attempt >= maxAttempts) {
+    throw new Error('Max attempts reached')
+  }
+
+  const provider = modelProfile?.provider || 'anthropic'
+  const baseURL = modelProfile?.baseURL
+  const apiKey = modelProfile?.apiKey
+  const proxy = getGlobalConfig().proxy
+    ? new ProxyAgent(getGlobalConfig().proxy)
+    : undefined
+
+  const headers: Record<string, string> = {
+    'Content-Type': 'application/json',
+  }
+
+  if (apiKey) {
+    if (provider === 'azure') {
+      headers['api-key'] = apiKey
+    } else {
+      headers['Authorization'] = `Bearer ${apiKey}`
+    }
+  }
+
+  applyModelSpecificTransformations(opts)
+  await applyModelErrorFixes(opts, baseURL || '')
+
+  // 🔥 REAL-TIME API CALL DEBUG - uses the global logging system
+  debugLogger.api('OPENAI_API_CALL_START', {
+    endpoint: baseURL || 'DEFAULT_OPENAI',
+    model: opts.model,
+    provider,
+    apiKeyConfigured: !!apiKey,
+    apiKeyPrefix: apiKey ? apiKey.substring(0, 8) : null,
+    maxTokens: opts.max_tokens,
+    temperature: opts.temperature,
+    messageCount: opts.messages?.length || 0,
+    streamMode: opts.stream,
+    timestamp: new Date().toISOString(),
+    modelName: modelProfile?.modelName, // renamed: was a duplicate `modelProfileName` key
+    modelProfileName: modelProfile?.name,
+  })
+
+  // Make sure all tool messages have string content
+  opts.messages = opts.messages.map(msg => {
+    if (msg.role === 'tool') {
+      if (Array.isArray(msg.content)) {
+        return {
+          ...msg,
+          content:
+            msg.content
+              .map(c => c.text || '')
+              .filter(Boolean)
+              .join('\n\n') || '(empty content)',
+        }
+      } else if (typeof msg.content !== 'string') {
+        return {
+          ...msg,
+          content:
+            typeof msg.content === 'undefined'
+              ? '(empty content)'
+              : JSON.stringify(msg.content),
+        }
+      }
+    }
+    return msg
+  })
+
+  // Define Azure-specific API endpoint with version
+  const azureApiVersion = '2024-06-01'
+  let endpoint = '/chat/completions'
+
+  if (provider === 'azure') {
+    endpoint = `/chat/completions?api-version=${azureApiVersion}`
+  } else if (provider === 'minimax') {
+    endpoint = '/text/chatcompletion_v2'
+  }
+
+  try {
+    if (opts.stream) {
+      const isOpenAICompatible = [
+        'minimax',
+        'kimi',
+        'deepseek',
+        'siliconflow',
+        'qwen',
+        'glm',
+        'baidu-qianfan',
+        'openai',
+        'mistral',
+        'xai',
+        'groq',
+        'custom-openai',
+      ].includes(provider)
+
+      let response: Response
+      let usedEndpoint: string
+
+      if (isOpenAICompatible && provider !== 'azure') {
+        const result = await tryWithEndpointFallback(
+          baseURL,
+          opts,
+          headers,
+          provider,
+          proxy,
+          signal, // 🔧 Pass AbortSignal to endpoint fallback
+        )
+        response = result.response
+        usedEndpoint = result.endpoint
+      } else {
+        response = await fetch(`${baseURL}${endpoint}`, {
+          method: 'POST',
+          headers,
+          body: JSON.stringify({ ...opts, stream: true }),
+          dispatcher: proxy,
+          signal: signal, // 🔧 CRITICAL FIX: Connect AbortSignal to fetch call
+        })
+        usedEndpoint = endpoint
+      }
+
+      if (!response.ok) {
+        // 🔧 CRITICAL FIX: Check abort signal BEFORE showing retry message
+        if (signal?.aborted) {
+          throw new Error('Request cancelled by user')
+        }
+
+        const delayMs = getRetryDelay(attempt)
+        console.log(
+          ` ⎿ API error (${response.status}), retrying in ${Math.round(delayMs / 1000)}s... (attempt ${attempt + 1}/${maxAttempts})`,
+        )
+        try {
+          await abortableDelay(delayMs, signal)
+        } catch (error) {
+          // If aborted during delay, throw the error to stop retrying
+          if (error.message === 'Request was aborted') {
+            throw new Error('Request cancelled by user')
+          }
+          throw error
+        }
+        return getCompletionWithProfile(
+          modelProfile,
+          opts,
+          attempt + 1,
+          maxAttempts,
+          signal, // 🔧 Pass AbortSignal to recursive call
+        )
+      }
+
+      const stream = createStreamProcessor(response.body as any)
+      return stream
+    }
+
+    // Non-streaming request
+    const isOpenAICompatible = [
+      'minimax',
+      'kimi',
+      'deepseek',
+      'siliconflow',
+      'qwen',
+      'glm',
+      'baidu-qianfan',
+      'openai',
+      'mistral',
+      'xai',
+      'groq',
+      'custom-openai',
+    ].includes(provider)
+
+    let response: Response
+    let usedEndpoint: string
+
+    if (isOpenAICompatible && provider !== 'azure') {
+      const result = await tryWithEndpointFallback(
+        baseURL,
+        opts,
+        headers,
+        provider,
+        proxy,
+        signal, // 🔧 Pass AbortSignal to endpoint fallback
+      )
+      response = result.response
+      usedEndpoint = result.endpoint
+    } else {
+      response = await fetch(`${baseURL}${endpoint}`, {
+        method: 'POST',
+        headers,
+        body: JSON.stringify(opts),
+        dispatcher: proxy,
+        signal: signal, // 🔧 CRITICAL FIX: Connect AbortSignal to non-streaming fetch call
+      })
+      usedEndpoint = endpoint
+    }
+
+    if (!response.ok) {
+      // 🔧 CRITICAL FIX: Check abort signal BEFORE showing retry message
+      if (signal?.aborted) {
+        throw new Error('Request cancelled by user')
+      }
+
+      const delayMs = getRetryDelay(attempt)
+      console.log(
+        ` ⎿ API error (${response.status}), retrying in ${Math.round(delayMs / 1000)}s... (attempt ${attempt + 1}/${maxAttempts})`,
+      )
+      try {
+        await abortableDelay(delayMs, signal)
+      } catch (error) {
+        // If aborted during delay, throw the error to stop retrying
+        if (error.message === 'Request was aborted') {
+          throw new Error('Request cancelled by user')
+        }
+        throw error
+      }
+      return getCompletionWithProfile(
+        modelProfile,
+        opts,
+        attempt + 1,
+        maxAttempts,
+        signal, // 🔧 Pass AbortSignal to recursive call
+      )
+    }
+
+    const responseData = (await response.json()) as OpenAI.ChatCompletion
+    return responseData
+  } catch (error) {
+    // 🔧 CRITICAL FIX: Check abort signal BEFORE showing retry message
+    if (signal?.aborted) {
+      throw new Error('Request cancelled by user')
+    }
+
+    if (attempt < maxAttempts) {
+      // 🔧 Double-check abort status to avoid showing misleading retry message
+      if (signal?.aborted) {
+        throw new Error('Request cancelled by user')
+      }
+
+      const delayMs = getRetryDelay(attempt)
+      console.log(
+        ` ⎿ Network error, retrying in ${Math.round(delayMs / 1000)}s... (attempt ${attempt + 1}/${maxAttempts})`,
+      )
+      try {
+        await abortableDelay(delayMs, signal)
+      } catch (error) {
+        // If aborted during delay, throw the error to stop retrying
+        if (error.message === 'Request was aborted') {
+          throw new Error('Request cancelled by user')
+        }
+        throw error
+      }
+      return getCompletionWithProfile(
+        modelProfile,
+        opts,
+        attempt + 1,
+        maxAttempts,
+        signal, // 🔧 Pass AbortSignal to recursive call
+      )
+    }
+    throw error
+  }
+}
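A usage sketch for the retry wrapper (profile and model name hypothetical; the field names follow the modelProfile accesses above):

// Sketch only: streaming call with user-driven cancellation.
async function demoCompletion() {
  const profile = {
    provider: 'openai',
    baseURL: 'https://api.openai.com/v1',
    apiKey: process.env.OPENAI_API_KEY,
  }
  const controller = new AbortController()
  const result = await getCompletionWithProfile(
    profile,
    { model: 'gpt-4o-mini', messages: [{ role: 'user', content: 'ping' }], stream: true },
    0,
    10,
    controller.signal,
  )
  for await (const chunk of result as AsyncIterable<OpenAI.ChatCompletionChunk>) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '')
  }
}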
+
+export function createStreamProcessor(
+  stream: any,
+): AsyncGenerator<OpenAI.ChatCompletionChunk, void, unknown> {
+  if (!stream) {
+    throw new Error('Stream is null or undefined')
+  }
+
+  return (async function* () {
+    const reader = stream.getReader()
+    const decoder = new TextDecoder('utf-8')
+    let buffer = ''
+
+    try {
+      while (true) {
+        let readResult
+        try {
+          readResult = await reader.read()
+        } catch (e) {
+          console.error('Error reading from stream:', e)
+          break
+        }
+
+        const { done, value } = readResult
+        if (done) {
+          break
+        }
+
+        const chunk = decoder.decode(value, { stream: true })
+        buffer += chunk
+
+        let lineEnd = buffer.indexOf('\n')
+        while (lineEnd !== -1) {
+          const line = buffer.substring(0, lineEnd).trim()
+          buffer = buffer.substring(lineEnd + 1)
+
+          if (line === 'data: [DONE]') {
+            continue
+          }
+
+          if (line.startsWith('data: ')) {
+            const data = line.slice(6).trim()
+            if (!data) continue
+
+            try {
+              const parsed = JSON.parse(data) as OpenAI.ChatCompletionChunk
+              yield parsed
+            } catch (e) {
+              console.error('Error parsing JSON:', data, e)
+            }
+          }
+
+          lineEnd = buffer.indexOf('\n')
+        }
+      }
+
+      // Process any remaining data in the buffer
+      if (buffer.trim()) {
+        const lines = buffer.trim().split('\n')
+        for (const line of lines) {
+          if (line.startsWith('data: ') && line !== 'data: [DONE]') {
+            const data = line.slice(6).trim()
+            if (!data) continue
+
+            try {
+              const parsed = JSON.parse(data) as OpenAI.ChatCompletionChunk
+              yield parsed
+            } catch (e) {
+              console.error('Error parsing final JSON:', data, e)
+            }
+          }
+        }
+      }
+    } catch (e) {
+      console.error('Unexpected error in stream processing:', e)
+    } finally {
+      try {
+        reader.releaseLock()
+      } catch (e) {
+        console.error('Error releasing reader lock:', e)
+      }
+    }
+  })()
+}
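The parser consumes OpenAI-style Server-Sent Events (`data: {...}` lines terminated by `data: [DONE]`). A self-contained sketch with a synthetic stream (payload hypothetical):

// Sketch only: feed the parser a synthetic SSE body.
async function demoStream() {
  const body = new ReadableStream({
    start(c) {
      const enc = new TextEncoder()
      c.enqueue(enc.encode('data: {"id":"x","choices":[{"delta":{"content":"Hi"}}]}\n'))
      c.enqueue(enc.encode('data: [DONE]\n'))
      c.close()
    },
  })
  for await (const chunk of createStreamProcessor(body)) {
    console.log(chunk.choices[0]?.delta) // { content: 'Hi' }
  }
}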
+
+export function streamCompletion(
+  stream: any,
+): AsyncGenerator<OpenAI.ChatCompletionChunk, void, unknown> {
+  return createStreamProcessor(stream)
+}
+
+/**
+ * Fetch available models from custom OpenAI-compatible API
+ */
+export async function fetchCustomModels(
+  baseURL: string,
+  apiKey: string,
+): Promise<any[]> {
+  try {
+    // Check if baseURL already contains version number (e.g., v1, v2, etc.)
+    const hasVersionNumber = /\/v\d+/.test(baseURL)
+    const cleanBaseURL = baseURL.replace(/\/+$/, '')
+    const modelsURL = hasVersionNumber
+      ? `${cleanBaseURL}/models`
+      : `${cleanBaseURL}/v1/models`
+
+    const response = await fetch(modelsURL, {
+      method: 'GET',
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+        'Content-Type': 'application/json',
+      },
+    })
+
+    if (!response.ok) {
+      // Provide user-friendly error messages based on status code
+      if (response.status === 401) {
+        throw new Error(
+          'Invalid API key. Please check your API key and try again.',
+        )
+      } else if (response.status === 403) {
+        throw new Error(
+          'API key does not have permission to access models. Please check your API key permissions.',
+        )
+      } else if (response.status === 404) {
+        throw new Error(
+          'API endpoint not found. Please check if the base URL is correct and supports the /models endpoint.',
+        )
+      } else if (response.status === 429) {
+        throw new Error(
+          'Too many requests. Please wait a moment and try again.',
+        )
+      } else if (response.status >= 500) {
+        throw new Error(
+          'API service is temporarily unavailable. Please try again later.',
+        )
+      } else {
+        throw new Error(
+          `Unable to connect to API (${response.status}). Please check your base URL, API key, and internet connection.`,
+        )
+      }
+    }
+
+    const data = await response.json()
+
+    // Validate response format and extract models array
+    let models = []
+
+    if (data && data.data && Array.isArray(data.data)) {
+      // Standard OpenAI format: { data: [...] }
+      models = data.data
+    } else if (Array.isArray(data)) {
+      // Direct array format
+      models = data
+    } else if (data && data.models && Array.isArray(data.models)) {
+      // Alternative format: { models: [...] }
+      models = data.models
+    } else {
+      throw new Error(
+        'API returned unexpected response format. Expected an array of models or an object with a "data" or "models" array.',
+      )
+    }
+
+    // Ensure we have an array and validate it contains model objects
+    if (!Array.isArray(models)) {
+      throw new Error('API response format error: models data is not an array.')
+    }
+
+    return models
+  } catch (error) {
+    // If it's already our custom error, pass it through
+    if (
+      error instanceof Error &&
+      (error.message.includes('API key') ||
+        error.message.includes('API endpoint') ||
+        error.message.includes('API service') ||
+        error.message.includes('response format'))
+    ) {
+      throw error
+    }
+
+    // For network errors or other issues
+    console.error('Failed to fetch custom API models:', error)
+
+    // Check if it's a network error
+    if (error instanceof Error && error.message.includes('fetch')) {
+      throw new Error(
+        'Unable to connect to the API. Please check the base URL and your internet connection.',
+      )
+    }
+
+    throw new Error(
+      'Failed to fetch models from custom API. Please check your configuration and try again.',
+    )
+  }
+}
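A usage sketch for the model listing helper (endpoint and key hypothetical; the helper appends /v1/models only when the base URL has no /vN segment):

// Sketch only: list models from an OpenAI-compatible server.
async function demoModels() {
  const models = await fetchCustomModels('https://llm.example.com', 'sk-placeholder')
  console.log(models.map(m => m.id)) // assumes OpenAI-style { id: ... } objects
}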