coding-tool-x 3.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +599 -0
- package/LICENSE +21 -0
- package/README.md +439 -0
- package/bin/ctx.js +8 -0
- package/dist/web/assets/Analytics-DN_YsnkW.js +39 -0
- package/dist/web/assets/Analytics-DuYvId7u.css +1 -0
- package/dist/web/assets/ConfigTemplates-Bidwfdf2.css +1 -0
- package/dist/web/assets/ConfigTemplates-DpXIMy0p.js +1 -0
- package/dist/web/assets/Home-38JTUlYt.js +1 -0
- package/dist/web/assets/Home-CjupSEWE.css +1 -0
- package/dist/web/assets/PluginManager-CX2tgq2H.js +1 -0
- package/dist/web/assets/PluginManager-ROyoZ-6m.css +1 -0
- package/dist/web/assets/ProjectList-C1lDcsn6.js +1 -0
- package/dist/web/assets/ProjectList-oJIyIRkP.css +1 -0
- package/dist/web/assets/SessionList-C55tjV7i.css +1 -0
- package/dist/web/assets/SessionList-CZ7T6rVx.js +1 -0
- package/dist/web/assets/SkillManager-D7pd-d_P.css +1 -0
- package/dist/web/assets/SkillManager-DLN9f79y.js +1 -0
- package/dist/web/assets/WorkspaceManager-CrwgQgmP.css +1 -0
- package/dist/web/assets/WorkspaceManager-DxlHZkpZ.js +1 -0
- package/dist/web/assets/icons-DRrXwWZi.js +1 -0
- package/dist/web/assets/index-CetESrXw.css +1 -0
- package/dist/web/assets/index-Cfvn-2Gb.js +2 -0
- package/dist/web/assets/markdown-BfC0goYb.css +10 -0
- package/dist/web/assets/markdown-C9MYpaSi.js +1 -0
- package/dist/web/assets/naive-ui-DlpKk-8M.js +1 -0
- package/dist/web/assets/vendors-DMjSfzlv.js +7 -0
- package/dist/web/assets/vue-vendor-DET08QYg.js +45 -0
- package/dist/web/favicon.ico +0 -0
- package/dist/web/index.html +20 -0
- package/dist/web/logo.png +0 -0
- package/docs/bannel.png +0 -0
- package/docs/home.png +0 -0
- package/docs/logo.png +0 -0
- package/docs/model-redirection.md +251 -0
- package/docs/multi-channel-load-balancing.md +249 -0
- package/package.json +80 -0
- package/src/commands/channels.js +551 -0
- package/src/commands/cli-type.js +101 -0
- package/src/commands/daemon.js +365 -0
- package/src/commands/doctor.js +333 -0
- package/src/commands/export-config.js +205 -0
- package/src/commands/list.js +222 -0
- package/src/commands/logs.js +261 -0
- package/src/commands/plugin.js +585 -0
- package/src/commands/port-config.js +135 -0
- package/src/commands/proxy-control.js +264 -0
- package/src/commands/proxy.js +152 -0
- package/src/commands/resume.js +137 -0
- package/src/commands/search.js +190 -0
- package/src/commands/security.js +37 -0
- package/src/commands/stats.js +398 -0
- package/src/commands/switch.js +48 -0
- package/src/commands/toggle-proxy.js +247 -0
- package/src/commands/ui.js +99 -0
- package/src/commands/update.js +97 -0
- package/src/commands/workspace.js +454 -0
- package/src/config/default.js +69 -0
- package/src/config/loader.js +149 -0
- package/src/config/model-metadata.js +167 -0
- package/src/config/model-metadata.json +125 -0
- package/src/config/model-pricing.js +35 -0
- package/src/config/paths.js +190 -0
- package/src/index.js +680 -0
- package/src/plugins/constants.js +15 -0
- package/src/plugins/event-bus.js +54 -0
- package/src/plugins/manifest-validator.js +129 -0
- package/src/plugins/plugin-api.js +128 -0
- package/src/plugins/plugin-installer.js +601 -0
- package/src/plugins/plugin-loader.js +229 -0
- package/src/plugins/plugin-manager.js +170 -0
- package/src/plugins/registry.js +152 -0
- package/src/plugins/schema/plugin-manifest.json +115 -0
- package/src/reset-config.js +94 -0
- package/src/server/api/agents.js +826 -0
- package/src/server/api/aliases.js +36 -0
- package/src/server/api/channels.js +368 -0
- package/src/server/api/claude-hooks.js +480 -0
- package/src/server/api/codex-channels.js +417 -0
- package/src/server/api/codex-projects.js +104 -0
- package/src/server/api/codex-proxy.js +195 -0
- package/src/server/api/codex-sessions.js +483 -0
- package/src/server/api/codex-statistics.js +57 -0
- package/src/server/api/commands.js +482 -0
- package/src/server/api/config-export.js +212 -0
- package/src/server/api/config-registry.js +357 -0
- package/src/server/api/config-sync.js +155 -0
- package/src/server/api/config-templates.js +248 -0
- package/src/server/api/config.js +521 -0
- package/src/server/api/convert.js +260 -0
- package/src/server/api/dashboard.js +142 -0
- package/src/server/api/env.js +144 -0
- package/src/server/api/favorites.js +77 -0
- package/src/server/api/gemini-channels.js +366 -0
- package/src/server/api/gemini-projects.js +91 -0
- package/src/server/api/gemini-proxy.js +173 -0
- package/src/server/api/gemini-sessions.js +376 -0
- package/src/server/api/gemini-statistics.js +57 -0
- package/src/server/api/health-check.js +31 -0
- package/src/server/api/mcp.js +399 -0
- package/src/server/api/opencode-channels.js +419 -0
- package/src/server/api/opencode-projects.js +99 -0
- package/src/server/api/opencode-proxy.js +207 -0
- package/src/server/api/opencode-sessions.js +327 -0
- package/src/server/api/opencode-statistics.js +57 -0
- package/src/server/api/plugins.js +463 -0
- package/src/server/api/pm2-autostart.js +269 -0
- package/src/server/api/projects.js +124 -0
- package/src/server/api/prompts.js +279 -0
- package/src/server/api/proxy.js +306 -0
- package/src/server/api/security.js +53 -0
- package/src/server/api/sessions.js +514 -0
- package/src/server/api/settings.js +142 -0
- package/src/server/api/skills.js +570 -0
- package/src/server/api/statistics.js +238 -0
- package/src/server/api/ui-config.js +64 -0
- package/src/server/api/workspaces.js +456 -0
- package/src/server/codex-proxy-server.js +681 -0
- package/src/server/dev-server.js +26 -0
- package/src/server/gemini-proxy-server.js +610 -0
- package/src/server/index.js +422 -0
- package/src/server/opencode-proxy-server.js +4771 -0
- package/src/server/proxy-server.js +669 -0
- package/src/server/services/agents-service.js +1137 -0
- package/src/server/services/alias.js +71 -0
- package/src/server/services/channel-health.js +234 -0
- package/src/server/services/channel-scheduler.js +240 -0
- package/src/server/services/channels.js +447 -0
- package/src/server/services/codex-channels.js +705 -0
- package/src/server/services/codex-config.js +90 -0
- package/src/server/services/codex-parser.js +322 -0
- package/src/server/services/codex-sessions.js +936 -0
- package/src/server/services/codex-settings-manager.js +619 -0
- package/src/server/services/codex-speed-test-template.json +24 -0
- package/src/server/services/codex-statistics-service.js +161 -0
- package/src/server/services/commands-service.js +574 -0
- package/src/server/services/config-export-service.js +1165 -0
- package/src/server/services/config-registry-service.js +828 -0
- package/src/server/services/config-sync-manager.js +941 -0
- package/src/server/services/config-sync-service.js +504 -0
- package/src/server/services/config-templates-service.js +913 -0
- package/src/server/services/enhanced-cache.js +196 -0
- package/src/server/services/env-checker.js +409 -0
- package/src/server/services/env-manager.js +436 -0
- package/src/server/services/favorites.js +165 -0
- package/src/server/services/format-converter.js +620 -0
- package/src/server/services/gemini-channels.js +459 -0
- package/src/server/services/gemini-config.js +73 -0
- package/src/server/services/gemini-sessions.js +689 -0
- package/src/server/services/gemini-settings-manager.js +263 -0
- package/src/server/services/gemini-statistics-service.js +157 -0
- package/src/server/services/health-check.js +85 -0
- package/src/server/services/mcp-client.js +790 -0
- package/src/server/services/mcp-service.js +1732 -0
- package/src/server/services/model-detector.js +1245 -0
- package/src/server/services/network-access.js +80 -0
- package/src/server/services/opencode-channels.js +366 -0
- package/src/server/services/opencode-gateway-adapters.js +1168 -0
- package/src/server/services/opencode-gateway-converter.js +639 -0
- package/src/server/services/opencode-sessions.js +931 -0
- package/src/server/services/opencode-settings-manager.js +478 -0
- package/src/server/services/opencode-statistics-service.js +161 -0
- package/src/server/services/plugins-service.js +1268 -0
- package/src/server/services/prompts-service.js +534 -0
- package/src/server/services/proxy-runtime.js +79 -0
- package/src/server/services/repo-scanner-base.js +708 -0
- package/src/server/services/request-logger.js +130 -0
- package/src/server/services/response-decoder.js +21 -0
- package/src/server/services/security-config.js +131 -0
- package/src/server/services/session-cache.js +127 -0
- package/src/server/services/session-converter.js +577 -0
- package/src/server/services/sessions.js +900 -0
- package/src/server/services/settings-manager.js +163 -0
- package/src/server/services/skill-service.js +1482 -0
- package/src/server/services/speed-test.js +1146 -0
- package/src/server/services/statistics-service.js +1043 -0
- package/src/server/services/ui-config.js +132 -0
- package/src/server/services/workspace-service.js +830 -0
- package/src/server/utils/pricing.js +73 -0
- package/src/server/websocket-server.js +513 -0
- package/src/ui/menu.js +139 -0
- package/src/ui/prompts.js +100 -0
- package/src/utils/format.js +43 -0
- package/src/utils/port-helper.js +108 -0
- package/src/utils/session.js +240 -0
|
@@ -0,0 +1,1245 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model Detector Service
|
|
3
|
+
* Probes model availability for channels and caches results
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
const fs = require('fs');
|
|
7
|
+
const path = require('path');
|
|
8
|
+
const os = require('os');
|
|
9
|
+
const https = require('https');
|
|
10
|
+
const http = require('http');
|
|
11
|
+
const { URL } = require('url');
|
|
12
|
+
const crypto = require('crypto');
|
|
13
|
+
const zlib = require('zlib');
|
|
14
|
+
const { loadConfig } = require('../../config/loader');
|
|
15
|
+
|
|
16
|
+
// Built-in model priority lists, used as a fallback when the loaded user
// config does not provide `defaultModels` (see getModelPriority in this file).
// Arrays are ordered most-preferred first; the probe loop tries them in order.
const MODEL_PRIORITY = {
  claude: [
    'claude-opus-4-6',
    'claude-sonnet-4-6',
    'claude-opus-4-5-20251101',
    'claude-sonnet-4-5-20250929',
    'claude-haiku-4-5-20251001'
  ],
  codex: [
    'gpt-5.2-codex',
    'gpt-5.1-codex-max',
    'gpt-5.1-codex',
    'gpt-5.1-codex-mini',
    'gpt-5-codex',
    'gpt-5.2',
    'gpt-5.1',
    'gpt-5'
  ],
  gemini: [
    'gemini-3-pro-preview',
    'gemini-3-flash-preview',
    'gemini-2.5-pro',
    'gemini-2.5-flash',
    'gemini-2.5-flash-lite'
  ]
};
// openai_compatible channels reuse the codex model list.
// NOTE: this is a shared reference, not a copy — mutating one mutates both.
MODEL_PRIORITY.openai_compatible = MODEL_PRIORITY.codex;
|
|
45
|
+
|
|
46
|
+
/**
 * Normalize a channel/tool type string to a canonical tool type.
 * 'openai_compatible' collapses to 'codex'; any unrecognized value maps to ''.
 * @param {string} type - Raw type string (case/whitespace insensitive)
 * @returns {''|'claude'|'codex'|'gemini'|'opencode'}
 */
function normalizeModelToolType(type) {
  const normalized = String(type || '').trim().toLowerCase();
  if (normalized === 'openai_compatible') {
    return 'codex';
  }
  const knownTypes = ['claude', 'codex', 'gemini', 'opencode'];
  return knownTypes.includes(normalized) ? normalized : '';
}
|
|
54
|
+
|
|
55
|
+
/**
 * Resolve the ordered model-priority list for a channel, preferring the
 * user-configured `defaultModels` from the loaded config.
 * @param {string} channelType - Channel type (claude/codex/gemini/openai_compatible)
 * @param {Object} [options]
 * @param {string} [options.toolType] - Explicit tool type (claude/codex/gemini/opencode)
 * @returns {string[]} Normalized model candidates, or [] when nothing is configured
 */
function getModelPriority(channelType, options = {}) {
  const explicitType = normalizeModelToolType(options.toolType);
  const derivedType = normalizeModelToolType(channelType);
  const rawType = String(channelType || '').trim().toLowerCase();

  // Candidate lookup keys, de-duplicated, in preference order:
  // explicit tool type, then the normalized channel type, then the raw
  // 'openai_compatible' key (which normalizes to 'codex' above but may have
  // its own entry in the config).
  const candidateTypes = [];
  for (const candidate of [explicitType, derivedType]) {
    if (candidate && !candidateTypes.includes(candidate)) {
      candidateTypes.push(candidate);
    }
  }
  if (rawType === 'openai_compatible' && !candidateTypes.includes('openai_compatible')) {
    candidateTypes.push('openai_compatible');
  }

  try {
    const defaultModels = loadConfig()?.defaultModels || {};
    for (const toolType of candidateTypes) {
      const configured = defaultModels[toolType];
      if (Array.isArray(configured) && configured.length > 0) {
        return normalizeModelCandidates(configured);
      }
    }
  } catch (error) {
    console.warn(`[ModelDetector] Failed to load default models config: ${error.message}`);
  }

  return [];
}
|
|
92
|
+
|
|
93
|
+
/**
 * Read the model-discovery settings from the loaded config.
 * Falls back to { useV1ModelsEndpoint: false } when the config cannot load.
 * @returns {{useV1ModelsEndpoint: boolean}}
 */
function getModelDiscoveryConfig() {
  let useV1ModelsEndpoint = false;
  try {
    const config = loadConfig();
    // Strict === true so only an explicit boolean opt-in enables the endpoint.
    useV1ModelsEndpoint = config?.modelDiscovery?.useV1ModelsEndpoint === true;
  } catch (error) {
    console.warn(`[ModelDetector] Failed to load modelDiscovery config: ${error.message}`);
  }
  return { useV1ModelsEndpoint };
}
|
|
106
|
+
|
|
107
|
+
/**
 * Decide whether to use the /v1/models listing endpoint.
 * An explicit boolean in options overrides the config-derived default.
 * @param {Object} [options]
 * @param {boolean} [options.useV1ModelsEndpoint] - Explicit override
 * @returns {boolean}
 */
function shouldUseV1ModelsEndpoint(options = {}) {
  const override = options.useV1ModelsEndpoint;
  return typeof override === 'boolean'
    ? override
    : getModelDiscoveryConfig().useV1ModelsEndpoint;
}
|
|
113
|
+
|
|
114
|
+
// Per-provider probing capabilities.
//   supportsModelList  - whether the provider exposes a model-listing endpoint
//   modelListEndpoint  - path of that endpoint (null when unsupported)
//   fallbackStrategy   - 'probe' = send minimal completion requests instead
//   authHeader         - auth scheme hint for the listing endpoint
const PROVIDER_CAPABILITIES = {
  claude: {
    supportsModelList: false,
    modelListEndpoint: null,
    fallbackStrategy: 'probe'
  },
  codex: {
    supportsModelList: true,
    modelListEndpoint: '/v1/models',
    authHeader: 'Authorization: Bearer'
  },
  gemini: {
    supportsModelList: false,
    modelListEndpoint: null,
    fallbackStrategy: 'probe'
  },
  openai_compatible: {
    supportsModelList: true,
    modelListEndpoint: '/v1/models',
    authHeader: 'Authorization: Bearer'
  }
};
|
|
136
|
+
|
|
137
|
+
/**
 * Auto-detect channel type based on baseUrl.
 * Official hostnames map directly to their provider; every other host is
 * treated as an OpenAI-compatible third-party proxy.
 * @param {Object} channel - Channel configuration (reads channel.baseUrl)
 * @returns {string} - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
 */
function detectChannelType(channel) {
  try {
    // Match on the hostname only, so path segments such as '/claude/aws' on a
    // proxy URL (e.g. https://code.newcli.com/claude/aws) do not cause a
    // false positive.
    const hostname = new URL(channel.baseUrl).hostname.toLowerCase();

    if (hostname.includes('anthropic.com') || hostname.includes('claude.ai')) {
      return 'claude';
    }
    if (hostname.includes('generativelanguage.googleapis.com') || hostname.includes('gemini')) {
      return 'gemini';
    }
    if (hostname.includes('api.openai.com')) {
      return 'codex';
    }

    // All other third-party proxies (88code, anyrouter, internal proxies, ...)
    // default to OpenAI compatible.
    return 'openai_compatible';
  } catch (error) {
    // URL parsing failed — fall back to substring matching on the full URL.
    console.warn(`[ModelDetector] Failed to parse URL ${channel.baseUrl}: ${error.message}`);
    const baseUrl = channel.baseUrl.toLowerCase();

    if (baseUrl.includes('anthropic.com') || baseUrl.includes('claude.ai')) {
      return 'claude';
    }
    // Fix: also honor the 'gemini' hint here, mirroring the hostname path
    // above (the original fallback only checked generativelanguage).
    if (baseUrl.includes('generativelanguage.googleapis.com') || baseUrl.includes('gemini')) {
      return 'gemini';
    }
    if (baseUrl.includes('api.openai.com')) {
      return 'codex';
    }

    return 'openai_compatible';
  }
}
|
|
185
|
+
|
|
186
|
+
// Model name normalization mapping: short/alternate names -> canonical IDs.
// Identity entries (e.g. 'gpt-4o': 'gpt-4o') pin a name as already canonical.
// NOTE(review): 'gpt-5' is remapped to 'gpt-5-codex' here even though both
// IDs appear separately in MODEL_PRIORITY — confirm the remap is intentional.
const MODEL_ALIASES = {
  // Claude variants
  'claude-3-5-haiku': 'claude-3-5-haiku-20241022',
  'claude-haiku-3-5': 'claude-haiku-3-5-20241022',
  'claude-3-haiku': 'claude-3-5-haiku-20241022',
  'claude-sonnet-4': 'claude-sonnet-4-20250514',
  'claude-4-sonnet': 'claude-sonnet-4-20250514',
  'claude-sonnet-4-6': 'claude-sonnet-4-6',
  'claude-4-6-sonnet': 'claude-sonnet-4-6',
  'claude-sonnet-4-5': 'claude-sonnet-4-5-20250929',
  'claude-4-5-sonnet': 'claude-sonnet-4-5-20250929',
  'claude-opus-4': 'claude-opus-4-20250514',
  'claude-4-opus': 'claude-opus-4-20250514',

  // Codex variants
  'gpt-4o': 'gpt-4o',
  'gpt4o': 'gpt-4o',
  'gpt-4-o': 'gpt-4o',
  'gpt-4o-mini': 'gpt-4o-mini',
  'gpt4o-mini': 'gpt-4o-mini',
  'gpt-5': 'gpt-5-codex',
  'gpt5': 'gpt-5-codex',
  'o3': 'o3',

  // Gemini variants
  'gemini-2.5-flash': 'gemini-2.5-flash',
  'gemini-flash': 'gemini-2.5-flash',
  'gemini-2-5-flash': 'gemini-2.5-flash',
  'gemini-2.5-pro': 'gemini-2.5-pro',
  'gemini-pro': 'gemini-2.5-pro',
  'gemini-2-5-pro': 'gemini-2.5-pro'
};
|
|
219
|
+
|
|
220
|
+
// Socket timeout for a single model probe request.
const TEST_TIMEOUT_MS = 10000; // 10 seconds per model test
// Value sent in the `anthropic-beta` header on Claude probes so the request
// resembles Claude Code CLI traffic.
const CLAUDE_CODE_BETA_HEADER = 'claude-code-20250219,interleaved-thinking-2025-05-14';
|
|
222
|
+
|
|
223
|
+
// Substrings that mark a provider error as "this model is unavailable"
// (rather than a transient/auth/network failure). Matched against the
// lowercased error message by classifyProbeResult, and only when the message
// also mentions a model. Mixed English and Chinese hints; the Chinese
// entries are matched verbatim.
const MODEL_UNAVAILABLE_HINTS = [
  'not found',
  'does not exist',
  'invalid model',
  'unsupported model',
  'not supported',
  'model unavailable',
  'deprecated',
  'decommission',
  'retired',
  'offline',
  'unknown model',
  '下线',
  '已下线',
  '已停用',
  '已废弃',
  '已淘汰',
  '模型不存在',
  '无效模型',
  '模型不可用',
  '请切换'
];
|
|
245
|
+
|
|
246
|
+
/**
 * Generate realistic User-Agent strings that mimic official SDKs.
 * @param {string} channelType - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
 * @returns {string} - User-Agent string
 */
function getRealisticUserAgent(channelType) {
  // Fix: removed the unused `nodeVersion` local the original computed.
  const platform = process.platform; // e.g., "darwin", "linux", "win32"

  switch (channelType) {
    case 'claude':
      // Mimics official Anthropic Python SDK
      return `anthropic-sdk-python/0.39.0 python/3.11.4 ${platform}`;
    case 'gemini':
      // Mimics official Google SDK
      return `google-generativeai/0.8.2 python/3.11.4 ${platform}`;
    case 'codex':
    case 'openai_compatible':
    default:
      // Mimics official OpenAI Python SDK
      return `OpenAI/Python/1.56.0`;
  }
}
|
|
269
|
+
|
|
270
|
+
/**
 * Wait a small random interval (100-300ms) between requests to avoid rate
 * limiting and look more human-like.
 * @returns {Promise<void>} resolves after the delay
 */
async function randomDelay() {
  const waitMs = 100 + Math.random() * 200;
  await new Promise((resolve) => setTimeout(resolve, waitMs));
}
|
|
279
|
+
|
|
280
|
+
/**
 * Build common headers for API requests that resemble legitimate SDK clients.
 * @param {string} channelType - Channel type
 * @param {Object} channel - Channel configuration (currently unused here)
 * @returns {Object} - Headers object
 */
function buildRequestHeaders(channelType, channel) {
  const isOpenAiFamily = channelType === 'codex' || channelType === 'openai_compatible';

  const headers = {
    'User-Agent': getRealisticUserAgent(channelType),
    'Accept': 'application/json',
    'Accept-Encoding': 'gzip, deflate',
    'Connection': 'keep-alive',
    // Fresh UUID per request so probes are individually traceable.
    'X-Request-Id': crypto.randomUUID()
  };

  // OpenAI-compatible endpoints additionally expect the beta opt-in header.
  if (isOpenAiFamily) {
    headers['OpenAI-Beta'] = 'assistants=v2';
  }

  return headers;
}
|
|
302
|
+
|
|
303
|
+
/**
 * Join a channel baseUrl with an OpenAI-style endpoint path, avoiding
 * duplicated path segments (e.g. a base that already ends in '/v1' or even
 * in the full endpoint path).
 * @param {string} baseUrl - Channel base URL
 * @param {string} endpoint - Endpoint path such as '/v1/models'
 * @returns {string} Fully joined URL
 * @throws {Error} when baseUrl is empty/blank
 */
function buildOpenAiCompatibleUrl(baseUrl, endpoint) {
  const base = String(baseUrl || '').trim().replace(/\/+$/, '');
  if (!base) {
    throw new Error('Invalid baseUrl');
  }

  const suffix = String(endpoint || '').startsWith('/')
    ? String(endpoint)
    : `/${endpoint}`;

  // Base already ends with the full endpoint path.
  if (base.endsWith(suffix)) {
    return base;
  }

  // Base already ends with the endpoint minus its '/v1' prefix.
  const unversioned = suffix.startsWith('/v1/') ? suffix.slice(3) : suffix;
  if (base.endsWith(unversioned)) {
    return base;
  }

  // Avoid '/v1/v1/...' when the base itself ends with '/v1'.
  if (base.endsWith('/v1') && suffix.startsWith('/v1/')) {
    return `${base}${suffix.slice(3)}`;
  }

  return `${base}${suffix}`;
}
|
|
331
|
+
|
|
332
|
+
/**
 * Build the Claude /v1/messages URL from a channel baseUrl, appending path
 * segments only when missing, and optionally tagging '?beta=true'.
 * @param {string} baseUrl - Channel base URL
 * @param {Object} [options]
 * @param {boolean} [options.withBeta=true] - Append ?beta=true query flag
 * @returns {string}
 */
function buildClaudeMessagesUrl(baseUrl, options = {}) {
  const appendBeta = options.withBeta !== false;
  const target = new URL(String(baseUrl || '').trim());
  const basePath = target.pathname.replace(/\/+$/, '');

  if (!basePath || basePath === '/') {
    target.pathname = '/v1/messages';
  } else if (basePath.endsWith('/messages')) {
    // Already a messages endpoint — keep as-is (minus trailing slashes).
    target.pathname = basePath;
  } else if (basePath.endsWith('/v1')) {
    target.pathname = `${basePath}/messages`;
  } else {
    target.pathname = `${basePath}/v1/messages`;
  }

  if (appendBeta) {
    target.searchParams.set('beta', 'true');
  }
  return target.toString();
}
|
|
353
|
+
|
|
354
|
+
/**
 * Build a Gemini `models/{model}:generateContent` URL from a channel baseUrl,
 * stripping any pre-existing '/models/...' suffix and inserting '/v1beta'
 * unless the base already carries an API version segment.
 * @param {string} baseUrl - Channel base URL
 * @param {string} model - Model ID to probe (required)
 * @param {string} [apiKey=''] - Optional key appended as ?key=
 * @returns {string}
 * @throws {Error} when model is empty/blank
 */
function buildGeminiGenerateContentUrl(baseUrl, model, apiKey = '') {
  const modelName = String(model || '').trim();
  if (!modelName) {
    throw new Error('Model is required for Gemini probe');
  }

  const target = new URL(String(baseUrl || '').trim());

  // Drop any existing '/models/...' suffix so the path can be rebuilt.
  let basePath = target.pathname.replace(/\/+$/, '');
  const modelsIdx = basePath.indexOf('/models');
  if (modelsIdx !== -1) {
    basePath = basePath.slice(0, modelsIdx);
  }

  let apiBase;
  if (!basePath || basePath === '/') {
    apiBase = '/v1beta';
  } else if (basePath.endsWith('/v1beta') || basePath.endsWith('/v1')) {
    apiBase = basePath;
  } else {
    apiBase = `${basePath}/v1beta`;
  }

  target.pathname = `${apiBase}/models/${encodeURIComponent(modelName)}:generateContent`;
  if (apiKey) {
    target.searchParams.set('key', apiKey);
  }
  return target.toString();
}
|
|
382
|
+
|
|
383
|
+
/**
 * Serialize a minimal Claude messages-API probe payload (max_tokens: 1).
 * @param {string} model - Model ID to probe
 * @param {Object} [options]
 * @param {boolean} [options.includeSystem] - Add the Claude Code system prompt
 * @param {boolean} [options.systemAsArray] - Emit system as content blocks
 * @param {boolean} [options.includeMetadata] - Add a randomized session metadata block
 * @returns {string} JSON string
 */
function buildClaudeProbePayload(model, options = {}) {
  const payload = {
    model,
    max_tokens: 1,
    stream: false,
    messages: [{ role: 'user', content: [{ type: 'text', text: 'ping' }] }]
  };

  if (options.includeSystem === true) {
    const systemPrompt = "You are Claude Code, Anthropic's official CLI for Claude.";
    payload.system = options.systemAsArray === true
      ? [{ type: 'text', text: systemPrompt }]
      : systemPrompt;
  }

  if (options.includeMetadata === true) {
    // Random session suffix makes each probe's user_id unique.
    const sessionId = Math.random().toString(36).substring(2, 15);
    payload.metadata = {
      user_id: `user_0000000000000000000000000000000000000000000000000000000000000000_account__session_${sessionId}`
    };
  }

  return JSON.stringify(payload);
}
|
|
411
|
+
|
|
412
|
+
/**
 * Build the probe attempt list for a Claude channel. Currently a single
 * legacy-beta attempt that mimics the Claude Code CLI request shape.
 * @param {Object} channel - Channel config (baseUrl, apiKey)
 * @param {string} model - Model ID to probe
 * @returns {Array<{label: string, url: string, body: string, headers: Object}>}
 */
function createClaudeProbeAttempts(channel, model) {
  const apiKey = channel.apiKey || '';

  // Shared header set; User-Agent is overridden to look like the Claude CLI,
  // and both x-api-key and Bearer auth are sent to satisfy either scheme.
  const commonHeaders = Object.assign(buildRequestHeaders('claude', channel), {
    'Content-Type': 'application/json',
    'x-api-key': apiKey,
    'Authorization': `Bearer ${apiKey}`,
    'anthropic-version': '2023-06-01',
    'anthropic-beta': CLAUDE_CODE_BETA_HEADER,
    'Accept-Encoding': 'gzip, deflate, br',
    'User-Agent': 'claude-cli/2.0.53 (external, cli)'
  });

  const probeBody = buildClaudeProbePayload(model, {
    includeSystem: true,
    systemAsArray: true,
    includeMetadata: true
  });

  const legacyAttempt = {
    label: 'claude-code-legacy-beta',
    url: buildClaudeMessagesUrl(channel.baseUrl, { withBeta: true }),
    body: probeBody,
    headers: Object.assign({}, commonHeaders, {
      'anthropic-dangerous-direct-browser-access': 'true',
      'x-app': 'cli',
      'x-stainless-lang': 'js',
      'x-stainless-runtime': 'node'
    })
  };

  return [legacyAttempt];
}
|
|
446
|
+
|
|
447
|
+
/**
 * Build the probe attempt list for Codex / OpenAI-compatible channels: a
 * single minimal /v1/responses request that mimics the Codex CLI.
 * @param {Object} channel - Channel config (baseUrl, apiKey)
 * @param {string} model - Model ID to probe
 * @returns {Array<{label: string, url: string, body: string, headers: Object}>}
 */
function createCodexProbeAttempts(channel, model) {
  const apiKey = channel.apiKey || '';

  const commonHeaders = Object.assign(buildRequestHeaders('codex', channel), {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${apiKey}`,
    'User-Agent': 'codex_cli_rs/0.65.0'
  });

  // Minimal one-token, non-streaming, non-stored request.
  const probeBody = JSON.stringify({
    model,
    instructions: 'You are Codex.',
    input: [{ type: 'message', role: 'user', content: [{ type: 'input_text', text: 'ping' }] }],
    max_output_tokens: 1,
    stream: false,
    store: false
  });

  const responsesAttempt = {
    label: 'codex-responses',
    url: buildOpenAiCompatibleUrl(channel.baseUrl, '/v1/responses'),
    body: probeBody,
    headers: Object.assign({}, commonHeaders, {
      'openai-beta': 'responses=experimental'
    })
  };

  return [responsesAttempt];
}
|
|
477
|
+
|
|
478
|
+
/**
 * Build the single probe attempt for a Gemini channel: a one-token
 * generateContent request carrying the key via query, Bearer, and
 * x-goog-api-key so any auth scheme is satisfied.
 * @param {Object} channel - Channel config (baseUrl, apiKey)
 * @param {string} model - Model ID to probe
 * @returns {{label: string, url: string, body: string, headers: Object}}
 */
function createGeminiProbeAttempt(channel, model) {
  const apiKey = channel.apiKey || '';

  const probeBody = JSON.stringify({
    contents: [{ role: 'user', parts: [{ text: 'test' }] }],
    generationConfig: { maxOutputTokens: 1, temperature: 0 }
  });

  const headers = Object.assign(buildRequestHeaders('gemini', channel), {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${apiKey}`,
    'x-goog-api-key': apiKey,
    'User-Agent': 'google-genai-sdk/0.8.0'
  });

  return {
    label: 'gemini-generate-content',
    url: buildGeminiGenerateContentUrl(channel.baseUrl, model, apiKey),
    body: probeBody,
    headers
  };
}
|
|
498
|
+
|
|
499
|
+
/**
 * Dispatch to the provider-specific probe-attempt builder.
 * @param {Object} channel - Channel config
 * @param {string} channelType - 'claude' | 'codex' | 'openai_compatible' | 'gemini'
 * @param {string} model - Model ID to probe
 * @returns {Array} probe attempts ([] for unknown types)
 */
function buildProbeAttempts(channel, channelType, model) {
  const kind = String(channelType || '').trim().toLowerCase();
  switch (kind) {
    case 'claude':
      return createClaudeProbeAttempts(channel, model);
    case 'codex':
    case 'openai_compatible':
      return createCodexProbeAttempts(channel, model);
    case 'gemini':
      return [createGeminiProbeAttempt(channel, model)];
    default:
      return [];
  }
}
|
|
512
|
+
|
|
513
|
+
/**
 * Pull a human-readable error message out of a provider response body.
 * Tries, in order: JSON string body, error.message, error (string),
 * message, detail — otherwise returns the raw body as-is.
 * @param {string} responseBody - Raw response body (may be empty/non-JSON)
 * @returns {string}
 */
function extractProbeErrorMessage(responseBody) {
  const fallback = String(responseBody || '');

  let parsed;
  try {
    parsed = JSON.parse(responseBody || '{}');
  } catch {
    return fallback;
  }

  if (typeof parsed === 'string') {
    return parsed;
  }

  const candidates = [parsed?.error?.message, parsed?.error, parsed?.message, parsed?.detail];
  for (const candidate of candidates) {
    if (typeof candidate === 'string') {
      return candidate;
    }
  }
  return fallback;
}
|
|
527
|
+
|
|
528
|
+
/**
 * Classify a probe outcome as 'available', 'unavailable', or 'retry'.
 * - 2xx status => 'available'
 * - error message that mentions a model AND matches an unavailability hint
 *   => 'unavailable'
 * - anything else (auth, transient, unknown) => 'retry'
 * @param {number} statusCode - HTTP status (0 on transport failure)
 * @param {string} responseBody - Raw response body
 * @param {string} model - Model that was probed
 * @returns {'available'|'unavailable'|'retry'}
 */
function classifyProbeResult(statusCode, responseBody, model) {
  if (statusCode >= 200 && statusCode < 300) {
    return 'available';
  }

  const errorMsg = extractProbeErrorMessage(responseBody).toLowerCase();
  const modelLower = String(model || '').toLowerCase();
  // Only treat the error as a model-availability signal when the message
  // actually talks about a model (English or Chinese) or names the probed one.
  const hasModelContext = errorMsg.includes('model')
    || errorMsg.includes('模型')
    || (modelLower && errorMsg.includes(modelLower));

  if (hasModelContext && MODEL_UNAVAILABLE_HINTS.some(hint => errorMsg.includes(hint))) {
    return 'unavailable';
  }

  // Fix: the original had a status-code conditional (400/404/405/415/422/501)
  // whose branch returned 'retry' immediately before an unconditional
  // `return 'retry'` — redundant dead code, removed with identical behavior.
  return 'retry';
}
|
|
549
|
+
|
|
550
|
+
/**
 * Collapse whitespace in an error message and cap it at 180 characters.
 * null/undefined map to ''.
 * @param {*} value - Raw message value
 * @returns {string}
 */
function sanitizeProbeErrorMessage(value) {
  if (value == null) {
    return '';
  }
  const collapsed = String(value).replace(/\s+/g, ' ').trim();
  return collapsed.slice(0, 180);
}
|
|
554
|
+
|
|
555
|
+
/**
 * Build a compact failure record for a failed probe attempt.
 * Prefers the transport error message; falls back to the parsed body message.
 * @param {Object} attempt - The attempt that was executed
 * @param {Object} result - Result from executeProbeAttempt
 * @returns {{attempt: string, statusCode: number, message: string}|null}
 */
function buildProbeFailureDetail(attempt, result) {
  if (!attempt || !result) {
    return null;
  }

  const rawMessage = result.error?.message
    ? result.error.message
    : extractProbeErrorMessage(result.responseBody);

  return {
    attempt: attempt.label || 'unknown',
    statusCode: Number(result.statusCode) || 0,
    message: sanitizeProbeErrorMessage(rawMessage)
  };
}
|
|
568
|
+
|
|
569
|
+
/**
 * Render a failure detail as a parenthesized suffix for log lines,
 * e.g. " (attempt=x, status=404, msg=bad)". Empty string when nothing to show.
 * @param {{attempt?: string, statusCode?: number, message?: string}|null} detail
 * @returns {string}
 */
function formatProbeFailureDetail(detail) {
  if (!detail) {
    return '';
  }
  const segments = [
    detail.attempt ? `attempt=${detail.attempt}` : null,
    detail.statusCode ? `status=${detail.statusCode}` : null,
    detail.message ? `msg=${detail.message}` : null
  ].filter(Boolean);

  return segments.length > 0 ? ` (${segments.join(', ')})` : '';
}
|
|
578
|
+
|
|
579
|
+
/**
 * Execute a single HTTP probe attempt and resolve with the outcome.
 * This function never rejects: network errors, idle timeouts, body-decode
 * failures, and URL/setup failures all resolve with
 * { statusCode: 0, responseBody: '', error }.
 * @param {{url: string, body: string, headers: Object}} attempt
 * @returns {Promise<{statusCode: number, responseBody: string, error?: Error}>}
 */
function executeProbeAttempt(attempt) {
  return new Promise((resolve) => {
    try {
      const parsedUrl = new URL(attempt.url);
      const isHttps = parsedUrl.protocol === 'https:';
      const httpModule = isHttps ? https : http;
      const requestBody = String(attempt.body || '');
      // Content-Length must reflect byte length, not character count.
      const headers = {
        ...(attempt.headers || {}),
        'Content-Length': Buffer.byteLength(requestBody)
      };

      const options = {
        hostname: parsedUrl.hostname,
        port: parsedUrl.port || (isHttps ? 443 : 80),
        path: parsedUrl.pathname + parsedUrl.search,
        method: 'POST',
        timeout: TEST_TIMEOUT_MS,
        headers
      };

      const req = httpModule.request(options, (res) => {
        // collectResponseBody (defined elsewhere in this file) buffers and
        // decodes the response stream; a decode failure is reported as a
        // resolved error result, not a rejection.
        collectResponseBody(res)
          .then((data) => {
            resolve({
              statusCode: res.statusCode || 0,
              responseBody: data
            });
          })
          .catch((error) => {
            resolve({
              statusCode: 0,
              responseBody: '',
              error
            });
          });
      });

      // Transport-level failure (DNS, refused connection, reset, ...).
      req.on('error', (error) => resolve({
        statusCode: 0,
        responseBody: '',
        error
      }));
      // Idle-socket timeout: abort the request and surface it as an error.
      req.on('timeout', () => {
        req.destroy();
        resolve({
          statusCode: 0,
          responseBody: '',
          error: new Error('Request timeout')
        });
      });

      req.write(requestBody);
      req.end();
    } catch (error) {
      // URL parsing or request construction failed before any I/O happened.
      resolve({
        statusCode: 0,
        responseBody: '',
        error
      });
    }
  });
}
|
|
642
|
+
|
|
643
|
+
/**
 * Wrap an HTTP response in the matching decompression stream based on its
 * Content-Encoding header (gzip, deflate, or brotli when available).
 * Returns the response unchanged for identity/unknown encodings.
 * @param {Object} res - HTTP response (readable stream with .headers).
 * @returns {Object} A readable stream yielding decoded bytes.
 */
function createDecodedStream(res) {
  const encoding = String(res.headers['content-encoding'] || '').toLowerCase();

  if (encoding.includes('gzip')) {
    return res.pipe(zlib.createGunzip());
  }
  if (encoding.includes('deflate')) {
    return res.pipe(zlib.createInflate());
  }
  const brotliSupported = typeof zlib.createBrotliDecompress === 'function';
  if (encoding.includes('br') && brotliSupported) {
    return res.pipe(zlib.createBrotliDecompress());
  }

  return res;
}
|
|
652
|
+
|
|
653
|
+
/**
 * Read an HTTP response to completion as a UTF-8 string, transparently
 * decompressing it according to Content-Encoding.
 * Rejects on stream errors from either the decoded stream or the raw response.
 * @param {Object} res - HTTP response stream.
 * @returns {Promise<string>} Full decoded body.
 */
function collectResponseBody(res) {
  return new Promise((resolve, reject) => {
    const decoded = createDecodedStream(res);
    const chunks = [];

    decoded.on('data', (chunk) => {
      chunks.push(chunk.toString('utf8'));
    });
    decoded.on('end', () => resolve(chunks.join('')));
    decoded.on('error', reject);
    res.on('error', reject);
  });
}
|
|
666
|
+
|
|
667
|
+
/**
 * Resolve the on-disk location of the channel-model cache file,
 * creating the ~/.cc-tool directory on first use.
 * @returns {string} Absolute path to channel-models.json.
 */
function getCacheFilePath() {
  const cacheDir = path.join(os.homedir(), '.cc-tool');
  const dirMissing = !fs.existsSync(cacheDir);
  if (dirMissing) {
    fs.mkdirSync(cacheDir, { recursive: true });
  }
  return path.join(cacheDir, 'channel-models.json');
}
|
|
677
|
+
|
|
678
|
+
/**
 * Load the channel-model cache from disk.
 *
 * Always returns a plain object keyed by channel ID. Returns {} when the
 * cache file is missing, unreadable, or contains anything other than a JSON
 * object — callers index into the result (`cache[channelId]`) and assign to
 * it, so a corrupted file holding e.g. `null` or a bare number must not
 * leak through (previously it did, and `cache[key]` on `null` threw).
 * @returns {Object} Cache keyed by channel ID.
 */
function loadModelCache() {
  const cachePath = getCacheFilePath();
  try {
    if (fs.existsSync(cachePath)) {
      const content = fs.readFileSync(cachePath, 'utf8');
      const parsed = JSON.parse(content);
      // JSON.parse can yield null/number/string/array for a corrupted file;
      // only a plain object is a usable cache shape.
      if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
        return parsed;
      }
      console.error('[ModelDetector] Ignoring malformed cache file (not a JSON object)');
    }
  } catch (error) {
    console.error('[ModelDetector] Error loading cache:', error.message);
  }
  return {};
}
|
|
693
|
+
|
|
694
|
+
/**
 * Persist the channel-model cache to disk as pretty-printed JSON.
 * Failures are logged and swallowed — caching is best-effort.
 * @param {Object} cache - Cache object keyed by channel ID.
 */
function saveModelCache(cache) {
  const cachePath = getCacheFilePath();
  try {
    const serialized = JSON.stringify(cache, null, 2);
    fs.writeFileSync(cachePath, serialized, 'utf8');
  } catch (error) {
    console.error('[ModelDetector] Error saving cache:', error.message);
  }
}
|
|
705
|
+
|
|
706
|
+
/**
 * Normalize a model name to its canonical form via the alias table.
 * Lookup is case-insensitive; when no alias matches, the original string is
 * returned unchanged (preserving its original casing).
 * @param {string} model - Raw model name.
 * @returns {string|null} Canonical model name, or null for falsy input.
 */
function normalizeModelName(model) {
  if (!model) return null;

  const lookupKey = model.toLowerCase().trim();
  const alias = MODEL_ALIASES[lookupKey];
  return alias || model;
}
|
|
717
|
+
|
|
718
|
+
/**
 * Canonicalize a list of model names: trim, resolve aliases, drop
 * non-strings/empties, and de-duplicate case-insensitively while keeping
 * first-seen order.
 * @param {string[]} [models=[]] - Raw candidate names.
 * @returns {string[]} Deduplicated canonical names.
 */
function normalizeModelCandidates(models = []) {
  if (!Array.isArray(models)) return [];

  const seen = new Set();
  const result = [];

  for (const candidate of models) {
    if (typeof candidate !== 'string') continue;
    const trimmed = candidate.trim();
    if (!trimmed) continue;

    const canonical = normalizeModelName(trimmed) || trimmed;
    const dedupeKey = canonical.toLowerCase();
    if (seen.has(dedupeKey)) continue;

    seen.add(dedupeKey);
    result.push(canonical);
  }

  return result;
}
|
|
736
|
+
|
|
737
|
+
/**
 * Deterministically serialize a value: like JSON.stringify, but with object
 * keys emitted in sorted order so structurally-equal payloads always produce
 * the same string (used to build cache signatures).
 *
 * null and undefined both serialize to 'null'; other primitives follow
 * JSON.stringify semantics.
 *
 * @param {*} value - Value to serialize.
 * @returns {string} Canonical string form.
 * @throws {TypeError} On circular references. (The previous implementation
 *   recursed without a guard and overflowed the stack; JSON.stringify throws
 *   a TypeError in the same situation.)
 */
function stableStringify(value) {
  // Tracks objects on the current recursion path only (entries are removed
  // on the way back up), so shared-but-acyclic references remain legal.
  const inProgress = new Set();

  const serialize = (node) => {
    if (node === null || node === undefined) return 'null';
    if (Array.isArray(node)) {
      if (inProgress.has(node)) {
        throw new TypeError('stableStringify: converting circular structure');
      }
      inProgress.add(node);
      const body = node.map(serialize).join(',');
      inProgress.delete(node);
      return `[${body}]`;
    }
    if (typeof node !== 'object') return JSON.stringify(node);

    if (inProgress.has(node)) {
      throw new TypeError('stableStringify: converting circular structure');
    }
    inProgress.add(node);
    const keys = Object.keys(node).sort();
    const body = keys
      .map((key) => `${JSON.stringify(key)}:${serialize(node[key])}`)
      .join(',');
    inProgress.delete(node);
    return `{${body}}`;
  };

  return serialize(value);
}
|
|
750
|
+
|
|
751
|
+
/**
 * Derive a stable SHA-1 signature for a channel plus probe context.
 * Only the channel fields that affect probing are folded in; structurally
 * equal inputs always hash to the same hex digest (via stableStringify).
 * @param {Object} channel - Channel configuration.
 * @param {Object} [payload={}] - Extra context to mix into the signature.
 * @returns {string} 40-character hex SHA-1 digest.
 */
function buildChannelCacheSignature(channel, payload = {}) {
  const redirects = Array.isArray(channel?.modelRedirects) ? channel.modelRedirects : [];
  const fingerprint = {
    id: channel?.id || '',
    name: channel?.name || '',
    baseUrl: channel?.baseUrl || '',
    apiKey: channel?.apiKey || '',
    gatewaySourceType: channel?.gatewaySourceType || '',
    wireApi: channel?.wireApi || '',
    model: channel?.model || '',
    speedTestModel: channel?.speedTestModel || '',
    presetId: channel?.presetId || '',
    modelConfig: channel?.modelConfig || null,
    modelRedirects: redirects
  };

  const canonical = stableStringify({ channel: fingerprint, payload });
  return crypto.createHash('sha1').update(canonical).digest('hex');
}
|
|
772
|
+
|
|
773
|
+
/**
 * Check whether a cache entry still matches an expected signature.
 * @param {Object} cacheEntry - Cached entry holding signature fields.
 * @param {string} signatureKey - Which signature field to compare.
 * @param {string} expectedSignature - Freshly computed signature.
 * @returns {boolean} true only when all inputs are present and the stored
 *   signature strictly equals the expected one.
 */
function isSignatureCacheValid(cacheEntry, signatureKey, expectedSignature) {
  const inputsPresent = Boolean(cacheEntry) && Boolean(signatureKey) && Boolean(expectedSignature);
  return inputsPresent && cacheEntry[signatureKey] === expectedSignature;
}
|
|
777
|
+
|
|
778
|
+
/**
 * Test whether a specific model is reachable on a channel, trying every
 * probe attempt variant in order until one is conclusive.
 * @param {Object} channel - Channel configuration
 * @param {string} channelType - 'claude' | 'codex' | 'gemini'
 * @param {string} model - Model name to test
 * @returns {Promise<{available: boolean, failureDetail: Object|null}>}
 *   failureDetail (from buildProbeFailureDetail) describes the FIRST
 *   failed attempt, or is null on success / when nothing was probed.
 */
async function testModelAvailabilityDetailed(channel, channelType, model) {
  try {
    const attempts = buildProbeAttempts(channel, channelType, model);
    if (!attempts.length) {
      return { available: false, failureDetail: null };
    }

    let failureDetail = null;
    for (const attempt of attempts) {
      const result = await executeProbeAttempt(attempt);
      if (result.error) {
        // Transport-level failure: remember the first one, try next variant.
        if (!failureDetail) {
          failureDetail = buildProbeFailureDetail(attempt, result);
        }
        continue;
      }

      const verdict = classifyProbeResult(result.statusCode, result.responseBody, model);
      if (verdict === 'available') {
        return { available: true, failureDetail: null };
      }

      if (!failureDetail) {
        failureDetail = buildProbeFailureDetail(attempt, result);
      }
      // 'unavailable' is a definitive negative — stop probing further
      // variants; any other verdict (e.g. 'retry') falls through to the
      // next attempt.
      if (verdict === 'unavailable') {
        return { available: false, failureDetail };
      }
    }

    // All attempts exhausted without a definitive positive.
    return { available: false, failureDetail };
  } catch {
    // Best-effort probe: any unexpected error counts as "not available".
    return { available: false, failureDetail: null };
  }
}
|
|
820
|
+
|
|
821
|
+
/**
 * Boolean convenience wrapper around testModelAvailabilityDetailed.
 * @param {Object} channel - Channel configuration.
 * @param {string} channelType - 'claude' | 'codex' | 'gemini'.
 * @param {string} model - Model name to test.
 * @returns {Promise<boolean>} true when the model responded as available.
 */
async function testModelAvailability(channel, channelType, model) {
  const { available } = await testModelAvailabilityDetailed(channel, channelType, model);
  return available;
}
|
|
825
|
+
|
|
826
|
+
/**
 * Probe model availability for a channel.
 * Tests candidate models in priority order (caller-preferred models first)
 * and collects every one that responds as available — unless
 * options.stopOnFirstAvailable is set, in which case probing stops at the
 * first hit.
 *
 * Results are cached on disk, keyed by channel ID and validated by a
 * signature over the channel config and probe options; the cache is reused
 * until that signature changes or forceRefresh is passed (no time-based
 * expiry is applied here).
 *
 * @param {Object} channel - Channel configuration
 * @param {string} channelType - 'claude' | 'codex' | 'gemini'
 * @param {Object} [options] - { forceRefresh?, toolType?, stopOnFirstAvailable?, preferredModels? }
 * @returns {Promise<Object>} { availableModels: string[], preferredTestModel: string|null, cached: boolean, lastChecked: string }
 */
async function probeModelAvailability(channel, channelType, options = {}) {
  const forceRefresh = !!options.forceRefresh;
  const toolType = options.toolType;
  const stopOnFirstAvailable = !!options.stopOnFirstAvailable;
  const cache = loadModelCache();
  const cacheKey = channel.id;
  const preferredModels = normalizeModelCandidates(options.preferredModels);
  // Signature covers both channel config and probe options so any change
  // to either invalidates the cached verdicts.
  const probeSignature = buildChannelCacheSignature(channel, {
    type: 'probe',
    channelType: String(channelType || '').trim().toLowerCase(),
    toolType: String(toolType || '').trim().toLowerCase(),
    stopOnFirstAvailable,
    preferredModels
  });

  // Return cached result if channel and probe options are unchanged
  if (!forceRefresh && isSignatureCacheValid(cache[cacheKey], 'probeSignature', probeSignature)) {
    return {
      availableModels: cache[cacheKey].availableModels || [],
      preferredTestModel: cache[cacheKey].preferredTestModel || null,
      cached: true,
      lastChecked: cache[cacheKey].lastChecked
    };
  }

  // Get model priority list for this channel type; caller preferences are
  // probed first, then the defaults (deduplicated case-insensitively).
  const priorityModels = normalizeModelCandidates(getModelPriority(channelType, { toolType }));
  const modelsToTest = normalizeModelCandidates([...preferredModels, ...priorityModels]);
  if (modelsToTest.length === 0) {
    console.warn(`[ModelDetector] No models defined for channel type: ${channelType}`);
    return {
      availableModels: [],
      preferredTestModel: null,
      cached: false,
      lastChecked: new Date().toISOString()
    };
  }

  console.log(`[ModelDetector] Testing models for channel ${channel.name} (${channelType})...`);

  const availableModels = [];
  let isFirstModel = true;

  // Test models in priority order (sequentially, on purpose — see delay below)
  for (const model of modelsToTest) {
    // Add delay between model tests to avoid rate limiting (skip first)
    if (!isFirstModel) {
      await randomDelay();
    }
    isFirstModel = false;

    const probeResult = await testModelAvailabilityDetailed(channel, channelType, model);
    const isAvailable = probeResult.available;

    if (isAvailable) {
      availableModels.push(model);
      console.log(`[ModelDetector] ✓ ${model} available`);
      if (stopOnFirstAvailable) {
        break;
      }
    } else {
      console.log(`[ModelDetector] ✗ ${model} not available${formatProbeFailureDetail(probeResult.failureDetail)}`);
    }
  }

  // Highest-priority available model doubles as the preferred test model.
  const preferredTestModel = availableModels.length > 0 ? availableModels[0] : null;

  // Update cache — preserve the fetched-model-list fields written by
  // fetchModelsFromProvider so the two caches don't clobber each other.
  const cacheEntry = {
    lastChecked: new Date().toISOString(),
    availableModels,
    preferredTestModel,
    probeSignature,
    fetchedModels: cache[cacheKey]?.fetchedModels || [],
    listSignature: cache[cacheKey]?.listSignature || null
  };

  cache[cacheKey] = cacheEntry;
  saveModelCache(cache);

  console.log(`[ModelDetector] Found ${availableModels.length} available model(s) for ${channel.name}`);

  return {
    availableModels,
    preferredTestModel,
    cached: false,
    lastChecked: cacheEntry.lastChecked
  };
}
|
|
924
|
+
|
|
925
|
+
/**
 * Remove cached model data for one channel, or for every channel.
 * @param {string|null} [channelId=null] - Channel ID to clear; null clears all.
 */
function clearCache(channelId = null) {
  const cache = loadModelCache();

  if (channelId) {
    delete cache[channelId];
    console.log(`[ModelDetector] Cleared cache for channel: ${channelId}`);
  } else {
    // Empty the object in place rather than replacing it.
    for (const key of Object.keys(cache)) {
      delete cache[key];
    }
    console.log('[ModelDetector] Cleared all model cache');
  }

  saveModelCache(cache);
}
|
|
943
|
+
|
|
944
|
+
/**
 * Read cached model info for a channel without any network probing.
 * @param {string} channelId - Channel ID.
 * @returns {Object|null} The cache entry when it carries at least one model
 *   list (availableModels or fetchedModels); null when absent or malformed.
 */
function getCachedModelInfo(channelId) {
  const entry = loadModelCache()[channelId];
  if (!entry) return null;

  const hasModelLists = Array.isArray(entry.availableModels) || Array.isArray(entry.fetchedModels);
  return hasModelLists ? entry : null;
}
|
|
959
|
+
|
|
960
|
+
/**
 * Fetch available models from the provider's model-list endpoint
 * (e.g. /v1/models), with signature-validated on-disk caching.
 * Never rejects: every outcome resolves to a uniform result object.
 * @param {Object} channel - Channel configuration
 * @param {string} channelType - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
 * @param {Object} [options] - { forceRefresh?, ... } (also consulted by shouldUseV1ModelsEndpoint)
 * @returns {Promise<Object>} { models: string[], supported: boolean, cached: boolean, error: string|null, fallbackUsed: boolean, errorHint?, statusCode?, lastChecked?, disabledByConfig? }
 */
async function fetchModelsFromProvider(channel, channelType, options = {}) {
  const forceRefresh = !!options.forceRefresh;
  const useV1ModelsEndpoint = shouldUseV1ModelsEndpoint(options);
  // Only auto-detect if channelType is NOT specified at all
  // DO NOT auto-detect when channelType is 'claude' - respect the caller's intent
  if (!channelType) {
    channelType = detectChannelType(channel);
    console.log(`[ModelDetector] Auto-detected channel type: ${channelType} for ${channel.name}`);
  }

  // Model-list probing can be switched off by configuration; report that
  // the default probing strategy is in effect instead.
  if (!useV1ModelsEndpoint) {
    return {
      models: [],
      supported: true,
      fallbackUsed: true,
      cached: false,
      disabledByConfig: true,
      error: '已关闭 /v1/models 模型列表探测',
      errorHint: '当前使用默认模型探测策略'
    };
  }

  // Check if provider supports model listing
  const capability = PROVIDER_CAPABILITIES[channelType];
  if (!capability || !capability.supportsModelList) {
    return {
      models: [],
      supported: false,
      fallbackUsed: true,
      cached: false,
      error: null
    };
  }

  const cache = loadModelCache();
  const cacheKey = channel.id;
  const listSignature = buildChannelCacheSignature(channel, {
    type: 'model-list',
    channelType: String(channelType || '').trim().toLowerCase()
  });

  // Check cache first, and only reuse when channel/list context is unchanged
  if (!forceRefresh
    && isSignatureCacheValid(cache[cacheKey], 'listSignature', listSignature)
    && Array.isArray(cache[cacheKey].fetchedModels)) {
    return {
      models: cache[cacheKey].fetchedModels || [],
      supported: true,
      cached: true,
      fallbackUsed: false,
      error: null,
      lastChecked: cache[cacheKey].lastChecked
    };
  }

  return new Promise((resolve) => {
    try {
      const baseUrl = channel.baseUrl.trim().replace(/\/+$/, '');
      const endpoint = capability.modelListEndpoint; // e.g. '/v1/models'
      // Avoid duplicating the path: if baseUrl already ends in /v1,
      // append only the part after the endpoint's /v1 prefix.
      const requestUrl = baseUrl.endsWith('/v1') && endpoint.startsWith('/v1/')
        ? `${baseUrl}${endpoint.slice(3)}`
        : `${baseUrl}${endpoint}`;

      const parsedUrl = new URL(requestUrl);
      const isHttps = parsedUrl.protocol === 'https:';
      const httpModule = isHttps ? https : http;

      // Use realistic SDK headers to avoid anti-crawler detection
      const headers = buildRequestHeaders(channelType, channel);

      // Add authentication header
      if (capability.authHeader) {
        if (channel.apiKey) {
          headers['Authorization'] = `Bearer ${channel.apiKey}`;
        }
      }

      // NOTE(review): this `options` shadows the function parameter of the
      // same name; the parameter is no longer read past this point.
      const options = {
        hostname: parsedUrl.hostname,
        port: parsedUrl.port || (isHttps ? 443 : 80),
        path: parsedUrl.pathname + parsedUrl.search,
        method: 'GET',
        timeout: TEST_TIMEOUT_MS,
        headers
      };

      const req = httpModule.request(options, (res) => {
        collectResponseBody(res)
          .then((data) => {
            // Handle different status codes
            if (res.statusCode === 200) {
              try {
                const response = JSON.parse(data);

                // Parse OpenAI-compatible format: { data: [{ id: "model-name", ... }] }
                let models = [];
                if (response.data && Array.isArray(response.data)) {
                  models = response.data
                    .map(item => item.id || item.model)
                    .filter(Boolean);
                }

                // Update cache with fetched models, preserving the probe
                // fields written by probeModelAvailability.
                const cacheEntry = {
                  lastChecked: new Date().toISOString(),
                  fetchedModels: models,
                  availableModels: cache[cacheKey]?.availableModels || [],
                  preferredTestModel: cache[cacheKey]?.preferredTestModel || null,
                  probeSignature: cache[cacheKey]?.probeSignature || null,
                  listSignature
                };

                cache[cacheKey] = cacheEntry;
                saveModelCache(cache);

                console.log(`[ModelDetector] Fetched ${models.length} models from ${channel.name}`);

                resolve({
                  models,
                  supported: true,
                  cached: false,
                  fallbackUsed: false,
                  error: null,
                  lastChecked: cacheEntry.lastChecked
                });
              } catch (parseError) {
                console.error(`[ModelDetector] Failed to parse models response: ${parseError.message}`);
                resolve({
                  models: [],
                  supported: true,
                  cached: false,
                  fallbackUsed: true,
                  error: `Parse error: ${parseError.message}`
                });
              }
            } else if (res.statusCode === 401 || res.statusCode === 403) {
              // Check if it's a Cloudflare protection issue
              const bodyLower = data.toLowerCase();
              const isCloudflare = bodyLower.includes('cloudflare') || bodyLower.includes('challenge') || bodyLower.includes('cf-ray');

              let errorMessage;
              let errorHint;

              if (isCloudflare) {
                // Cloudflare challenge page: listing is effectively
                // unsupported here, so supported:false and no fallback models.
                errorMessage = 'Cloudflare 防护拦截,无法自动获取模型列表';
                errorHint = '该 API 端点受 Cloudflare 保护,请手动填写模型名称';
                console.warn(`[ModelDetector] Cloudflare protection detected for ${channel.name}, no fallback models injected`);
                resolve({
                  models: [],
                  supported: false,
                  cached: false,
                  fallbackUsed: true,
                  error: errorMessage,
                  errorHint: errorHint,
                  statusCode: res.statusCode
                });
              } else if (res.statusCode === 401) {
                errorMessage = 'API 密钥认证失败';
                errorHint = '请检查 API 密钥是否正确配置';
                console.error(`[ModelDetector] Authentication failed for ${channel.name}: ${res.statusCode} - ${errorMessage}`);
                resolve({
                  models: [],
                  supported: true,
                  cached: false,
                  fallbackUsed: true,
                  error: errorMessage,
                  errorHint: errorHint,
                  statusCode: res.statusCode
                });
              } else {
                errorMessage = '访问被拒绝';
                errorHint = '请检查 API 密钥权限或联系服务提供商';
                console.error(`[ModelDetector] Access denied for ${channel.name}: ${res.statusCode} - ${errorMessage}`);
                resolve({
                  models: [],
                  supported: true,
                  cached: false,
                  fallbackUsed: true,
                  error: errorMessage,
                  errorHint: errorHint,
                  statusCode: res.statusCode
                });
              }
            } else if (res.statusCode === 404) {
              // Endpoint missing entirely — mark listing unsupported.
              console.warn(`[ModelDetector] Model list endpoint not found for ${channel.name}`);
              resolve({
                models: [],
                supported: false,
                cached: false,
                fallbackUsed: true,
                error: '模型列表端点不存在',
                errorHint: '该 API 可能不支持 /v1/models 接口,请手动输入模型名称',
                statusCode: 404
              });
            } else if (res.statusCode === 429) {
              console.warn(`[ModelDetector] Rate limited for ${channel.name}`);
              resolve({
                models: [],
                supported: true,
                cached: false,
                fallbackUsed: true,
                error: '请求频率限制',
                errorHint: '请稍后再试或联系服务提供商提高限额',
                statusCode: 429
              });
            } else {
              console.error(`[ModelDetector] Unexpected status ${res.statusCode} for ${channel.name}`);
              resolve({
                models: [],
                supported: true,
                cached: false,
                fallbackUsed: true,
                error: `HTTP 错误 ${res.statusCode}`,
                errorHint: '请检查 API 端点配置或联系服务提供商',
                statusCode: res.statusCode
              });
            }
          })
          .catch((error) => {
            console.error(`[ModelDetector] Failed to read models response: ${error.message}`);
            resolve({
              models: [],
              supported: true,
              cached: false,
              fallbackUsed: true,
              error: `Read error: ${error.message}`
            });
          });
      });

      req.on('error', (error) => {
        console.error(`[ModelDetector] Network error fetching models from ${channel.name}: ${error.message}`);
        resolve({
          models: [],
          supported: true,
          cached: false,
          fallbackUsed: true,
          error: `Network error: ${error.message}`
        });
      });

      req.on('timeout', () => {
        req.destroy();
        console.error(`[ModelDetector] Timeout fetching models from ${channel.name}`);
        resolve({
          models: [],
          supported: true,
          cached: false,
          fallbackUsed: true,
          error: 'Request timeout'
        });
      });

      req.end();

    } catch (error) {
      // Synchronous failures (e.g. invalid baseUrl) resolve like any other
      // error so callers never need try/catch.
      console.error(`[ModelDetector] Error in fetchModelsFromProvider: ${error.message}`);
      resolve({
        models: [],
        supported: true,
        cached: false,
        fallbackUsed: true,
        error: error.message
      });
    }
  });
}
|
|
1234
|
+
|
|
1235
|
+
// Public API of the model-detector module; MODEL_PRIORITY is a module-level
// constant re-exported alongside the functions. Keep these names stable —
// external callers depend on them.
module.exports = {
  probeModelAvailability,
  testModelAvailability,
  getModelPriority,
  normalizeModelName,
  clearCache,
  getCachedModelInfo,
  fetchModelsFromProvider,
  detectChannelType,
  MODEL_PRIORITY
};
|