@adversity/coding-tool-x 3.0.0 → 3.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +31 -0
- package/dist/web/assets/{index-AtwYwBZD.js → index-DfPKAt9R.js} +2 -2
- package/dist/web/assets/{index-BNHWEpD4.css → index-TjhcaFRe.css} +1 -1
- package/dist/web/assets/naive-ui-B1TP-0TP.js +1 -0
- package/dist/web/index.html +3 -3
- package/package.json +1 -1
- package/src/server/api/channels.js +52 -1
- package/src/server/api/config-export.js +7 -1
- package/src/server/api/proxy.js +63 -6
- package/src/server/services/channels.js +47 -3
- package/src/server/services/codex-channels.js +31 -0
- package/src/server/services/config-export-service.js +324 -1
- package/src/server/services/gemini-channels.js +95 -5
- package/src/server/services/model-detector.js +269 -0
- package/src/server/services/speed-test.js +72 -24
- package/dist/web/assets/naive-ui-BcSq2wzw.js +0 -1
|
@@ -23,6 +23,78 @@ const MODEL_PRIORITY = {
|
|
|
23
23
|
gemini: ['gemini-2.5-flash', 'gemini-2.5-pro']
|
|
24
24
|
};
|
|
25
25
|
|
|
26
|
+
// Per-provider capability table for model discovery.
//  - supportsModelList: provider exposes an enumerable model-list endpoint
//  - modelListEndpoint: path of that endpoint (null when unsupported)
//  - authHeader:        marker that Bearer auth must be sent with the request
//  - fallbackStrategy:  'probe' = actively test candidate models instead
const PROVIDER_CAPABILITIES = {
  claude: { supportsModelList: false, modelListEndpoint: null, fallbackStrategy: 'probe' },
  codex: { supportsModelList: true, modelListEndpoint: '/v1/models', authHeader: 'Authorization: Bearer' },
  gemini: { supportsModelList: false, modelListEndpoint: null, fallbackStrategy: 'probe' },
  openai_compatible: { supportsModelList: true, modelListEndpoint: '/v1/models', authHeader: 'Authorization: Bearer' }
};
|
|
48
|
+
|
|
49
|
+
/**
 * Auto-detect channel type based on baseUrl.
 *
 * Classification is done on the URL *hostname*, not the full URL, so that
 * third-party proxies whose path merely mentions a provider
 * (e.g. https://code.newcli.com/claude/aws) are not misclassified.
 *
 * @param {Object} channel - Channel configuration; `channel.baseUrl` is read.
 * @returns {string} - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
 */
function detectChannelType(channel) {
  // Guard: a missing/empty baseUrl cannot be classified. The original code
  // threw an uncaught TypeError here — `new URL(undefined)` failed, and the
  // catch handler then called `.toLowerCase()` on the same undefined value.
  if (!channel || typeof channel.baseUrl !== 'string' || channel.baseUrl === '') {
    console.warn('[ModelDetector] Missing baseUrl on channel; defaulting to openai_compatible');
    return 'openai_compatible';
  }

  try {
    // Parse the URL to extract hostname
    const parsedUrl = new URL(channel.baseUrl);
    const hostname = parsedUrl.hostname.toLowerCase();

    // Check if it's official Anthropic API (hostname only, not path)
    if (hostname.includes('anthropic.com') || hostname.includes('claude.ai')) {
      return 'claude';
    }

    // Check if it's Gemini (hostname only)
    if (hostname.includes('generativelanguage.googleapis.com') || hostname.includes('gemini')) {
      return 'gemini';
    }

    // Check if it's OpenAI official (hostname only)
    if (hostname.includes('api.openai.com')) {
      return 'codex';
    }

    // All other third-party proxies default to OpenAI compatible
    // Including: 88code, anyrouter, internal proxies, etc.
    // This correctly handles URLs like https://code.newcli.com/claude/aws
    return 'openai_compatible';
  } catch (error) {
    // If URL parsing fails, fall back to string matching on the full URL.
    // Note: no substring check for 'gemini' here — on a full URL that would
    // over-match paths, unlike the hostname check above.
    console.warn(`[ModelDetector] Failed to parse URL ${channel.baseUrl}: ${error.message}`);
    const baseUrl = channel.baseUrl.toLowerCase();

    if (baseUrl.includes('anthropic.com') || baseUrl.includes('claude.ai')) {
      return 'claude';
    }
    if (baseUrl.includes('generativelanguage.googleapis.com')) {
      return 'gemini';
    }
    if (baseUrl.includes('api.openai.com')) {
      return 'codex';
    }

    return 'openai_compatible';
  }
}
|
|
97
|
+
|
|
26
98
|
// Model name normalization mapping
|
|
27
99
|
const MODEL_ALIASES = {
|
|
28
100
|
// Claude variants
|
|
@@ -340,11 +412,208 @@ function getCachedModelInfo(channelId) {
|
|
|
340
412
|
return null;
|
|
341
413
|
}
|
|
342
414
|
|
|
415
|
+
/**
 * Fetch available models from provider's /v1/models endpoint.
 *
 * Results are cached per channel id; a valid cache entry short-circuits the
 * network request entirely. All failure modes resolve (never reject) with a
 * descriptive result object so callers can fall back to probing.
 *
 * @param {Object} channel - Channel configuration
 * @param {string} channelType - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
 * @returns {Promise<Object>} { models: string[], supported: boolean, cached: boolean, error: string|null, fallbackUsed: boolean }
 */
async function fetchModelsFromProvider(channel, channelType) {
  // If no type specified or type is 'claude', auto-detect.
  // NOTE(review): an explicitly passed 'claude' is deliberately re-detected
  // here (many proxies are registered as 'claude' but are OpenAI-compatible)
  // — confirm this is still the intended behavior.
  if (!channelType || channelType === 'claude') {
    channelType = detectChannelType(channel);
    console.log(`[ModelDetector] Auto-detected channel type: ${channelType} for ${channel.name}`);
  }

  // Check if provider supports model listing; if not, tell the caller to
  // use its fallback (probing) path immediately.
  const capability = PROVIDER_CAPABILITIES[channelType];
  if (!capability || !capability.supportsModelList) {
    return {
      models: [],
      supported: false,
      fallbackUsed: true,
      cached: false,
      error: null
    };
  }

  const cache = loadModelCache();
  const cacheKey = channel.id;

  // Check cache first: serve a still-valid entry without hitting the network.
  if (cache[cacheKey] && isCacheValid(cache[cacheKey]) && cache[cacheKey].fetchedModels) {
    return {
      models: cache[cacheKey].fetchedModels || [],
      supported: true,
      cached: true,
      fallbackUsed: false,
      error: null,
      lastChecked: cache[cacheKey].lastChecked
    };
  }

  // Wrap the callback-style http/https request; every outcome resolves.
  return new Promise((resolve) => {
    try {
      // Strip trailing slashes so `${baseUrl}${endpoint}` never doubles a '/'.
      const baseUrl = channel.baseUrl.trim().replace(/\/+$/, '');
      const endpoint = capability.modelListEndpoint;
      const requestUrl = `${baseUrl}${endpoint}`;

      const parsedUrl = new URL(requestUrl);
      const isHttps = parsedUrl.protocol === 'https:';
      const httpModule = isHttps ? https : http;

      const headers = {
        'User-Agent': 'Coding-Tool-ModelDetector/1.0',
        'Accept': 'application/json'
      };

      // Add authentication header (Bearer token) when the provider needs it.
      if (capability.authHeader && channel.apiKey) {
        headers['Authorization'] = `Bearer ${channel.apiKey}`;
      }

      const options = {
        hostname: parsedUrl.hostname,
        port: parsedUrl.port || (isHttps ? 443 : 80),
        path: parsedUrl.pathname + parsedUrl.search,
        method: 'GET',
        timeout: TEST_TIMEOUT_MS,
        headers
      };

      const req = httpModule.request(options, (res) => {
        let data = '';
        res.on('data', chunk => { data += chunk; });
        res.on('end', () => {
          // Handle different status codes
          if (res.statusCode === 200) {
            try {
              const response = JSON.parse(data);

              // Parse OpenAI-compatible format: { data: [{ id: "model-name", ... }] }
              let models = [];
              if (response.data && Array.isArray(response.data)) {
                models = response.data
                  .map(item => item.id || item.model)
                  .filter(Boolean);
              }

              // Update cache with fetched models, preserving any probe data
              // (availableModels / preferredTestModel) already stored.
              const cacheEntry = {
                lastChecked: new Date().toISOString(),
                fetchedModels: models,
                availableModels: cache[cacheKey]?.availableModels || [],
                preferredTestModel: cache[cacheKey]?.preferredTestModel || null
              };

              cache[cacheKey] = cacheEntry;
              saveModelCache(cache);

              console.log(`[ModelDetector] Fetched ${models.length} models from ${channel.name}`);

              resolve({
                models,
                supported: true,
                cached: false,
                fallbackUsed: false,
                error: null,
                lastChecked: cacheEntry.lastChecked
              });
            } catch (parseError) {
              // 200 with a non-JSON body: report but let the caller fall back.
              console.error(`[ModelDetector] Failed to parse models response: ${parseError.message}`);
              resolve({
                models: [],
                supported: true,
                cached: false,
                fallbackUsed: true,
                error: `Parse error: ${parseError.message}`
              });
            }
          } else if (res.statusCode === 401 || res.statusCode === 403) {
            // Bad/missing credentials — endpoint exists, so supported stays true.
            console.error(`[ModelDetector] Authentication failed for ${channel.name}: ${res.statusCode}`);
            resolve({
              models: [],
              supported: true,
              cached: false,
              fallbackUsed: true,
              error: `Authentication failed: ${res.statusCode}`
            });
          } else if (res.statusCode === 404) {
            // Endpoint missing — the only non-200 case marked unsupported.
            console.warn(`[ModelDetector] Model list endpoint not found for ${channel.name}`);
            resolve({
              models: [],
              supported: false,
              cached: false,
              fallbackUsed: true,
              error: 'Endpoint not found (404)'
            });
          } else if (res.statusCode === 429) {
            console.warn(`[ModelDetector] Rate limited for ${channel.name}`);
            resolve({
              models: [],
              supported: true,
              cached: false,
              fallbackUsed: true,
              error: 'Rate limited (429)'
            });
          } else {
            console.error(`[ModelDetector] Unexpected status ${res.statusCode} for ${channel.name}`);
            resolve({
              models: [],
              supported: true,
              cached: false,
              fallbackUsed: true,
              error: `HTTP ${res.statusCode}`
            });
          }
        });
      });

      req.on('error', (error) => {
        // DNS/connection-level failure before or during the response.
        console.error(`[ModelDetector] Network error fetching models from ${channel.name}: ${error.message}`);
        resolve({
          models: [],
          supported: true,
          cached: false,
          fallbackUsed: true,
          error: `Network error: ${error.message}`
        });
      });

      req.on('timeout', () => {
        // destroy() aborts the socket; a later 'error' resolve is a no-op
        // since a promise can only settle once.
        req.destroy();
        console.error(`[ModelDetector] Timeout fetching models from ${channel.name}`);
        resolve({
          models: [],
          supported: true,
          cached: false,
          fallbackUsed: true,
          error: 'Request timeout'
        });
      });

      req.end();

    } catch (error) {
      // Synchronous setup failure (e.g. invalid URL).
      console.error(`[ModelDetector] Error in fetchModelsFromProvider: ${error.message}`);
      resolve({
        models: [],
        supported: true,
        cached: false,
        fallbackUsed: true,
        error: error.message
      });
    }
  });
}
|
|
609
|
+
|
|
343
610
|
// Public API of the model-detector module. fetchModelsFromProvider and
// detectChannelType are exported alongside the existing probing helpers.
module.exports = {
  probeModelAvailability,
  testModelAvailability,
  normalizeModelName,
  clearCache,
  getCachedModelInfo,
  fetchModelsFromProvider,
  detectChannelType,
  MODEL_PRIORITY
};
|
|
@@ -199,10 +199,22 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
199
199
|
// Probe model availability if channel is provided
|
|
200
200
|
let modelProbe = null;
|
|
201
201
|
if (channel) {
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
202
|
+
// Check if speedTestModel is explicitly configured
|
|
203
|
+
if (channel.speedTestModel) {
|
|
204
|
+
// Use the explicitly configured model for speed testing
|
|
205
|
+
modelProbe = {
|
|
206
|
+
preferredTestModel: channel.speedTestModel,
|
|
207
|
+
availableModels: [channel.speedTestModel],
|
|
208
|
+
cached: false
|
|
209
|
+
};
|
|
210
|
+
console.log(`[SpeedTest] Using configured speedTestModel: ${channel.speedTestModel}`);
|
|
211
|
+
} else {
|
|
212
|
+
// Fall back to auto-detection
|
|
213
|
+
try {
|
|
214
|
+
modelProbe = await probeModelAvailability(channel, channelType);
|
|
215
|
+
} catch (error) {
|
|
216
|
+
console.error('[SpeedTest] Model detection failed:', error.message);
|
|
217
|
+
}
|
|
206
218
|
}
|
|
207
219
|
}
|
|
208
220
|
|
|
@@ -243,7 +255,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
243
255
|
const sessionId = Math.random().toString(36).substring(2, 15);
|
|
244
256
|
requestBody = JSON.stringify({
|
|
245
257
|
model: testModel,
|
|
246
|
-
max_tokens:
|
|
258
|
+
max_tokens: 1,
|
|
247
259
|
stream: true,
|
|
248
260
|
messages: [{ role: 'user', content: [{ type: 'text', text: 'Hi' }] }],
|
|
249
261
|
system: [{ type: 'text', text: "You are Claude Code, Anthropic's official CLI for Claude." }],
|
|
@@ -288,7 +300,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
288
300
|
model: testModel,
|
|
289
301
|
instructions: 'You are Codex.',
|
|
290
302
|
input: [{ type: 'message', role: 'user', content: [{ type: 'input_text', text: 'ping' }] }],
|
|
291
|
-
max_output_tokens:
|
|
303
|
+
max_output_tokens: 1,
|
|
292
304
|
stream: false,
|
|
293
305
|
store: false
|
|
294
306
|
});
|
|
@@ -309,7 +321,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
309
321
|
testModel = modelProbe?.preferredTestModel || model || 'gemini-2.5-pro';
|
|
310
322
|
requestBody = JSON.stringify({
|
|
311
323
|
model: testModel,
|
|
312
|
-
max_tokens:
|
|
324
|
+
max_tokens: 1,
|
|
313
325
|
messages: [{ role: 'user', content: 'Hi' }]
|
|
314
326
|
});
|
|
315
327
|
headers = {
|
|
@@ -325,7 +337,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
325
337
|
}
|
|
326
338
|
requestBody = JSON.stringify({
|
|
327
339
|
model: 'gpt-4o-mini',
|
|
328
|
-
max_tokens:
|
|
340
|
+
max_tokens: 1,
|
|
329
341
|
messages: [{ role: 'user', content: 'Hi' }]
|
|
330
342
|
});
|
|
331
343
|
headers = {
|
|
@@ -359,6 +371,41 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
359
371
|
}
|
|
360
372
|
};
|
|
361
373
|
|
|
374
|
+
// Heuristic phrases indicating an upstream failure even when the HTTP status
// is 2xx (some proxies wrap provider errors in "successful" responses).
const UNEXPECTED_ERROR_PATTERNS = [
  /unexpected/i,
  /internal.*error/i,
  /something.*went.*wrong/i,
  /service.*unavailable/i,
  /temporarily.*unavailable/i,
  /try.*again.*later/i,
  /server.*error/i,
  /bad.*gateway/i,
  /gateway.*timeout/i
];

/**
 * Inspect a response body for provider-side error indicators.
 *
 * @param {string|Object} responseBody - Raw JSON string or an already-parsed object.
 * @returns {{hasError: boolean, message?: string}} `message` is always a
 *   string (the original could leak a raw object when `error.message` was
 *   absent, producing "[object Object]" in caller error text).
 */
function containsUnexpectedError(responseBody) {
  try {
    const data = typeof responseBody === 'string' ? JSON.parse(responseBody) : responseBody;

    // An explicit error field always counts; normalize it to a string.
    if (data.error) {
      const message = typeof data.error === 'string'
        ? data.error
        : (data.error.message || JSON.stringify(data.error));
      return { hasError: true, message };
    }

    // Otherwise scan common message-bearing fields for known failure phrasing.
    const message = data.message || data.detail || data.error_description || '';
    for (const pattern of UNEXPECTED_ERROR_PATTERNS) {
      if (pattern.test(message)) {
        return { hasError: true, message };
      }
    }

    return { hasError: false };
  } catch {
    // Non-JSON input (e.g. a partial stream chunk) is treated as error-free;
    // hard failures are still caught by status-code checks elsewhere.
    return { hasError: false };
  }
}
|
|
408
|
+
|
|
362
409
|
res.on('data', chunk => {
|
|
363
410
|
data += chunk;
|
|
364
411
|
const chunkStr = chunk.toString();
|
|
@@ -377,17 +424,19 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
377
424
|
statusCode: res.statusCode
|
|
378
425
|
}));
|
|
379
426
|
} else if (chunkStr.includes('"detail"') || chunkStr.includes('"error"')) {
|
|
380
|
-
// 流式响应中的错误
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
427
|
+
// 流式响应中的错误 - 使用新的错误检测函数
|
|
428
|
+
const errorCheck = containsUnexpectedError(chunkStr);
|
|
429
|
+
if (errorCheck.hasError) {
|
|
430
|
+
resolved = true;
|
|
431
|
+
const latency = Date.now() - startTime;
|
|
432
|
+
req.destroy();
|
|
433
|
+
resolve(createResult({
|
|
434
|
+
success: false,
|
|
435
|
+
latency,
|
|
436
|
+
error: errorCheck.message,
|
|
437
|
+
statusCode: res.statusCode
|
|
438
|
+
}));
|
|
439
|
+
}
|
|
391
440
|
}
|
|
392
441
|
}
|
|
393
442
|
});
|
|
@@ -399,14 +448,13 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
|
|
|
399
448
|
|
|
400
449
|
// 严格判断:只有 2xx 且没有错误信息才算成功
|
|
401
450
|
if (res.statusCode >= 200 && res.statusCode < 300) {
|
|
402
|
-
//
|
|
403
|
-
const
|
|
404
|
-
if (
|
|
405
|
-
errMsg.includes('失败') || errMsg.includes('错误'))) {
|
|
451
|
+
// 使用新的错误检测函数
|
|
452
|
+
const errorCheck = containsUnexpectedError(data);
|
|
453
|
+
if (errorCheck.hasError) {
|
|
406
454
|
resolve(createResult({
|
|
407
455
|
success: false,
|
|
408
456
|
latency,
|
|
409
|
-
error:
|
|
457
|
+
error: errorCheck.message,
|
|
410
458
|
statusCode: res.statusCode
|
|
411
459
|
}));
|
|
412
460
|
} else {
|