@adversity/coding-tool-x 3.0.0 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -23,6 +23,57 @@ const MODEL_PRIORITY = {
   gemini: ['gemini-2.5-flash', 'gemini-2.5-pro']
 };
 
+const PROVIDER_CAPABILITIES = {
+  claude: {
+    supportsModelList: false,
+    modelListEndpoint: null,
+    fallbackStrategy: 'probe'
+  },
+  codex: {
+    supportsModelList: true,
+    modelListEndpoint: '/v1/models',
+    authHeader: 'Authorization: Bearer'
+  },
+  gemini: {
+    supportsModelList: false,
+    modelListEndpoint: null,
+    fallbackStrategy: 'probe'
+  },
+  openai_compatible: {
+    supportsModelList: true,
+    modelListEndpoint: '/v1/models',
+    authHeader: 'Authorization: Bearer'
+  }
+};
+
+/**
+ * Auto-detect channel type based on baseUrl
+ * @param {Object} channel - Channel configuration
+ * @returns {string} - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
+ */
+function detectChannelType(channel) {
+  const baseUrl = channel.baseUrl.toLowerCase();
+
+  // Check if it's the official Anthropic API
+  if (baseUrl.includes('anthropic.com') || baseUrl.includes('claude.ai')) {
+    return 'claude';
+  }
+
+  // Check if it's Gemini
+  if (baseUrl.includes('generativelanguage.googleapis.com') || baseUrl.includes('gemini')) {
+    return 'gemini';
+  }
+
+  // Check if it's the official OpenAI API
+  if (baseUrl.includes('api.openai.com')) {
+    return 'codex';
+  }
+
+  // All other third-party proxies default to OpenAI-compatible
+  // Including: 88code, anyrouter, internal proxies, etc.
+  return 'openai_compatible';
+}
+
 // Model name normalization mapping
 const MODEL_ALIASES = {
   // Claude variants
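
For illustration, a minimal usage sketch (not part of the package) of how the new detectChannelType helper classifies typical baseUrl values. The sample channels are hypothetical; note the ordering: the Anthropic, Gemini, and OpenAI substring checks run first, so any unmatched third-party proxy falls through to 'openai_compatible'.

// Hypothetical sketch; assumes detectChannelType from the hunk above is in scope.
const samples = [
  { name: 'anthropic', baseUrl: 'https://api.anthropic.com' },                     // -> 'claude'
  { name: 'google', baseUrl: 'https://generativelanguage.googleapis.com/v1beta' }, // -> 'gemini'
  { name: 'openai', baseUrl: 'https://api.openai.com/v1' },                        // -> 'codex'
  { name: 'proxy', baseUrl: 'https://llm-gateway.internal.example.com' }           // -> 'openai_compatible'
];
for (const channel of samples) {
  console.log(channel.name, '->', detectChannelType(channel));
}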
@@ -340,11 +391,208 @@ function getCachedModelInfo(channelId) {
   return null;
 }
 
+/**
+ * Fetch available models from provider's /v1/models endpoint
+ * @param {Object} channel - Channel configuration
+ * @param {string} channelType - 'claude' | 'codex' | 'gemini' | 'openai_compatible'
+ * @returns {Promise<Object>} { models: string[], supported: boolean, cached: boolean, error: string|null, fallbackUsed: boolean }
+ */
+async function fetchModelsFromProvider(channel, channelType) {
+  // If no type specified or type is 'claude', auto-detect
+  if (!channelType || channelType === 'claude') {
+    channelType = detectChannelType(channel);
+    console.log(`[ModelDetector] Auto-detected channel type: ${channelType} for ${channel.name}`);
+  }
+
+  // Check if provider supports model listing
+  const capability = PROVIDER_CAPABILITIES[channelType];
+  if (!capability || !capability.supportsModelList) {
+    return {
+      models: [],
+      supported: false,
+      fallbackUsed: true,
+      cached: false,
+      error: null
+    };
+  }
+
+  const cache = loadModelCache();
+  const cacheKey = channel.id;
+
+  // Check cache first
+  if (cache[cacheKey] && isCacheValid(cache[cacheKey]) && cache[cacheKey].fetchedModels) {
+    return {
+      models: cache[cacheKey].fetchedModels || [],
+      supported: true,
+      cached: true,
+      fallbackUsed: false,
+      error: null,
+      lastChecked: cache[cacheKey].lastChecked
+    };
+  }
+
+  return new Promise((resolve) => {
+    try {
+      const baseUrl = channel.baseUrl.trim().replace(/\/+$/, '');
+      const endpoint = capability.modelListEndpoint;
+      const requestUrl = `${baseUrl}${endpoint}`;
+
+      const parsedUrl = new URL(requestUrl);
+      const isHttps = parsedUrl.protocol === 'https:';
+      const httpModule = isHttps ? https : http;
+
+      const headers = {
+        'User-Agent': 'Coding-Tool-ModelDetector/1.0',
+        'Accept': 'application/json'
+      };
+
+      // Add authentication header
+      if (capability.authHeader && channel.apiKey) {
+        headers['Authorization'] = `Bearer ${channel.apiKey}`;
+      }
+
+      const options = {
+        hostname: parsedUrl.hostname,
+        port: parsedUrl.port || (isHttps ? 443 : 80),
+        path: parsedUrl.pathname + parsedUrl.search,
+        method: 'GET',
+        timeout: TEST_TIMEOUT_MS,
+        headers
+      };
+
+      const req = httpModule.request(options, (res) => {
+        let data = '';
+        res.on('data', chunk => { data += chunk; });
+        res.on('end', () => {
+          // Handle different status codes
+          if (res.statusCode === 200) {
+            try {
+              const response = JSON.parse(data);
+
+              // Parse OpenAI-compatible format: { data: [{ id: "model-name", ... }] }
+              let models = [];
+              if (response.data && Array.isArray(response.data)) {
+                models = response.data
+                  .map(item => item.id || item.model)
+                  .filter(Boolean);
+              }
+
+              // Update cache with fetched models
+              const cacheEntry = {
+                lastChecked: new Date().toISOString(),
+                fetchedModels: models,
+                availableModels: cache[cacheKey]?.availableModels || [],
+                preferredTestModel: cache[cacheKey]?.preferredTestModel || null
+              };
+
+              cache[cacheKey] = cacheEntry;
+              saveModelCache(cache);
+
+              console.log(`[ModelDetector] Fetched ${models.length} models from ${channel.name}`);
+
+              resolve({
+                models,
+                supported: true,
+                cached: false,
+                fallbackUsed: false,
+                error: null,
+                lastChecked: cacheEntry.lastChecked
+              });
+            } catch (parseError) {
+              console.error(`[ModelDetector] Failed to parse models response: ${parseError.message}`);
+              resolve({
+                models: [],
+                supported: true,
+                cached: false,
+                fallbackUsed: true,
+                error: `Parse error: ${parseError.message}`
+              });
+            }
+          } else if (res.statusCode === 401 || res.statusCode === 403) {
+            console.error(`[ModelDetector] Authentication failed for ${channel.name}: ${res.statusCode}`);
+            resolve({
+              models: [],
+              supported: true,
+              cached: false,
+              fallbackUsed: true,
+              error: `Authentication failed: ${res.statusCode}`
+            });
+          } else if (res.statusCode === 404) {
+            console.warn(`[ModelDetector] Model list endpoint not found for ${channel.name}`);
+            resolve({
+              models: [],
+              supported: false,
+              cached: false,
+              fallbackUsed: true,
+              error: 'Endpoint not found (404)'
+            });
+          } else if (res.statusCode === 429) {
+            console.warn(`[ModelDetector] Rate limited for ${channel.name}`);
+            resolve({
+              models: [],
+              supported: true,
+              cached: false,
+              fallbackUsed: true,
+              error: 'Rate limited (429)'
+            });
+          } else {
+            console.error(`[ModelDetector] Unexpected status ${res.statusCode} for ${channel.name}`);
+            resolve({
+              models: [],
+              supported: true,
+              cached: false,
+              fallbackUsed: true,
+              error: `HTTP ${res.statusCode}`
+            });
+          }
+        });
+      });
+
+      req.on('error', (error) => {
+        console.error(`[ModelDetector] Network error fetching models from ${channel.name}: ${error.message}`);
+        resolve({
+          models: [],
+          supported: true,
+          cached: false,
+          fallbackUsed: true,
+          error: `Network error: ${error.message}`
+        });
+      });
+
+      req.on('timeout', () => {
+        req.destroy();
+        console.error(`[ModelDetector] Timeout fetching models from ${channel.name}`);
+        resolve({
+          models: [],
+          supported: true,
+          cached: false,
+          fallbackUsed: true,
+          error: 'Request timeout'
+        });
+      });
+
+      req.end();
+
+    } catch (error) {
+      console.error(`[ModelDetector] Error in fetchModelsFromProvider: ${error.message}`);
+      resolve({
+        models: [],
+        supported: true,
+        cached: false,
+        fallbackUsed: true,
+        error: error.message
+      });
+    }
+  });
+}
+
 module.exports = {
   probeModelAvailability,
   testModelAvailability,
   normalizeModelName,
   clearCache,
   getCachedModelInfo,
+  fetchModelsFromProvider,
+  detectChannelType,
   MODEL_PRIORITY
 };
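
A sketch of how the new fetchModelsFromProvider export is meant to be consumed, based on the result shape documented above. The caller below is hypothetical and not part of the package; passing null as channelType exercises the auto-detection path.

// Hypothetical caller illustrating the result contract.
async function listChannelModels(channel) {
  const result = await fetchModelsFromProvider(channel, null); // null -> auto-detect type
  if (result.fallbackUsed) {
    // Either the provider exposes no /v1/models endpoint or the request failed;
    // a real caller would fall back to probing individual models here.
    console.warn(`No model list for ${channel.name}: ${result.error || 'listing not supported'}`);
    return [];
  }
  console.log(`${result.cached ? 'Cached' : 'Fetched'} ${result.models.length} models`);
  return result.models;
}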
@@ -199,10 +199,22 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
   // Probe model availability if channel is provided
   let modelProbe = null;
   if (channel) {
-    try {
-      modelProbe = await probeModelAvailability(channel, channelType);
-    } catch (error) {
-      console.error('[SpeedTest] Model detection failed:', error.message);
+    // Check if speedTestModel is explicitly configured
+    if (channel.speedTestModel) {
+      // Use the explicitly configured model for speed testing
+      modelProbe = {
+        preferredTestModel: channel.speedTestModel,
+        availableModels: [channel.speedTestModel],
+        cached: false
+      };
+      console.log(`[SpeedTest] Using configured speedTestModel: ${channel.speedTestModel}`);
+    } else {
+      // Fall back to auto-detection
+      try {
+        modelProbe = await probeModelAvailability(channel, channelType);
+      } catch (error) {
+        console.error('[SpeedTest] Model detection failed:', error.message);
+      }
     }
   }
 
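A hypothetical channel entry showing the new override in context; every value below is invented, and only speedTestModel is the field this hunk actually introduces. When it is set, the speed test builds the modelProbe object directly and never calls probeModelAvailability.

// Invented example config; field shapes follow the code in this diff.
const channel = {
  id: 'proxy-1',
  name: 'Internal Proxy',
  baseUrl: 'https://llm-gateway.internal.example.com',
  apiKey: process.env.PROXY_API_KEY,
  speedTestModel: 'claude-3-5-haiku-20241022' // skips auto-detection when present
};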
@@ -243,7 +255,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
     const sessionId = Math.random().toString(36).substring(2, 15);
     requestBody = JSON.stringify({
       model: testModel,
-      max_tokens: 10,
+      max_tokens: 1,
       stream: true,
       messages: [{ role: 'user', content: [{ type: 'text', text: 'Hi' }] }],
       system: [{ type: 'text', text: "You are Claude Code, Anthropic's official CLI for Claude." }],
@@ -288,7 +300,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
       model: testModel,
       instructions: 'You are Codex.',
       input: [{ type: 'message', role: 'user', content: [{ type: 'input_text', text: 'ping' }] }],
-      max_output_tokens: 10,
+      max_output_tokens: 1,
       stream: false,
       store: false
     });
@@ -309,7 +321,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
     testModel = modelProbe?.preferredTestModel || model || 'gemini-2.5-pro';
     requestBody = JSON.stringify({
       model: testModel,
-      max_tokens: 10,
+      max_tokens: 1,
       messages: [{ role: 'user', content: 'Hi' }]
     });
    headers = {
@@ -325,7 +337,7 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
     }
     requestBody = JSON.stringify({
       model: 'gpt-4o-mini',
-      max_tokens: 10,
+      max_tokens: 1,
       messages: [{ role: 'user', content: 'Hi' }]
     });
    headers = {
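
The four request builders above all drop the probe budget from 10 output tokens to 1, presumably to keep the speed test as cheap as possible while still exercising a full round trip. For reference, the Claude-style probe body after this change, reassembled from the first of these hunks; testModel and sessionId come from surrounding code the diff does not show, and trailing fields are omitted.

requestBody = JSON.stringify({
  model: testModel,
  max_tokens: 1, // was 10 in 3.0.0
  stream: true,
  messages: [{ role: 'user', content: [{ type: 'text', text: 'Hi' }] }],
  system: [{ type: 'text', text: "You are Claude Code, Anthropic's official CLI for Claude." }]
  // ...remaining fields as in the hunk above
});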
@@ -359,6 +371,41 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
     }
   };
 
+  const UNEXPECTED_ERROR_PATTERNS = [
+    /unexpected/i,
+    /internal.*error/i,
+    /something.*went.*wrong/i,
+    /service.*unavailable/i,
+    /temporarily.*unavailable/i,
+    /try.*again.*later/i,
+    /server.*error/i,
+    /bad.*gateway/i,
+    /gateway.*timeout/i
+  ];
+
+  function containsUnexpectedError(responseBody) {
+    try {
+      const data = typeof responseBody === 'string' ? JSON.parse(responseBody) : responseBody;
+
+      // Check for explicit error field
+      if (data.error) {
+        return { hasError: true, message: data.error.message || data.error };
+      }
+
+      // Check message patterns
+      const message = data.message || data.detail || data.error_description || '';
+      for (const pattern of UNEXPECTED_ERROR_PATTERNS) {
+        if (pattern.test(message)) {
+          return { hasError: true, message };
+        }
+      }
+
+      return { hasError: false };
+    } catch {
+      return { hasError: false };
+    }
+  }
+
   res.on('data', chunk => {
     data += chunk;
     const chunkStr = chunk.toString();
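
A few illustrative calls (not part of the package) showing how containsUnexpectedError classifies response bodies; note that unparseable input deliberately reports no error, so ordinary SSE chunks do not trip the check.

// Assumes containsUnexpectedError from the hunk above is in scope.
containsUnexpectedError('{"error":{"message":"quota exceeded"}}');
// -> { hasError: true, message: 'quota exceeded' }  (explicit error field)
containsUnexpectedError('{"message":"Service temporarily unavailable"}');
// -> { hasError: true, message: 'Service temporarily unavailable' }  (pattern match)
containsUnexpectedError('{"message":"Request completed"}');
// -> { hasError: false }
containsUnexpectedError('data: {"type":"content_block_delta"}');
// -> { hasError: false }  (not valid JSON; the catch branch treats it as clean)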
@@ -377,17 +424,19 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
             statusCode: res.statusCode
           }));
         } else if (chunkStr.includes('"detail"') || chunkStr.includes('"error"')) {
-          // Error in the streaming response
-          resolved = true;
-          const latency = Date.now() - startTime;
-          req.destroy();
-          const errMsg = parseErrorMessage(chunkStr) || '流式响应错误';
-          resolve(createResult({
-            success: false,
-            latency,
-            error: errMsg,
-            statusCode: res.statusCode
-          }));
+          // Error in the streaming response: use the new error-detection function
+          const errorCheck = containsUnexpectedError(chunkStr);
+          if (errorCheck.hasError) {
+            resolved = true;
+            const latency = Date.now() - startTime;
+            req.destroy();
+            resolve(createResult({
+              success: false,
+              latency,
+              error: errorCheck.message,
+              statusCode: res.statusCode
+            }));
+          }
         }
       }
     });
@@ -399,14 +448,13 @@ async function testAPIFunctionality(baseUrl, apiKey, timeout, channelType = 'cla
 
       // Strict check: only a 2xx status with no error message counts as success
      if (res.statusCode >= 200 && res.statusCode < 300) {
-        // Check whether the response body contains an error message
-        const errMsg = parseErrorMessage(data);
-        if (errMsg && (errMsg.includes('error') || errMsg.includes('Error') ||
-            errMsg.includes('失败') || errMsg.includes('错误'))) {
+        // Use the new error-detection function
+        const errorCheck = containsUnexpectedError(data);
+        if (errorCheck.hasError) {
          resolve(createResult({
            success: false,
            latency,
-            error: errMsg,
+            error: errorCheck.message,
            statusCode: res.statusCode
          }));
        } else {