@adversity/coding-tool-x 3.1.1 → 3.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/CHANGELOG.md +41 -0
  2. package/dist/web/assets/Analytics-BIqc8Rin.css +1 -0
  3. package/dist/web/assets/Analytics-D2V09DHH.js +39 -0
  4. package/dist/web/assets/{ConfigTemplates-ZrK_s7ma.js → ConfigTemplates-Bf_11LhH.js} +1 -1
  5. package/dist/web/assets/Home-BRnW4FTS.js +1 -0
  6. package/dist/web/assets/Home-CyCIx4BA.css +1 -0
  7. package/dist/web/assets/{PluginManager-BD7QUZbU.js → PluginManager-B9J32GhW.js} +1 -1
  8. package/dist/web/assets/{ProjectList-DRb1DuHV.js → ProjectList-5a19MWJk.js} +1 -1
  9. package/dist/web/assets/SessionList-CXUr6S7w.css +1 -0
  10. package/dist/web/assets/SessionList-Cxg5bAdT.js +1 -0
  11. package/dist/web/assets/{SkillManager-C1xG5B4Q.js → SkillManager-CVBr0CLi.js} +1 -1
  12. package/dist/web/assets/{Terminal-DksBo_lM.js → Terminal-D2Xe_Q0H.js} +1 -1
  13. package/dist/web/assets/{WorkspaceManager-Burx7XOo.js → WorkspaceManager-C7dwV94C.js} +1 -1
  14. package/dist/web/assets/icons-BxcwoY5F.js +1 -0
  15. package/dist/web/assets/index-BS9RA6SN.js +2 -0
  16. package/dist/web/assets/index-DUNAVDGb.css +1 -0
  17. package/dist/web/assets/naive-ui-BIXcURHZ.js +1 -0
  18. package/dist/web/assets/{vendors-CO3Upi1d.js → vendors-i5CBGnlm.js} +1 -1
  19. package/dist/web/assets/{vue-vendor-DqyWIXEb.js → vue-vendor-PKd8utv_.js} +1 -1
  20. package/dist/web/index.html +6 -6
  21. package/package.json +1 -1
  22. package/src/config/default.js +7 -27
  23. package/src/config/loader.js +6 -3
  24. package/src/config/model-metadata.js +167 -0
  25. package/src/config/model-metadata.json +125 -0
  26. package/src/config/model-pricing.js +23 -93
  27. package/src/server/api/channels.js +16 -39
  28. package/src/server/api/codex-channels.js +15 -43
  29. package/src/server/api/commands.js +0 -77
  30. package/src/server/api/config.js +4 -1
  31. package/src/server/api/gemini-channels.js +16 -40
  32. package/src/server/api/opencode-channels.js +108 -56
  33. package/src/server/api/opencode-proxy.js +42 -33
  34. package/src/server/api/opencode-sessions.js +4 -69
  35. package/src/server/api/sessions.js +11 -68
  36. package/src/server/api/settings.js +138 -0
  37. package/src/server/api/skills.js +0 -44
  38. package/src/server/api/statistics.js +115 -1
  39. package/src/server/codex-proxy-server.js +32 -59
  40. package/src/server/gemini-proxy-server.js +21 -18
  41. package/src/server/index.js +13 -7
  42. package/src/server/opencode-proxy-server.js +1232 -197
  43. package/src/server/proxy-server.js +8 -8
  44. package/src/server/services/codex-sessions.js +105 -6
  45. package/src/server/services/commands-service.js +0 -29
  46. package/src/server/services/config-templates-service.js +38 -28
  47. package/src/server/services/env-checker.js +97 -9
  48. package/src/server/services/env-manager.js +29 -1
  49. package/src/server/services/opencode-channels.js +3 -1
  50. package/src/server/services/opencode-sessions.js +486 -218
  51. package/src/server/services/opencode-settings-manager.js +172 -36
  52. package/src/server/services/plugins-service.js +37 -28
  53. package/src/server/services/pty-manager.js +22 -18
  54. package/src/server/services/response-decoder.js +21 -0
  55. package/src/server/services/skill-service.js +1 -49
  56. package/src/server/services/speed-test.js +40 -3
  57. package/src/server/services/statistics-service.js +238 -1
  58. package/src/server/utils/pricing.js +51 -60
  59. package/src/server/websocket-server.js +24 -5
  60. package/dist/web/assets/Home-B8YfhZ3c.js +0 -1
  61. package/dist/web/assets/Home-Di2qsylF.css +0 -1
  62. package/dist/web/assets/SessionList-BGJWyneI.css +0 -1
  63. package/dist/web/assets/SessionList-lZ0LKzfT.js +0 -1
  64. package/dist/web/assets/icons-kcfLIMBB.js +0 -1
  65. package/dist/web/assets/index-Ufv5rCa5.css +0 -1
  66. package/dist/web/assets/index-lAkrRC3h.js +0 -2
  67. package/dist/web/assets/naive-ui-CSrLusZZ.js +0 -1
  68. package/src/server/api/convert.js +0 -260
  69. package/src/server/services/session-converter.js +0 -577
@@ -20,12 +20,13 @@ const {
20
20
  runWithConcurrencyLimit
21
21
  } = require('../services/speed-test');
22
22
  const { clearCodexRedirectCache } = require('../codex-proxy-server');
23
- const {
24
- fetchModelsFromProvider,
25
- probeModelAvailability,
26
- } = require('../services/model-detector');
23
+ const { getDefaultSpeedTestModelByToolType } = require('../../config/model-metadata');
27
24
  const CODEX_GATEWAY_SOURCE_TYPE = 'codex';
28
25
 
26
+ function getDefaultCodexModel() {
27
+ return getDefaultSpeedTestModelByToolType('codex');
28
+ }
29
+
29
30
  module.exports = (config) => {
30
31
  /**
31
32
  * GET /api/codex/channels
@@ -68,45 +69,16 @@ module.exports = (config) => {
68
69
  }
69
70
 
70
71
  const gatewaySourceType = CODEX_GATEWAY_SOURCE_TYPE;
71
- const listResult = await fetchModelsFromProvider(channel, 'openai_compatible');
72
- const listedModels = Array.isArray(listResult.models) ? listResult.models : [];
73
- let result;
74
-
75
- if (listedModels.length > 0) {
76
- result = listResult;
77
- } else {
78
- const usingConfiguredProbe = !!listResult.disabledByConfig;
79
- const probe = await probeModelAvailability(channel, 'codex');
80
- const probedModels = Array.isArray(probe.availableModels) ? probe.availableModels : [];
81
-
82
- if (probedModels.length > 0) {
83
- result = {
84
- models: probedModels,
85
- supported: true,
86
- cached: !!probe.cached,
87
- fallbackUsed: false,
88
- lastChecked: probe.lastChecked || listResult.lastChecked || new Date().toISOString(),
89
- error: null,
90
- errorHint: listResult.error
91
- ? (usingConfiguredProbe
92
- ? '已按设置跳过 /v1/models,使用默认模型探测结果'
93
- : '模型列表接口不可用,已自动切换为模型探测结果')
94
- : null
95
- };
96
- } else {
97
- result = {
98
- models: [],
99
- supported: false,
100
- cached: !!probe.cached || !!listResult.cached,
101
- fallbackUsed: false,
102
- lastChecked: probe.lastChecked || listResult.lastChecked || new Date().toISOString(),
103
- error: listResult.error || '无法探测可用模型',
104
- errorHint: listResult.errorHint || (usingConfiguredProbe
105
- ? '已按设置跳过 /v1/models,且默认模型探测无可用结果'
106
- : '模型列表接口不可用且模型探测无可用结果')
107
- };
108
- }
109
- }
72
+ const models = [getDefaultCodexModel()];
73
+ const result = {
74
+ models,
75
+ supported: models.length > 0,
76
+ cached: false,
77
+ fallbackUsed: false,
78
+ lastChecked: new Date().toISOString(),
79
+ error: models.length > 0 ? null : '未配置默认模型列表',
80
+ errorHint: models.length > 0 ? null : '请在设置中配置 Codex 默认模型'
81
+ };
110
82
 
111
83
  res.json({
112
84
  channelId: id,
@@ -479,81 +479,4 @@ router.post('/uninstall', (req, res) => {
479
479
  }
480
480
  });
481
481
 
482
- // ==================== 格式转换 API ====================
483
-
484
- /**
485
- * 转换命令格式
486
- * POST /api/commands/convert
487
- * Body: { content, targetFormat }
488
- * - content: 命令内容
489
- * - targetFormat: 目标格式 ('claude' | 'codex')
490
- */
491
- router.post('/convert', (req, res) => {
492
- try {
493
- const { platform, service } = getCommandsService(req);
494
- const { content, targetFormat } = req.body;
495
-
496
- if (!content) {
497
- return res.status(400).json({
498
- success: false,
499
- message: '请提供命令内容'
500
- });
501
- }
502
-
503
- if (!['claude', 'codex'].includes(targetFormat)) {
504
- return res.status(400).json({
505
- success: false,
506
- message: '目标格式必须是 claude 或 codex'
507
- });
508
- }
509
-
510
- const result = service.convertCommandFormat(content, targetFormat);
511
-
512
- res.json({
513
- success: true,
514
- platform,
515
- ...result
516
- });
517
- } catch (err) {
518
- console.error('[Commands API] Convert command error:', err);
519
- res.status(500).json({
520
- success: false,
521
- message: err.message
522
- });
523
- }
524
- });
525
-
526
- /**
527
- * 检测命令格式
528
- * POST /api/commands/detect-format
529
- * Body: { content }
530
- */
531
- router.post('/detect-format', (req, res) => {
532
- try {
533
- const { platform, service } = getCommandsService(req);
534
- const { content } = req.body;
535
-
536
- if (!content) {
537
- return res.status(400).json({
538
- success: false,
539
- message: '请提供命令内容'
540
- });
541
- }
542
-
543
- const format = service.detectFormat(content);
544
-
545
- res.json({
546
- success: true,
547
- platform,
548
- format
549
- });
550
- } catch (err) {
551
- console.error('[Commands API] Detect format error:', err);
552
- res.status(500).json({
553
- success: false,
554
- message: err.message
555
- });
556
- }
557
- });
558
-
559
482
  module.exports = router;
@@ -44,9 +44,12 @@ function sanitizePricing(inputPricing, currentPricing) {
44
44
  }
45
45
 
46
46
  function normalizeModelDiscovery(modelDiscovery, currentValue = DEFAULT_CONFIG.modelDiscovery) {
47
+ const defaultModelDiscovery = DEFAULT_CONFIG.modelDiscovery && typeof DEFAULT_CONFIG.modelDiscovery === 'object'
48
+ ? DEFAULT_CONFIG.modelDiscovery
49
+ : { useV1ModelsEndpoint: false };
47
50
  const current = currentValue && typeof currentValue === 'object'
48
51
  ? currentValue
49
- : DEFAULT_CONFIG.modelDiscovery;
52
+ : defaultModelDiscovery;
50
53
  const input = modelDiscovery && typeof modelDiscovery === 'object'
51
54
  ? modelDiscovery
52
55
  : {};
@@ -19,12 +19,13 @@ const {
19
19
  runWithConcurrencyLimit
20
20
  } = require('../services/speed-test');
21
21
  const { clearGeminiRedirectCache } = require('../gemini-proxy-server');
22
- const {
23
- probeModelAvailability,
24
- fetchModelsFromProvider
25
- } = require('../services/model-detector');
22
+ const { getDefaultSpeedTestModelByToolType } = require('../../config/model-metadata');
26
23
  const GEMINI_GATEWAY_SOURCE_TYPE = 'gemini';
27
24
 
25
+ function getDefaultGeminiModel() {
26
+ return getDefaultSpeedTestModelByToolType('gemini');
27
+ }
28
+
28
29
  module.exports = (config) => {
29
30
  /**
30
31
  * GET /api/gemini/channels
@@ -67,41 +68,16 @@ module.exports = (config) => {
67
68
  }
68
69
 
69
70
  const gatewaySourceType = GEMINI_GATEWAY_SOURCE_TYPE;
70
- const listResult = await fetchModelsFromProvider(channel, 'openai_compatible');
71
- const listedModels = Array.isArray(listResult.models) ? listResult.models : [];
72
- let result;
73
-
74
- if (listedModels.length > 0) {
75
- result = {
76
- models: listedModels,
77
- supported: true,
78
- cached: !!listResult.cached,
79
- fallbackUsed: false,
80
- lastChecked: listResult.lastChecked || new Date().toISOString(),
81
- error: null,
82
- errorHint: null
83
- };
84
- } else {
85
- const usingConfiguredProbe = !!listResult.disabledByConfig;
86
- const probe = await probeModelAvailability(channel, gatewaySourceType, {
87
- stopOnFirstAvailable: false
88
- });
89
- const probedModels = Array.isArray(probe.availableModels) ? probe.availableModels : [];
90
-
91
- result = {
92
- models: probedModels,
93
- supported: probedModels.length > 0,
94
- cached: !!probe.cached || !!listResult.cached,
95
- fallbackUsed: false,
96
- lastChecked: probe.lastChecked || listResult.lastChecked || new Date().toISOString(),
97
- error: probedModels.length > 0 ? null : (listResult.error || '无法获取可用模型'),
98
- errorHint: probedModels.length > 0
99
- ? (usingConfiguredProbe ? '已按设置跳过 /v1/models,使用默认模型探测结果' : '模型列表接口不可用,已自动切换为模型探测结果')
100
- : (listResult.errorHint || (usingConfiguredProbe
101
- ? '已按设置跳过 /v1/models,且默认模型探测无可用结果'
102
- : '模型列表接口不可用且模型探测无可用结果'))
103
- };
104
- }
71
+ const models = [getDefaultGeminiModel()];
72
+ const result = {
73
+ models,
74
+ supported: models.length > 0,
75
+ cached: false,
76
+ fallbackUsed: false,
77
+ lastChecked: new Date().toISOString(),
78
+ error: models.length > 0 ? null : '未配置默认模型列表',
79
+ errorHint: models.length > 0 ? null : '请在设置中配置 Gemini 默认模型'
80
+ };
105
81
 
106
82
  res.json({
107
83
  channelId: id,
@@ -157,7 +133,7 @@ module.exports = (config) => {
157
133
  return res.status(400).json({ error: 'Missing required fields: apiKey' });
158
134
  }
159
135
 
160
- const channel = createChannel(name, baseUrl, apiKey, model || 'gemini-2.5-pro', {
136
+ const channel = createChannel(name, baseUrl, apiKey, model || getDefaultGeminiModel(), {
161
137
  websiteUrl,
162
138
  enabled,
163
139
  weight,
@@ -16,11 +16,17 @@ const {
16
16
  sanitizeBatchConcurrency,
17
17
  runWithConcurrencyLimit
18
18
  } = require('../services/speed-test');
19
- const { clearOpenCodeRedirectCache } = require('../opencode-proxy-server');
19
+ const {
20
+ clearOpenCodeRedirectCache,
21
+ collectProxyModelList,
22
+ getOpenCodeProxyStatus
23
+ } = require('../opencode-proxy-server');
24
+ const { setProxyConfig } = require('../services/opencode-settings-manager');
20
25
  const {
21
26
  fetchModelsFromProvider,
22
- probeModelAvailability
27
+ clearCache
23
28
  } = require('../services/model-detector');
29
+ const { getDefaultSpeedTestModelByToolType } = require('../../config/model-metadata');
24
30
 
25
31
  module.exports = (config) => {
26
32
  function uniqueModels(models = []) {
@@ -38,31 +44,6 @@ module.exports = (config) => {
38
44
  return result;
39
45
  }
40
46
 
41
- function collectChannelPreferredModels(channel) {
42
- const candidates = [];
43
- if (!channel || typeof channel !== 'object') return candidates;
44
-
45
- candidates.push(channel.model);
46
- candidates.push(channel.speedTestModel);
47
-
48
- const modelConfig = channel.modelConfig;
49
- if (modelConfig && typeof modelConfig === 'object') {
50
- candidates.push(modelConfig.model);
51
- candidates.push(modelConfig.opusModel);
52
- candidates.push(modelConfig.sonnetModel);
53
- candidates.push(modelConfig.haikuModel);
54
- }
55
-
56
- if (Array.isArray(channel.modelRedirects)) {
57
- channel.modelRedirects.forEach((rule) => {
58
- candidates.push(rule?.from);
59
- candidates.push(rule?.to);
60
- });
61
- }
62
-
63
- return uniqueModels(candidates);
64
- }
65
-
66
47
  function resolveGatewaySourceType(channel) {
67
48
  const value = String(channel?.gatewaySourceType || '').trim().toLowerCase();
68
49
  if (value === 'claude') return 'claude';
@@ -74,11 +55,82 @@ module.exports = (config) => {
74
55
  return resolveGatewaySourceType(channel);
75
56
  }
76
57
 
77
- function isConverterPresetChannel(channel) {
58
+ function isConverterEntryChannel(channel) {
78
59
  const presetId = String(channel?.presetId || '').trim().toLowerCase();
79
60
  return presetId === 'entry_claude' || presetId === 'entry_codex' || presetId === 'entry_gemini';
80
61
  }
81
62
 
63
+ function getDefaultModelsByGatewaySourceType(gatewaySourceType) {
64
+ if (gatewaySourceType === 'claude') return [getDefaultSpeedTestModelByToolType('claude')];
65
+ if (gatewaySourceType === 'gemini') return [getDefaultSpeedTestModelByToolType('gemini')];
66
+ return [getDefaultSpeedTestModelByToolType('codex')];
67
+ }
68
+
69
+ function refreshEditedChannelModelCache(channelId) {
70
+ if (!channelId) return;
71
+ clearCache(channelId);
72
+ }
73
+
74
+ async function syncOpenCodeProxyConfigByCache() {
75
+ const proxyStatus = getOpenCodeProxyStatus();
76
+ if (!proxyStatus?.running || !Number.isFinite(proxyStatus?.port)) {
77
+ return;
78
+ }
79
+
80
+ const channels = getChannels().channels || [];
81
+ const enabledChannels = channels.filter(ch => ch.enabled !== false);
82
+
83
+ // Collect per-channel model lists for per-channel provider generation
84
+ let detectedModels = [];
85
+ try {
86
+ detectedModels = await collectProxyModelList(enabledChannels, { useCacheOnly: true }) || [];
87
+ } catch (error) {
88
+ console.warn('[OpenCode Channels API] Failed to collect cached models while syncing proxy config:', error.message);
89
+ }
90
+
91
+ const channelPayloads = enabledChannels.map((ch) => {
92
+ let models;
93
+ if (Array.isArray(ch.allowedModels) && ch.allowedModels.length > 0) {
94
+ // User explicitly selected models for this channel
95
+ models = ch.allowedModels;
96
+ } else {
97
+ // Fall back to configured + detected models
98
+ models = uniqueModels([
99
+ ch.model,
100
+ ch.speedTestModel,
101
+ ...(Array.isArray(ch.modelRedirects)
102
+ ? ch.modelRedirects.flatMap(r => [r?.from, r?.to])
103
+ : []),
104
+ ...detectedModels
105
+ ]);
106
+ }
107
+ return {
108
+ name: ch.name,
109
+ providerKey: ch.providerKey || ch.name,
110
+ model: ch.model || null,
111
+ models
112
+ };
113
+ });
114
+
115
+ const currentChannel = enabledChannels[0];
116
+ const activeModel = currentChannel?.model || currentChannel?.speedTestModel || null;
117
+ setProxyConfig(proxyStatus.port, { channels: channelPayloads, model: activeModel });
118
+ }
119
+
120
+ async function refreshEditedChannelAndSyncProxy(channelId) {
121
+ try {
122
+ await refreshEditedChannelModelCache(channelId);
123
+ } catch (error) {
124
+ console.warn('[OpenCode Channels API] Refresh edited channel model cache failed:', error.message);
125
+ }
126
+
127
+ try {
128
+ await syncOpenCodeProxyConfigByCache();
129
+ } catch (error) {
130
+ console.warn('[OpenCode Channels API] Sync proxy config after channel edit failed:', error.message);
131
+ }
132
+ }
133
+
82
134
  /**
83
135
  * GET /api/opencode/channels
84
136
  * 获取所有 OpenCode 渠道
@@ -145,10 +197,24 @@ module.exports = (config) => {
145
197
  }
146
198
 
147
199
  const gatewaySourceType = resolveGatewaySourceType(channel);
148
- const preferredModels = collectChannelPreferredModels(channel);
200
+ if (isConverterEntryChannel(channel)) {
201
+ const models = uniqueModels(getDefaultModelsByGatewaySourceType(gatewaySourceType));
202
+ const now = new Date().toISOString();
203
+ return res.json({
204
+ channelId: channelId,
205
+ gatewaySourceType,
206
+ models,
207
+ supported: models.length > 0,
208
+ cached: false,
209
+ fallbackUsed: false,
210
+ fetchedAt: now,
211
+ error: models.length > 0 ? null : '未配置默认模型列表',
212
+ errorHint: models.length > 0 ? null : '请在设置中配置对应工具类型的默认模型'
213
+ });
214
+ }
215
+
149
216
  const listResult = await fetchModelsFromProvider(channel, 'openai_compatible');
150
217
  const listedModels = Array.isArray(listResult.models) ? uniqueModels(listResult.models) : [];
151
- const shouldProbeByDefault = !!listResult.disabledByConfig;
152
218
  let result;
153
219
 
154
220
  if (listedModels.length > 0) {
@@ -161,28 +227,7 @@ module.exports = (config) => {
161
227
  error: null,
162
228
  errorHint: null
163
229
  };
164
- } else if (shouldProbeByDefault || isConverterPresetChannel(channel)) {
165
- const probe = await probeModelAvailability(channel, gatewaySourceType, {
166
- stopOnFirstAvailable: false,
167
- preferredModels
168
- });
169
- const probedModels = Array.isArray(probe.availableModels) ? uniqueModels(probe.availableModels) : [];
170
-
171
- result = {
172
- models: probedModels,
173
- supported: probedModels.length > 0,
174
- cached: !!probe.cached || !!listResult.cached,
175
- fallbackUsed: false,
176
- lastChecked: probe.lastChecked || listResult.lastChecked || new Date().toISOString(),
177
- error: probedModels.length > 0 ? null : (listResult.error || '无法获取可用模型'),
178
- errorHint: probedModels.length > 0
179
- ? (shouldProbeByDefault ? '已按设置跳过 /v1/models,使用默认模型探测结果' : '模型列表接口不可用,已自动切换为模型探测结果')
180
- : (listResult.errorHint || (shouldProbeByDefault
181
- ? '已按设置跳过 /v1/models,且默认模型探测无可用结果'
182
- : '模型列表接口不可用且模型探测无可用结果'))
183
- };
184
230
  } else {
185
- // 非入口转换器渠道:只请求 /v1/models,失败则返回空列表
186
231
  result = {
187
232
  models: [],
188
233
  supported: false,
@@ -190,7 +235,7 @@ module.exports = (config) => {
190
235
  fallbackUsed: false,
191
236
  lastChecked: listResult.lastChecked || new Date().toISOString(),
192
237
  error: listResult.error || '该渠道未返回可用模型列表',
193
- errorHint: listResult.errorHint || '此类型渠道不执行模型探测,请检查 /v1/models 接口'
238
+ errorHint: listResult.errorHint || ' OpenCode 非转换入口使用 /v1/models,请检查接口配置'
194
239
  };
195
240
  }
196
241
 
@@ -218,7 +263,7 @@ module.exports = (config) => {
218
263
  * POST /api/opencode/channels
219
264
  * 创建新渠道
220
265
  */
221
- router.post('/', (req, res) => {
266
+ router.post('/', async (req, res) => {
222
267
  try {
223
268
  const {
224
269
  name,
@@ -233,7 +278,8 @@ module.exports = (config) => {
233
278
  modelRedirects,
234
279
  speedTestModel,
235
280
  presetId,
236
- websiteUrl
281
+ websiteUrl,
282
+ allowedModels
237
283
  } = req.body;
238
284
 
239
285
  if (!name || !baseUrl) {
@@ -254,9 +300,12 @@ module.exports = (config) => {
254
300
  modelRedirects: modelRedirects || [],
255
301
  speedTestModel: speedTestModel || null,
256
302
  presetId,
257
- websiteUrl
303
+ websiteUrl,
304
+ allowedModels: allowedModels || []
258
305
  });
259
306
 
307
+ clearOpenCodeRedirectCache(channel.id);
308
+ await refreshEditedChannelAndSyncProxy(channel.id);
260
309
  res.json(channel);
261
310
  broadcastSchedulerState('opencode', getSchedulerState('opencode'));
262
311
  } catch (err) {
@@ -269,13 +318,14 @@ module.exports = (config) => {
269
318
  * PUT /api/opencode/channels/:channelId
270
319
  * 更新渠道
271
320
  */
272
- router.put('/:channelId', (req, res) => {
321
+ router.put('/:channelId', async (req, res) => {
273
322
  try {
274
323
  const { channelId } = req.params;
275
324
  const updates = req.body;
276
325
 
277
326
  const channel = updateChannel(channelId, updates);
278
327
  clearOpenCodeRedirectCache(channelId);
328
+ await refreshEditedChannelAndSyncProxy(channelId);
279
329
  res.json(channel);
280
330
  broadcastSchedulerState('opencode', getSchedulerState('opencode'));
281
331
  } catch (err) {
@@ -292,6 +342,8 @@ module.exports = (config) => {
292
342
  try {
293
343
  const { channelId } = req.params;
294
344
  const result = await deleteChannel(channelId);
345
+ clearOpenCodeRedirectCache(channelId);
346
+ await refreshEditedChannelAndSyncProxy(channelId);
295
347
  res.json(result);
296
348
  broadcastSchedulerState('opencode', getSchedulerState('opencode'));
297
349
  } catch (err) {
@@ -19,16 +19,6 @@ const { PATHS, ensureStorageDirMigrated } = require('../../config/paths');
19
19
  const fs = require('fs');
20
20
  const path = require('path');
21
21
 
22
- function pushUniqueModel(allModels, seen, modelId) {
23
- if (typeof modelId !== 'string') return;
24
- const trimmed = modelId.trim();
25
- if (!trimmed) return;
26
- const key = trimmed.toLowerCase();
27
- if (seen.has(key)) return;
28
- seen.add(key);
29
- allModels.push(trimmed);
30
- }
31
-
32
22
  function sanitizeChannel(channel) {
33
23
  if (!channel) return null;
34
24
  return {
@@ -111,35 +101,54 @@ router.post('/start', async (req, res) => {
111
101
  }
112
102
 
113
103
  // 4. 设置代理配置(写入 OpenCode 配置文件)
114
- // 收集所有启用渠道配置的模型
115
- const allModels = [];
116
- const seen = new Set();
117
- enabledChannels.forEach((ch) => {
118
- const candidates = [
119
- ch.model,
120
- ch.speedTestModel
121
- ];
122
- if (ch.modelConfig && typeof ch.modelConfig === 'object') {
123
- candidates.push(ch.modelConfig.model, ch.modelConfig.opusModel, ch.modelConfig.sonnetModel, ch.modelConfig.haikuModel);
124
- }
125
- if (Array.isArray(ch.modelRedirects)) {
126
- ch.modelRedirects.forEach(r => { candidates.push(r && r.from); candidates.push(r && r.to); });
127
- }
128
- candidates.forEach((m) => pushUniqueModel(allModels, seen, m));
129
- });
104
+ // 收集每个渠道的模型列表,生成 per-channel provider 配置
130
105
 
131
- // 若渠道未显式填写模型,回退使用代理聚合模型(含 /v1/models 与模型探测结果)。
106
+ // 若渠道未显式填写模型,回退使用代理聚合模型(来自 /v1/models)。
107
+ let detectedModels = [];
132
108
  try {
133
- const detectedModels = await collectProxyModelList(enabledChannels, { forceRefresh: false });
134
- if (Array.isArray(detectedModels)) {
135
- detectedModels.forEach(modelId => pushUniqueModel(allModels, seen, modelId));
136
- }
109
+ detectedModels = await collectProxyModelList(enabledChannels, {
110
+ useCacheOnly: true
111
+ }) || [];
137
112
  } catch (error) {
138
113
  console.warn('[OpenCode Proxy] Failed to collect proxy models before writing config:', error.message);
139
114
  }
140
115
 
141
- const activeModel = currentChannel.model || currentChannel.speedTestModel || allModels[0] || null;
142
- setProxyConfig(proxyResult.port, { model: activeModel, models: allModels });
116
+ const channelPayloads = enabledChannels.map((ch) => {
117
+ let models;
118
+ if (Array.isArray(ch.allowedModels) && ch.allowedModels.length > 0) {
119
+ models = ch.allowedModels;
120
+ } else {
121
+ const seen = new Set();
122
+ const collected = [];
123
+ const add = (m) => {
124
+ if (typeof m !== 'string') return;
125
+ const t = m.trim();
126
+ if (!t) return;
127
+ const k = t.toLowerCase();
128
+ if (seen.has(k)) return;
129
+ seen.add(k);
130
+ collected.push(t);
131
+ };
132
+ [ch.model, ch.speedTestModel].forEach(add);
133
+ if (ch.modelConfig && typeof ch.modelConfig === 'object') {
134
+ [ch.modelConfig.model, ch.modelConfig.opusModel, ch.modelConfig.sonnetModel, ch.modelConfig.haikuModel].forEach(add);
135
+ }
136
+ if (Array.isArray(ch.modelRedirects)) {
137
+ ch.modelRedirects.forEach(r => { add(r && r.from); add(r && r.to); });
138
+ }
139
+ detectedModels.forEach(add);
140
+ models = collected;
141
+ }
142
+ return {
143
+ name: ch.name,
144
+ providerKey: ch.providerKey || ch.name,
145
+ model: ch.model || null,
146
+ models
147
+ };
148
+ });
149
+
150
+ const activeModel = currentChannel.model || currentChannel.speedTestModel || null;
151
+ setProxyConfig(proxyResult.port, { channels: channelPayloads, model: activeModel });
143
152
 
144
153
  // 5. 广播状态更新
145
154
  const { broadcastProxyState } = require('../websocket-server');