coding-tool-x 3.5.4 → 3.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/web/assets/{Analytics-CmN09J9U.js → Analytics-gvYu5sCM.js} +1 -1
- package/dist/web/assets/{ConfigTemplates-CeTAPmep.js → ConfigTemplates-CPlH8Ehd.js} +1 -1
- package/dist/web/assets/{Home-BYtCM3rK.js → Home-B-qbu3uk.js} +1 -1
- package/dist/web/assets/{PluginManager-OAH1eMO0.js → PluginManager-B2tQ_YUq.js} +1 -1
- package/dist/web/assets/{ProjectList-B0pIy1cv.js → ProjectList-kDadoXXs.js} +1 -1
- package/dist/web/assets/{SessionList-DbB6ASiA.js → SessionList-eLgITwTV.js} +1 -1
- package/dist/web/assets/{SkillManager-wp1dhL1z.js → SkillManager-B7zEB5Op.js} +1 -1
- package/dist/web/assets/{WorkspaceManager-Ce6wQoKb.js → WorkspaceManager-C-RzB3ud.js} +1 -1
- package/dist/web/assets/{index-B02wDWNC.css → index-BHeh2z0i.css} +1 -1
- package/dist/web/assets/index-DG00t-zy.js +2 -0
- package/dist/web/index.html +2 -2
- package/package.json +1 -1
- package/src/server/codex-proxy-server.js +24 -59
- package/src/server/gemini-proxy-server.js +25 -66
- package/src/server/opencode-proxy-server.js +24 -59
- package/src/server/proxy-server.js +18 -30
- package/src/server/services/base/response-usage-parser.js +187 -0
- package/src/server/services/proxy-log-helper.js +21 -3
- package/src/server/services/statistics-service.js +7 -0
- package/dist/web/assets/index-CHwVofQH.js +0 -2
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* response-usage-parser.js - 统一响应解析器
|
|
3
|
+
*
|
|
4
|
+
* 从各种 AI 提供商(Claude / OpenAI / Gemini)的 SSE 事件和
|
|
5
|
+
* 非流式 JSON 响应中提取模型名称和 token 用量信息。
|
|
6
|
+
*
|
|
7
|
+
* 所有 proxy server 共用此模块,避免重复代码,
|
|
8
|
+
* 并确保模型重定向后仍能正确解析不同格式的响应。
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* 从单个 SSE 事件的 parsed JSON 中提取 model 和 token 信息。
|
|
13
|
+
* 自动检测 Claude / OpenAI / Gemini 格式。
|
|
14
|
+
*
|
|
15
|
+
* @param {object} parsed - JSON.parse 后的事件数据
|
|
16
|
+
* @param {string} [eventType=''] - SSE event: 行的值(如 'message_start')
|
|
17
|
+
* @returns {{ model: string|null, tokens: object|null, isDone: boolean }}
|
|
18
|
+
*/
|
|
19
|
+
function parseSSEUsage(parsed, eventType) {
|
|
20
|
+
if (!parsed || typeof parsed !== 'object') {
|
|
21
|
+
return { model: null, tokens: null, isDone: false };
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
let model = null;
|
|
25
|
+
let tokens = null;
|
|
26
|
+
let isDone = false;
|
|
27
|
+
|
|
28
|
+
// === Claude SSE 格式 ===
|
|
29
|
+
// event: message_start → parsed.message.model
|
|
30
|
+
// event: message_delta / message_stop → parsed.usage
|
|
31
|
+
if (eventType === 'message_start' && parsed.message && parsed.message.model) {
|
|
32
|
+
model = parsed.message.model;
|
|
33
|
+
}
|
|
34
|
+
if (eventType === 'message_stop') {
|
|
35
|
+
isDone = true;
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// === OpenAI Responses API 格式 ===
|
|
39
|
+
// data: {"type": "response.completed", "response": {"model", "usage": {...}}}
|
|
40
|
+
if (parsed.type === 'response.completed' && parsed.response) {
|
|
41
|
+
if (parsed.response.model) {
|
|
42
|
+
model = parsed.response.model;
|
|
43
|
+
}
|
|
44
|
+
if (parsed.response.usage) {
|
|
45
|
+
tokens = {
|
|
46
|
+
input: parsed.response.usage.input_tokens || 0,
|
|
47
|
+
output: parsed.response.usage.output_tokens || 0,
|
|
48
|
+
total: parsed.response.usage.total_tokens || 0,
|
|
49
|
+
};
|
|
50
|
+
if (parsed.response.usage.input_tokens_details &&
|
|
51
|
+
parsed.response.usage.input_tokens_details.cached_tokens !== undefined) {
|
|
52
|
+
tokens.cached = parsed.response.usage.input_tokens_details.cached_tokens;
|
|
53
|
+
}
|
|
54
|
+
if (parsed.response.usage.output_tokens_details &&
|
|
55
|
+
parsed.response.usage.output_tokens_details.reasoning_tokens !== undefined) {
|
|
56
|
+
tokens.reasoning = parsed.response.usage.output_tokens_details.reasoning_tokens;
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
isDone = true;
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// === parsed.usage(Claude 原生 + OpenAI Chat Completions 共用) ===
|
|
63
|
+
if (!tokens && parsed.usage) {
|
|
64
|
+
const t = {};
|
|
65
|
+
|
|
66
|
+
// Claude 格式字段
|
|
67
|
+
if (parsed.usage.input_tokens !== undefined) {
|
|
68
|
+
t.input = parsed.usage.input_tokens;
|
|
69
|
+
}
|
|
70
|
+
if (parsed.usage.output_tokens !== undefined) {
|
|
71
|
+
t.output = parsed.usage.output_tokens;
|
|
72
|
+
}
|
|
73
|
+
if (parsed.usage.cache_creation_input_tokens !== undefined) {
|
|
74
|
+
t.cacheCreation = parsed.usage.cache_creation_input_tokens;
|
|
75
|
+
}
|
|
76
|
+
if (parsed.usage.cache_read_input_tokens !== undefined) {
|
|
77
|
+
t.cacheRead = parsed.usage.cache_read_input_tokens;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
// OpenAI Chat Completions 格式字段(fallback)
|
|
81
|
+
if (t.input === undefined && parsed.usage.prompt_tokens !== undefined) {
|
|
82
|
+
t.input = parsed.usage.prompt_tokens;
|
|
83
|
+
}
|
|
84
|
+
if (t.output === undefined && parsed.usage.completion_tokens !== undefined) {
|
|
85
|
+
t.output = parsed.usage.completion_tokens;
|
|
86
|
+
}
|
|
87
|
+
if (parsed.usage.total_tokens !== undefined) {
|
|
88
|
+
t.total = parsed.usage.total_tokens;
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
// OpenAI detailed breakdowns
|
|
92
|
+
if (parsed.usage.input_tokens_details &&
|
|
93
|
+
parsed.usage.input_tokens_details.cached_tokens !== undefined) {
|
|
94
|
+
t.cached = parsed.usage.input_tokens_details.cached_tokens;
|
|
95
|
+
}
|
|
96
|
+
if (parsed.usage.output_tokens_details &&
|
|
97
|
+
parsed.usage.output_tokens_details.reasoning_tokens !== undefined) {
|
|
98
|
+
t.reasoning = parsed.usage.output_tokens_details.reasoning_tokens;
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
// Gemini cache in OpenAI compat mode
|
|
102
|
+
if (parsed.usage.prompt_tokens_details &&
|
|
103
|
+
parsed.usage.prompt_tokens_details.cached_tokens !== undefined) {
|
|
104
|
+
t.cached = parsed.usage.prompt_tokens_details.cached_tokens;
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
if (Object.keys(t).length > 0) {
|
|
108
|
+
tokens = t;
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
// === Gemini Native 格式 ===
|
|
113
|
+
// parsed.usageMetadata.{promptTokenCount, candidatesTokenCount, ...}
|
|
114
|
+
if (!tokens && parsed.usageMetadata) {
|
|
115
|
+
tokens = {
|
|
116
|
+
input: parsed.usageMetadata.promptTokenCount || 0,
|
|
117
|
+
output: parsed.usageMetadata.candidatesTokenCount || 0,
|
|
118
|
+
total: parsed.usageMetadata.totalTokenCount || 0,
|
|
119
|
+
};
|
|
120
|
+
if (parsed.usageMetadata.cachedContentTokenCount) {
|
|
121
|
+
tokens.cached = parsed.usageMetadata.cachedContentTokenCount;
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
// === 通用 model fallback ===
|
|
126
|
+
if (!model && parsed.model) {
|
|
127
|
+
model = parsed.model;
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
return { model, tokens, isDone };
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
/**
 * Extract the model name and token usage from a complete non-streaming
 * JSON response body.
 *
 * @param {object} parsed - The JSON.parse'd response body
 * @returns {{ model: string|null, tokens: object|null, isDone: boolean }}
 */
function parseNonStreamingUsage(parsed) {
  // A non-streaming body carries the same shapes as a terminal SSE payload;
  // delegate with an empty event type so no event-specific branch fires.
  const noEventType = '';
  return parseSSEUsage(parsed, noEventType);
}
|
|
142
|
+
|
|
143
|
+
/**
 * Merge a parse result into an accumulating tokenData object.
 *
 * Only fields present on the parse result are written; existing values on
 * tokenData are left untouched otherwise.
 *
 * @param {object} tokenData - Per-request accumulator owned by a proxy server
 * @param {{ model: string|null, tokens: object|null, isDone: boolean }} usage - Return value of parseSSEUsage
 */
function mergeUsageIntoTokenData(tokenData, usage) {
  if (usage.model) {
    tokenData.model = usage.model;
  }

  const parsedTokens = usage.tokens;
  if (!parsedTokens) {
    return;
  }

  // Map from parse-result field name to the tokenData property it feeds.
  const fieldMap = [
    ['input', 'inputTokens'],
    ['output', 'outputTokens'],
    ['cacheCreation', 'cacheCreation'],
    ['cacheRead', 'cacheRead'],
    ['cached', 'cachedTokens'],
    ['reasoning', 'reasoningTokens'],
    ['total', 'totalTokens'],
  ];
  for (const [sourceKey, targetKey] of fieldMap) {
    if (parsedTokens[sourceKey] !== undefined) {
      tokenData[targetKey] = parsedTokens[sourceKey];
    }
  }
}
|
|
163
|
+
|
|
164
|
+
/**
 * Create a fresh tokenData accumulator with every counter zeroed and an
 * empty model name.
 *
 * @returns {object}
 */
function createTokenData() {
  const counterFields = [
    'inputTokens',
    'outputTokens',
    'cacheCreation',
    'cacheRead',
    'cachedTokens',
    'reasoningTokens',
    'totalTokens',
  ];
  const data = Object.fromEntries(counterFields.map((field) => [field, 0]));
  data.model = '';
  return data;
}
|
|
181
|
+
|
|
182
|
+
// Public API: shared response/usage parsing helpers used by every proxy server.
module.exports = {
  parseSSEUsage,
  parseNonStreamingUsage,
  mergeUsageIntoTokenData,
  createTokenData
};
|
|
@@ -64,12 +64,14 @@ function buildSuccessLogPayload({
|
|
|
64
64
|
requestId,
|
|
65
65
|
channel,
|
|
66
66
|
model,
|
|
67
|
+
originalModel,
|
|
68
|
+
redirectedModel,
|
|
67
69
|
tokens,
|
|
68
70
|
cost = 0,
|
|
69
71
|
timestamp = Date.now()
|
|
70
72
|
}) {
|
|
71
73
|
const normalized = normalizeUsageTokens(source, tokens);
|
|
72
|
-
|
|
74
|
+
const payload = {
|
|
73
75
|
type: 'log',
|
|
74
76
|
status: 'success',
|
|
75
77
|
id: requestId,
|
|
@@ -87,6 +89,13 @@ function buildSuccessLogPayload({
|
|
|
87
89
|
source: normalizeToolSource(source),
|
|
88
90
|
timestamp
|
|
89
91
|
};
|
|
92
|
+
if (originalModel) {
|
|
93
|
+
payload.originalModel = originalModel;
|
|
94
|
+
}
|
|
95
|
+
if (redirectedModel) {
|
|
96
|
+
payload.redirectedModel = redirectedModel;
|
|
97
|
+
}
|
|
98
|
+
return payload;
|
|
90
99
|
}
|
|
91
100
|
|
|
92
101
|
function buildFailureLogPayload({
|
|
@@ -160,6 +169,8 @@ function publishUsageLog({
|
|
|
160
169
|
requestId,
|
|
161
170
|
channel: metadata.channel,
|
|
162
171
|
model,
|
|
172
|
+
originalModel: metadata.originalModel,
|
|
173
|
+
redirectedModel: metadata.redirectedModel,
|
|
163
174
|
tokens: normalizedTokens,
|
|
164
175
|
cost,
|
|
165
176
|
timestamp
|
|
@@ -167,7 +178,7 @@ function publishUsageLog({
|
|
|
167
178
|
}
|
|
168
179
|
|
|
169
180
|
if (typeof recordRequest === 'function') {
|
|
170
|
-
|
|
181
|
+
const entry = {
|
|
171
182
|
id: requestId,
|
|
172
183
|
timestamp: new Date(metadata.startTime || timestamp).toISOString(),
|
|
173
184
|
toolType: normalizedSource === 'claude' ? 'claude-code' : normalizedSource,
|
|
@@ -186,7 +197,14 @@ function publishUsageLog({
|
|
|
186
197
|
duration: Math.max(0, timestamp - toNumber(metadata.startTime || timestamp)),
|
|
187
198
|
success: true,
|
|
188
199
|
cost
|
|
189
|
-
}
|
|
200
|
+
};
|
|
201
|
+
if (metadata.originalModel) {
|
|
202
|
+
entry.originalModel = metadata.originalModel;
|
|
203
|
+
}
|
|
204
|
+
if (metadata.redirectedModel) {
|
|
205
|
+
entry.redirectedModel = metadata.redirectedModel;
|
|
206
|
+
}
|
|
207
|
+
recordRequest(entry);
|
|
190
208
|
}
|
|
191
209
|
|
|
192
210
|
if (typeof recordSuccess === 'function' && metadata.channelId) {
|
|
@@ -242,6 +242,13 @@ function recordRequest(requestData) {
|
|
|
242
242
|
session,
|
|
243
243
|
project
|
|
244
244
|
};
|
|
245
|
+
// 如果有模型重定向信息,记录到日志中
|
|
246
|
+
if (requestData.originalModel) {
|
|
247
|
+
logEntry.originalModel = requestData.originalModel;
|
|
248
|
+
}
|
|
249
|
+
if (requestData.redirectedModel) {
|
|
250
|
+
logEntry.redirectedModel = requestData.redirectedModel;
|
|
251
|
+
}
|
|
245
252
|
appendRequestLog(logEntry);
|
|
246
253
|
|
|
247
254
|
// 2. 更新总体统计
|