@kaitranntt/ccs 3.3.0 → 3.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +66 -7
- package/VERSION +1 -1
- package/bin/{auth-commands.js → auth/auth-commands.js} +3 -3
- package/bin/ccs.js +38 -19
- package/bin/glmt/budget-calculator.js +114 -0
- package/bin/glmt/delta-accumulator.js +261 -0
- package/bin/glmt/glmt-proxy.js +488 -0
- package/bin/glmt/glmt-transformer.js +919 -0
- package/bin/glmt/locale-enforcer.js +80 -0
- package/bin/glmt/sse-parser.js +96 -0
- package/bin/glmt/task-classifier.js +162 -0
- package/bin/{doctor.js → management/doctor.js} +2 -2
- package/lib/ccs +1 -1
- package/lib/ccs.ps1 +1 -1
- package/package.json +1 -1
- package/scripts/dev-install.sh +35 -0
- package/bin/glmt-proxy.js +0 -307
- package/bin/glmt-transformer.js +0 -437
- /package/bin/{profile-detector.js → auth/profile-detector.js} +0 -0
- /package/bin/{profile-registry.js → auth/profile-registry.js} +0 -0
- /package/bin/{instance-manager.js → management/instance-manager.js} +0 -0
- /package/bin/{recovery-manager.js → management/recovery-manager.js} +0 -0
- /package/bin/{shared-manager.js → management/shared-manager.js} +0 -0
- /package/bin/{claude-detector.js → utils/claude-detector.js} +0 -0
- /package/bin/{config-manager.js → utils/config-manager.js} +0 -0
- /package/bin/{error-manager.js → utils/error-manager.js} +0 -0
- /package/bin/{helpers.js → utils/helpers.js} +0 -0
package/bin/glmt-transformer.js
DELETED
|
@@ -1,437 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
'use strict';
|
|
3
|
-
|
|
4
|
-
const crypto = require('crypto');
|
|
5
|
-
const fs = require('fs');
|
|
6
|
-
const path = require('path');
|
|
7
|
-
const os = require('os');
|
|
8
|
-
|
|
9
|
-
/**
 * GlmtTransformer - Convert between Anthropic and OpenAI formats with thinking support
 *
 * Features:
 * - Request: Anthropic → OpenAI (inject reasoning params)
 * - Response: OpenAI reasoning_content → Anthropic thinking blocks
 * - Debug mode: Log raw data to ~/.ccs/logs/ (CCS_DEBUG_LOG=1)
 * - Verbose mode: Console logging with timestamps
 * - Validation: Self-test transformation results
 *
 * Usage:
 * const transformer = new GlmtTransformer({ verbose: true, debugLog: true });
 * const { openaiRequest, thinkingConfig } = transformer.transformRequest(req);
 * const anthropicResponse = transformer.transformResponse(resp, thinkingConfig);
 *
 * Control Tags (in user prompt):
 * <Thinking:On|Off> - Enable/disable reasoning
 * <Effort:Low|Medium|High> - Control reasoning depth
 */
class GlmtTransformer {
  /**
   * @param {Object} [config={}] - Transformer configuration.
   * @param {boolean} [config.defaultThinking=true] - Thinking state used when no prompt tag overrides it.
   * @param {boolean} [config.verbose=false] - Emit timestamped progress logs to stderr.
   * @param {boolean} [config.debugLog] - Dump raw payloads to disk; defaults to CCS_DEBUG_LOG=1.
   * @param {string} [config.debugLogDir] - Directory for debug dumps (default: ~/.ccs/logs).
   */
  constructor(config = {}) {
    this.defaultThinking = config.defaultThinking ?? true;
    this.verbose = config.verbose || false;
    this.debugLog = config.debugLog ?? process.env.CCS_DEBUG_LOG === '1';
    this.debugLogDir = config.debugLogDir || path.join(os.homedir(), '.ccs', 'logs');
    // Per-model output-token ceilings consumed by _getMaxTokens().
    this.modelMaxTokens = {
      'GLM-4.6': 128000,
      'GLM-4.5': 96000,
      'GLM-4.5-air': 16000
    };
  }

  /**
   * Transform Anthropic request to OpenAI format.
   * On transformation failure the original request is returned unchanged,
   * together with an `error` message, so the caller can decide how to proceed.
   * @param {Object} anthropicRequest - Anthropic Messages API request
   * @returns {Object} { openaiRequest, thinkingConfig [, error] }
   */
  transformRequest(anthropicRequest) {
    // Log original request
    this._writeDebugLog('request-anthropic', anthropicRequest);

    try {
      // 1. Extract thinking control from messages
      const thinkingConfig = this._extractThinkingControl(
        anthropicRequest.messages || []
      );
      this.log(`Extracted thinking control: ${JSON.stringify(thinkingConfig)}`);

      // 2. Map model
      const glmModel = this._mapModel(anthropicRequest.model);

      // 3. Convert to OpenAI format
      const openaiRequest = {
        model: glmModel,
        messages: this._sanitizeMessages(anthropicRequest.messages || []),
        max_tokens: this._getMaxTokens(glmModel),
        stream: anthropicRequest.stream ?? false
      };

      // 4. Preserve optional parameters
      if (anthropicRequest.temperature !== undefined) {
        openaiRequest.temperature = anthropicRequest.temperature;
      }
      if (anthropicRequest.top_p !== undefined) {
        openaiRequest.top_p = anthropicRequest.top_p;
      }

      // 5. Handle streaming (not yet supported)
      // Silently override to buffered mode
      if (anthropicRequest.stream) {
        openaiRequest.stream = false;
      }

      // 6. Inject reasoning parameters
      this._injectReasoningParams(openaiRequest, thinkingConfig);

      // Log transformed request
      this._writeDebugLog('request-openai', openaiRequest);

      return { openaiRequest, thinkingConfig };
    } catch (error) {
      console.error('[glmt-transformer] Request transformation error:', error);
      // Return original request with warning
      return {
        openaiRequest: anthropicRequest,
        thinkingConfig: { thinking: false },
        error: error.message
      };
    }
  }

  /**
   * Transform OpenAI response to Anthropic format.
   * Builds thinking/text/tool_use content blocks and maps usage and
   * stop-reason fields to their Anthropic names. On failure returns a
   * minimal valid Anthropic message carrying the error text.
   * @param {Object} openaiResponse - OpenAI Chat Completions response
   * @param {Object} thinkingConfig - Config from request transformation
   * @returns {Object} Anthropic Messages API response
   */
  transformResponse(openaiResponse, thinkingConfig = {}) {
    // Log original response
    this._writeDebugLog('response-openai', openaiResponse);

    try {
      const choice = openaiResponse.choices?.[0];
      if (!choice) {
        throw new Error('No choices in OpenAI response');
      }

      const message = choice.message;
      // Explicit guard: a missing message used to surface as an opaque
      // TypeError; fail with a descriptive error instead.
      if (!message) {
        throw new Error('No message in OpenAI choice');
      }
      const content = [];

      // Add thinking block if reasoning_content exists
      if (message.reasoning_content) {
        const length = message.reasoning_content.length;
        const lineCount = message.reasoning_content.split('\n').length;
        const preview = message.reasoning_content
          .substring(0, 100)
          .replace(/\n/g, ' ')
          .trim();

        this.log(`Detected reasoning_content:`);
        this.log(`  Length: ${length} characters`);
        this.log(`  Lines: ${lineCount}`);
        this.log(`  Preview: ${preview}...`);

        content.push({
          type: 'thinking',
          thinking: message.reasoning_content,
          signature: this._generateThinkingSignature(message.reasoning_content)
        });
      } else {
        this.log('No reasoning_content in OpenAI response');
        this.log('Note: This is expected if thinking not requested or model cannot reason');
      }

      // Add text content
      if (message.content) {
        content.push({
          type: 'text',
          text: message.content
        });
      }

      // Handle tool_calls if present
      if (Array.isArray(message.tool_calls) && message.tool_calls.length > 0) {
        for (const toolCall of message.tool_calls) {
          content.push({
            type: 'tool_use',
            id: toolCall.id,
            name: toolCall.function.name,
            // Guarded parse: malformed model JSON must not nuke the response.
            input: this._parseToolArguments(toolCall.function.arguments)
          });
        }
      }

      const anthropicResponse = {
        id: openaiResponse.id || 'msg_' + Date.now(),
        type: 'message',
        role: 'assistant',
        content: content,
        model: openaiResponse.model || 'glm-4.6',
        stop_reason: this._mapStopReason(choice.finish_reason),
        // BUGFIX: OpenAI reports prompt_tokens/completion_tokens, but
        // Anthropic clients read input_tokens/output_tokens. The OpenAI
        // usage object was previously passed through unchanged, so token
        // accounting appeared as zero/missing downstream.
        usage: this._mapUsage(openaiResponse.usage)
      };

      // Validate transformation in verbose mode
      if (this.verbose) {
        const validation = this._validateTransformation(anthropicResponse);
        this.log(`Transformation validation: ${validation.passed}/${validation.total} checks passed`);
        if (!validation.valid) {
          this.log(`Failed checks: ${JSON.stringify(validation.checks, null, 2)}`);
        }
      }

      // Log transformed response
      this._writeDebugLog('response-anthropic', anthropicResponse);

      return anthropicResponse;
    } catch (error) {
      console.error('[glmt-transformer] Response transformation error:', error);
      // Return minimal valid response
      return {
        id: 'msg_error_' + Date.now(),
        type: 'message',
        role: 'assistant',
        content: [{
          type: 'text',
          text: '[Transformation Error] ' + error.message
        }],
        stop_reason: 'end_turn',
        usage: { input_tokens: 0, output_tokens: 0 }
      };
    }
  }

  /**
   * Map OpenAI token accounting to Anthropic field names.
   * Accepts either naming scheme so an upstream that already emits
   * Anthropic-style usage is passed through intact.
   * @param {Object|undefined} usage - Usage object from the OpenAI response
   * @returns {Object} { input_tokens, output_tokens }
   * @private
   */
  _mapUsage(usage) {
    return {
      input_tokens: usage?.prompt_tokens ?? usage?.input_tokens ?? 0,
      output_tokens: usage?.completion_tokens ?? usage?.output_tokens ?? 0
    };
  }

  /**
   * Parse tool-call arguments defensively.
   * The arguments string is model-generated and may be malformed JSON;
   * previously a parse failure threw inside transformResponse and the
   * whole response was replaced by an error stub.
   * @param {string|undefined} args - Raw JSON argument string
   * @returns {Object} Parsed arguments, or { _raw } on parse failure
   * @private
   */
  _parseToolArguments(args) {
    if (!args) return {};
    try {
      return JSON.parse(args);
    } catch (error) {
      this.log(`Failed to parse tool arguments: ${error.message}`);
      return { _raw: args };
    }
  }

  /**
   * Sanitize messages for OpenAI API compatibility
   * Remove thinking blocks and unsupported content types
   * @param {Array} messages - Messages array
   * @returns {Array} Sanitized messages
   * @private
   */
  _sanitizeMessages(messages) {
    return messages.map(msg => {
      // If content is a string, return as-is
      if (typeof msg.content === 'string') {
        return msg;
      }

      // If content is an array, filter out unsupported types
      if (Array.isArray(msg.content)) {
        // Keep only text content for OpenAI; drop thinking, tool_use,
        // tool_result, and any other block types.
        const sanitizedContent = msg.content.filter(block => block.type === 'text');

        // If we filtered everything out, return empty string
        if (sanitizedContent.length === 0) {
          return {
            role: msg.role,
            content: ''
          };
        }

        // If only one text block, convert to string
        if (sanitizedContent.length === 1 && sanitizedContent[0].type === 'text') {
          return {
            role: msg.role,
            content: sanitizedContent[0].text
          };
        }

        // Return array of text blocks
        return {
          role: msg.role,
          content: sanitizedContent
        };
      }

      // Fallback: return message as-is
      return msg;
    });
  }

  /**
   * Extract thinking control tags from user messages.
   * Later user messages override earlier ones; tags are matched
   * case-insensitively.
   * @param {Array} messages - Messages array
   * @returns {Object} { thinking: boolean, effort: string }
   * @private
   */
  _extractThinkingControl(messages) {
    const config = {
      thinking: this.defaultThinking,
      effort: 'medium'
    };

    // Scan user messages for control tags
    for (const msg of messages) {
      if (msg.role !== 'user') continue;

      const content = msg.content;
      if (typeof content !== 'string') continue;

      // Check for <Thinking:On|Off>
      const thinkingMatch = content.match(/<Thinking:(On|Off)>/i);
      if (thinkingMatch) {
        config.thinking = thinkingMatch[1].toLowerCase() === 'on';
      }

      // Check for <Effort:Low|Medium|High>
      const effortMatch = content.match(/<Effort:(Low|Medium|High)>/i);
      if (effortMatch) {
        config.effort = effortMatch[1].toLowerCase();
      }
    }

    return config;
  }

  /**
   * Generate thinking signature for Claude Code UI
   * @param {string} thinking - Thinking content
   * @returns {Object} Signature object (truncated SHA-256 hash + metadata)
   * @private
   */
  _generateThinkingSignature(thinking) {
    // Generate signature hash
    const hash = crypto.createHash('sha256')
      .update(thinking)
      .digest('hex')
      .substring(0, 16);

    return {
      type: 'thinking_signature',
      hash: hash,
      length: thinking.length,
      timestamp: Date.now()
    };
  }

  /**
   * Inject reasoning parameters into OpenAI request
   * @param {Object} openaiRequest - OpenAI request to modify (mutated in place)
   * @param {Object} thinkingConfig - Thinking configuration
   * @returns {Object} Modified request
   * @private
   */
  _injectReasoningParams(openaiRequest, thinkingConfig) {
    // Always enable sampling for temperature/top_p to work
    openaiRequest.do_sample = true;

    // Add thinking-specific parameters if enabled
    if (thinkingConfig.thinking) {
      // Z.AI may support these parameters (based on research)
      openaiRequest.reasoning = true;
      openaiRequest.reasoning_effort = thinkingConfig.effort;
    }

    return openaiRequest;
  }

  /**
   * Map Anthropic model to GLM model
   * @param {string} anthropicModel - Anthropic model name (currently unused)
   * @returns {string} GLM model name
   * @private
   */
  _mapModel(anthropicModel) {
    // Default to GLM-4.6 (latest and most capable)
    return 'GLM-4.6';
  }

  /**
   * Get max tokens for model
   * @param {string} model - Model name
   * @returns {number} Max tokens (128000 for unknown models)
   * @private
   */
  _getMaxTokens(model) {
    return this.modelMaxTokens[model] || 128000;
  }

  /**
   * Map OpenAI stop reason to Anthropic stop reason
   * @param {string} openaiReason - OpenAI finish_reason
   * @returns {string} Anthropic stop_reason ('end_turn' for unknown values)
   * @private
   */
  _mapStopReason(openaiReason) {
    const mapping = {
      'stop': 'end_turn',
      'length': 'max_tokens',
      'tool_calls': 'tool_use',
      'content_filter': 'stop_sequence'
    };
    return mapping[openaiReason] || 'end_turn';
  }

  /**
   * Write debug log to file
   * @param {string} type - 'request-anthropic', 'request-openai', 'response-openai', 'response-anthropic'
   * @param {object} data - Data to log
   * @private
   */
  _writeDebugLog(type, data) {
    if (!this.debugLog) return;

    try {
      // Filesystem-safe ISO timestamp (colons/dots replaced; the old
      // trailing .split('.')[0] was dead code since no dots remained).
      const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
      const filename = `${timestamp}-${type}.json`;
      const filepath = path.join(this.debugLogDir, filename);

      // Ensure directory exists
      fs.mkdirSync(this.debugLogDir, { recursive: true });

      // Write file (pretty-printed)
      fs.writeFileSync(filepath, JSON.stringify(data, null, 2) + '\n', 'utf8');

      if (this.verbose) {
        this.log(`Debug log written: ${filepath}`);
      }
    } catch (error) {
      console.error(`[glmt-transformer] Failed to write debug log: ${error.message}`);
    }
  }

  /**
   * Validate transformed Anthropic response
   * @param {object} anthropicResponse - Response to validate
   * @returns {object} Validation results { checks, passed, total, valid }
   * @private
   */
  _validateTransformation(anthropicResponse) {
    const checks = {
      hasContent: Boolean(anthropicResponse.content && anthropicResponse.content.length > 0),
      hasThinking: anthropicResponse.content?.some(block => block.type === 'thinking') || false,
      hasText: anthropicResponse.content?.some(block => block.type === 'text') || false,
      validStructure: anthropicResponse.type === 'message' && anthropicResponse.role === 'assistant',
      hasUsage: Boolean(anthropicResponse.usage)
    };

    const passed = Object.values(checks).filter(Boolean).length;
    const total = Object.keys(checks).length;

    return { checks, passed, total, valid: passed === total };
  }

  /**
   * Log message if verbose
   * @param {string} message - Message to log
   * @private
   */
  log(message) {
    if (this.verbose) {
      const timestamp = new Date().toTimeString().split(' ')[0]; // HH:MM:SS
      console.error(`[glmt-transformer] [${timestamp}] ${message}`);
    }
  }
}
|
|
436
|
-
|
|
437
|
-
// CommonJS export of the transformer class.
module.exports = GlmtTransformer;
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|