@kaitranntt/ccs 3.2.0 → 3.3.0
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +131 -128
- package/VERSION +1 -1
- package/bin/ccs.js +120 -4
- package/bin/glmt-proxy.js +307 -0
- package/bin/glmt-transformer.js +437 -0
- package/config/base-glmt.settings.json +17 -0
- package/lib/ccs +2 -1
- package/lib/ccs.ps1 +2 -1
- package/package.json +1 -1
- package/scripts/postinstall.js +105 -1
package/bin/glmt-transformer.js
ADDED
@@ -0,0 +1,437 @@
+#!/usr/bin/env node
+'use strict';
+
+const crypto = require('crypto');
+const fs = require('fs');
+const path = require('path');
+const os = require('os');
+
+/**
+ * GlmtTransformer - Convert between Anthropic and OpenAI formats with thinking support
+ *
+ * Features:
+ * - Request: Anthropic → OpenAI (inject reasoning params)
+ * - Response: OpenAI reasoning_content → Anthropic thinking blocks
+ * - Debug mode: Log raw data to ~/.ccs/logs/ (CCS_DEBUG_LOG=1)
+ * - Verbose mode: Console logging with timestamps
+ * - Validation: Self-test transformation results
+ *
+ * Usage:
+ *   const transformer = new GlmtTransformer({ verbose: true, debugLog: true });
+ *   const { openaiRequest, thinkingConfig } = transformer.transformRequest(req);
+ *   const anthropicResponse = transformer.transformResponse(resp, thinkingConfig);
+ *
+ * Control Tags (in user prompt):
+ *   <Thinking:On|Off> - Enable/disable reasoning
+ *   <Effort:Low|Medium|High> - Control reasoning depth
+ */
+class GlmtTransformer {
+  constructor(config = {}) {
+    this.defaultThinking = config.defaultThinking ?? true;
+    this.verbose = config.verbose || false;
+    this.debugLog = config.debugLog ?? process.env.CCS_DEBUG_LOG === '1';
+    this.debugLogDir = config.debugLogDir || path.join(os.homedir(), '.ccs', 'logs');
+    this.modelMaxTokens = {
+      'GLM-4.6': 128000,
+      'GLM-4.5': 96000,
+      'GLM-4.5-air': 16000
+    };
+  }
+
+  /**
+   * Transform Anthropic request to OpenAI format
+   * @param {Object} anthropicRequest - Anthropic Messages API request
+   * @returns {Object} { openaiRequest, thinkingConfig }
+   */
+  transformRequest(anthropicRequest) {
+    // Log original request
+    this._writeDebugLog('request-anthropic', anthropicRequest);
+
+    try {
+      // 1. Extract thinking control from messages
+      const thinkingConfig = this._extractThinkingControl(
+        anthropicRequest.messages || []
+      );
+      this.log(`Extracted thinking control: ${JSON.stringify(thinkingConfig)}`);
+
+      // 2. Map model
+      const glmModel = this._mapModel(anthropicRequest.model);
+
+      // 3. Convert to OpenAI format
+      const openaiRequest = {
+        model: glmModel,
+        messages: this._sanitizeMessages(anthropicRequest.messages || []),
+        max_tokens: this._getMaxTokens(glmModel),
+        stream: anthropicRequest.stream ?? false
+      };
+
+      // 4. Preserve optional parameters
+      if (anthropicRequest.temperature !== undefined) {
+        openaiRequest.temperature = anthropicRequest.temperature;
+      }
+      if (anthropicRequest.top_p !== undefined) {
+        openaiRequest.top_p = anthropicRequest.top_p;
+      }
+
+      // 5. Handle streaming (not yet supported)
+      // Silently override to buffered mode
+      if (anthropicRequest.stream) {
+        openaiRequest.stream = false;
+      }
+
+      // 6. Inject reasoning parameters
+      this._injectReasoningParams(openaiRequest, thinkingConfig);
+
+      // Log transformed request
+      this._writeDebugLog('request-openai', openaiRequest);
+
+      return { openaiRequest, thinkingConfig };
+    } catch (error) {
+      console.error('[glmt-transformer] Request transformation error:', error);
+      // Return original request with warning
+      return {
+        openaiRequest: anthropicRequest,
+        thinkingConfig: { thinking: false },
+        error: error.message
+      };
+    }
+  }
+
+  /**
+   * Transform OpenAI response to Anthropic format
+   * @param {Object} openaiResponse - OpenAI Chat Completions response
+   * @param {Object} thinkingConfig - Config from request transformation
+   * @returns {Object} Anthropic Messages API response
+   */
+  transformResponse(openaiResponse, thinkingConfig = {}) {
+    // Log original response
+    this._writeDebugLog('response-openai', openaiResponse);
+
+    try {
+      const choice = openaiResponse.choices?.[0];
+      if (!choice) {
+        throw new Error('No choices in OpenAI response');
+      }
+
+      const message = choice.message;
+      const content = [];
+
+      // Add thinking block if reasoning_content exists
+      if (message.reasoning_content) {
+        const length = message.reasoning_content.length;
+        const lineCount = message.reasoning_content.split('\n').length;
+        const preview = message.reasoning_content
+          .substring(0, 100)
+          .replace(/\n/g, ' ')
+          .trim();
+
+        this.log(`Detected reasoning_content:`);
+        this.log(`  Length: ${length} characters`);
+        this.log(`  Lines: ${lineCount}`);
+        this.log(`  Preview: ${preview}...`);
+
+        content.push({
+          type: 'thinking',
+          thinking: message.reasoning_content,
+          signature: this._generateThinkingSignature(message.reasoning_content)
+        });
+      } else {
+        this.log('No reasoning_content in OpenAI response');
+        this.log('Note: This is expected if thinking not requested or model cannot reason');
+      }
+
+      // Add text content
+      if (message.content) {
+        content.push({
+          type: 'text',
+          text: message.content
+        });
+      }
+
+      // Handle tool_calls if present
+      if (message.tool_calls && message.tool_calls.length > 0) {
+        message.tool_calls.forEach(toolCall => {
+          content.push({
+            type: 'tool_use',
+            id: toolCall.id,
+            name: toolCall.function.name,
+            input: JSON.parse(toolCall.function.arguments || '{}')
+          });
+        });
+      }
+
+      const anthropicResponse = {
+        id: openaiResponse.id || 'msg_' + Date.now(),
+        type: 'message',
+        role: 'assistant',
+        content: content,
+        model: openaiResponse.model || 'glm-4.6',
+        stop_reason: this._mapStopReason(choice.finish_reason),
+        usage: openaiResponse.usage || {
+          input_tokens: 0,
+          output_tokens: 0
+        }
+      };
+
+      // Validate transformation in verbose mode
+      if (this.verbose) {
+        const validation = this._validateTransformation(anthropicResponse);
+        this.log(`Transformation validation: ${validation.passed}/${validation.total} checks passed`);
+        if (!validation.valid) {
+          this.log(`Failed checks: ${JSON.stringify(validation.checks, null, 2)}`);
+        }
+      }
+
+      // Log transformed response
+      this._writeDebugLog('response-anthropic', anthropicResponse);
+
+      return anthropicResponse;
+    } catch (error) {
+      console.error('[glmt-transformer] Response transformation error:', error);
+      // Return minimal valid response
+      return {
+        id: 'msg_error_' + Date.now(),
+        type: 'message',
+        role: 'assistant',
+        content: [{
+          type: 'text',
+          text: '[Transformation Error] ' + error.message
+        }],
+        stop_reason: 'end_turn',
+        usage: { input_tokens: 0, output_tokens: 0 }
+      };
+    }
+  }
+
+  /**
+   * Sanitize messages for OpenAI API compatibility
+   * Remove thinking blocks and unsupported content types
+   * @param {Array} messages - Messages array
+   * @returns {Array} Sanitized messages
+   * @private
+   */
+  _sanitizeMessages(messages) {
+    return messages.map(msg => {
+      // If content is a string, return as-is
+      if (typeof msg.content === 'string') {
+        return msg;
+      }
+
+      // If content is an array, filter out unsupported types
+      if (Array.isArray(msg.content)) {
+        const sanitizedContent = msg.content
+          .filter(block => {
+            // Keep only text content for OpenAI
+            // Filter out: thinking, tool_use, tool_result, etc.
+            return block.type === 'text';
+          })
+          .map(block => {
+            // Return just the text content
+            return block;
+          });
+
+        // If we filtered everything out, return empty string
+        if (sanitizedContent.length === 0) {
+          return {
+            role: msg.role,
+            content: ''
+          };
+        }
+
+        // If only one text block, convert to string
+        if (sanitizedContent.length === 1 && sanitizedContent[0].type === 'text') {
+          return {
+            role: msg.role,
+            content: sanitizedContent[0].text
+          };
+        }
+
+        // Return array of text blocks
+        return {
+          role: msg.role,
+          content: sanitizedContent
+        };
+      }
+
+      // Fallback: return message as-is
+      return msg;
+    });
+  }
+
+  /**
+   * Extract thinking control tags from user messages
+   * @param {Array} messages - Messages array
+   * @returns {Object} { thinking: boolean, effort: string }
+   * @private
+   */
+  _extractThinkingControl(messages) {
+    const config = {
+      thinking: this.defaultThinking,
+      effort: 'medium'
+    };
+
+    // Scan user messages for control tags
+    for (const msg of messages) {
+      if (msg.role !== 'user') continue;
+
+      const content = msg.content;
+      if (typeof content !== 'string') continue;
+
+      // Check for <Thinking:On|Off>
+      const thinkingMatch = content.match(/<Thinking:(On|Off)>/i);
+      if (thinkingMatch) {
+        config.thinking = thinkingMatch[1].toLowerCase() === 'on';
+      }
+
+      // Check for <Effort:Low|Medium|High>
+      const effortMatch = content.match(/<Effort:(Low|Medium|High)>/i);
+      if (effortMatch) {
+        config.effort = effortMatch[1].toLowerCase();
+      }
+    }
+
+    return config;
+  }
+
+  /**
+   * Generate thinking signature for Claude Code UI
+   * @param {string} thinking - Thinking content
+   * @returns {Object} Signature object
+   * @private
+   */
+  _generateThinkingSignature(thinking) {
+    // Generate signature hash
+    const hash = crypto.createHash('sha256')
+      .update(thinking)
+      .digest('hex')
+      .substring(0, 16);
+
+    return {
+      type: 'thinking_signature',
+      hash: hash,
+      length: thinking.length,
+      timestamp: Date.now()
+    };
+  }
+
+  /**
+   * Inject reasoning parameters into OpenAI request
+   * @param {Object} openaiRequest - OpenAI request to modify
+   * @param {Object} thinkingConfig - Thinking configuration
+   * @returns {Object} Modified request
+   * @private
+   */
+  _injectReasoningParams(openaiRequest, thinkingConfig) {
+    // Always enable sampling for temperature/top_p to work
+    openaiRequest.do_sample = true;
+
+    // Add thinking-specific parameters if enabled
+    if (thinkingConfig.thinking) {
+      // Z.AI may support these parameters (based on research)
+      openaiRequest.reasoning = true;
+      openaiRequest.reasoning_effort = thinkingConfig.effort;
+    }
+
+    return openaiRequest;
+  }
+
+  /**
+   * Map Anthropic model to GLM model
+   * @param {string} anthropicModel - Anthropic model name
+   * @returns {string} GLM model name
+   * @private
+   */
+  _mapModel(anthropicModel) {
+    // Default to GLM-4.6 (latest and most capable)
+    return 'GLM-4.6';
+  }
+
+  /**
+   * Get max tokens for model
+   * @param {string} model - Model name
+   * @returns {number} Max tokens
+   * @private
+   */
+  _getMaxTokens(model) {
+    return this.modelMaxTokens[model] || 128000;
+  }
+
+  /**
+   * Map OpenAI stop reason to Anthropic stop reason
+   * @param {string} openaiReason - OpenAI finish_reason
+   * @returns {string} Anthropic stop_reason
+   * @private
+   */
+  _mapStopReason(openaiReason) {
+    const mapping = {
+      'stop': 'end_turn',
+      'length': 'max_tokens',
+      'tool_calls': 'tool_use',
+      'content_filter': 'stop_sequence'
+    };
+    return mapping[openaiReason] || 'end_turn';
+  }
+
+  /**
+   * Write debug log to file
+   * @param {string} type - 'request-anthropic', 'request-openai', 'response-openai', 'response-anthropic'
+   * @param {object} data - Data to log
+   * @private
+   */
+  _writeDebugLog(type, data) {
+    if (!this.debugLog) return;
+
+    try {
+      const timestamp = new Date().toISOString().replace(/[:.]/g, '-').split('.')[0];
+      const filename = `${timestamp}-${type}.json`;
+      const filepath = path.join(this.debugLogDir, filename);
+
+      // Ensure directory exists
+      fs.mkdirSync(this.debugLogDir, { recursive: true });
+
+      // Write file (pretty-printed)
+      fs.writeFileSync(filepath, JSON.stringify(data, null, 2) + '\n', 'utf8');
+
+      if (this.verbose) {
+        this.log(`Debug log written: ${filepath}`);
+      }
+    } catch (error) {
+      console.error(`[glmt-transformer] Failed to write debug log: ${error.message}`);
+    }
+  }
+
+  /**
+   * Validate transformed Anthropic response
+   * @param {object} anthropicResponse - Response to validate
+   * @returns {object} Validation results
+   * @private
+   */
+  _validateTransformation(anthropicResponse) {
+    const checks = {
+      hasContent: Boolean(anthropicResponse.content && anthropicResponse.content.length > 0),
+      hasThinking: anthropicResponse.content?.some(block => block.type === 'thinking') || false,
+      hasText: anthropicResponse.content?.some(block => block.type === 'text') || false,
+      validStructure: anthropicResponse.type === 'message' && anthropicResponse.role === 'assistant',
+      hasUsage: Boolean(anthropicResponse.usage)
+    };

+    const passed = Object.values(checks).filter(Boolean).length;
+    const total = Object.keys(checks).length;
+
+    return { checks, passed, total, valid: passed === total };
+  }
+
+  /**
+   * Log message if verbose
+   * @param {string} message - Message to log
+   * @private
+   */
+  log(message) {
+    if (this.verbose) {
+      const timestamp = new Date().toTimeString().split(' ')[0]; // HH:MM:SS
+      console.error(`[glmt-transformer] [${timestamp}] ${message}`);
+    }
+  }
+}
+
+module.exports = GlmtTransformer;
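The round trip described in the class docblock can be exercised directly. A minimal sketch, assuming the module is required from its package/bin/ location; the request and response payloads below are hypothetical:

const GlmtTransformer = require('./glmt-transformer');

const transformer = new GlmtTransformer({ verbose: false });

// Anthropic-style request; control tags ride along in the user prompt.
const { openaiRequest, thinkingConfig } = transformer.transformRequest({
  model: 'claude-sonnet-4',  // ignored: _mapModel() always returns 'GLM-4.6'
  messages: [{ role: 'user', content: '<Thinking:On> <Effort:High> Explain closures.' }],
  stream: true               // silently downgraded to buffered mode (stream: false)
});
// thinkingConfig => { thinking: true, effort: 'high' }
// openaiRequest now carries do_sample, reasoning, and reasoning_effort

// Simulated OpenAI-style reply carrying reasoning_content.
const anthropicResponse = transformer.transformResponse({
  id: 'chatcmpl-abc123',
  model: 'glm-4.6',
  choices: [{
    finish_reason: 'stop',
    message: {
      content: 'A closure is a function paired with its captured scope...',
      reasoning_content: 'The user wants a definition first, then an example...'
    }
  }],
  usage: { prompt_tokens: 12, completion_tokens: 42 }  // passed through verbatim; field names are not remapped
}, thinkingConfig);
// anthropicResponse.content => [{ type: 'thinking', ... }, { type: 'text', ... }]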
package/config/base-glmt.settings.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "env": {
+    "ANTHROPIC_BASE_URL": "https://api.z.ai/api/coding/paas/v4/chat/completions",
+    "ANTHROPIC_AUTH_TOKEN": "YOUR_GLM_API_KEY_HERE",
+    "ANTHROPIC_MODEL": "glm-4.6",
+    "ANTHROPIC_DEFAULT_OPUS_MODEL": "glm-4.6",
+    "ANTHROPIC_DEFAULT_SONNET_MODEL": "glm-4.6",
+    "ANTHROPIC_DEFAULT_HAIKU_MODEL": "glm-4.6",
+    "ANTHROPIC_TEMPERATURE": "0.2",
+    "ANTHROPIC_MAX_TOKENS": "65536",
+    "MAX_THINKING_TOKENS": "32768",
+    "ENABLE_STREAMING": "true",
+    "ANTHROPIC_SAFE_MODE": "false",
+    "API_TIMEOUT_MS": "3000000"
+  },
+  "alwaysThinkingEnabled": true
+}
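How a profile like this is applied is not part of this diff (that logic lives in bin/ccs.js). As an illustration only, layering the env block over the inherited environment before spawning a child process could look like the following; launchWithProfile is a hypothetical helper, not the package's actual loader:

// Hypothetical sketch: read a *.settings.json profile and spawn a command
// with its env block layered over the current environment.
const fs = require('fs');
const os = require('os');
const path = require('path');
const { spawn } = require('child_process');

function launchWithProfile(settingsFile, command, args = []) {
  const profile = JSON.parse(fs.readFileSync(settingsFile, 'utf8'));
  const env = { ...process.env, ...profile.env }; // profile values win over inherited ones
  return spawn(command, args, { env, stdio: 'inherit' });
}

launchWithProfile(path.join(os.homedir(), '.ccs', 'glmt.settings.json'), 'claude');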
package/lib/ccs
CHANGED
@@ -2,7 +2,7 @@
 set -euo pipefail
 
 # Version (updated by scripts/bump-version.sh)
-CCS_VERSION="3.2.0"
+CCS_VERSION="3.3.0"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 readonly CONFIG_FILE="${CCS_CONFIG:-$HOME/.ccs/config.json}"
 readonly PROFILES_JSON="$HOME/.ccs/profiles.json"
@@ -51,6 +51,7 @@ show_help() {
   echo -e "${CYAN}Model Switching:${RESET}"
   echo -e "  ${YELLOW}ccs${RESET}                       Use default Claude account"
   echo -e "  ${YELLOW}ccs glm${RESET}                   Switch to GLM 4.6 model"
+  echo -e "  ${YELLOW}ccs glmt${RESET}                  Switch to GLM with thinking mode"
   echo -e "  ${YELLOW}ccs kimi${RESET}                  Switch to Kimi for Coding"
   echo -e "  ${YELLOW}ccs glm${RESET} \"debug this code\"  Use GLM and run command"
   echo ""
package/lib/ccs.ps1
CHANGED
@@ -12,7 +12,7 @@ param(
 $ErrorActionPreference = "Stop"
 
 # Version (updated by scripts/bump-version.sh)
-$CcsVersion = "3.2.0"
+$CcsVersion = "3.3.0"
 $ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
 $ConfigFile = if ($env:CCS_CONFIG) { $env:CCS_CONFIG } else { "$env:USERPROFILE\.ccs\config.json" }
 $ProfilesJson = "$env:USERPROFILE\.ccs\profiles.json"
@@ -107,6 +107,7 @@ function Show-Help {
   Write-ColorLine "Model Switching:" "Cyan"
   Write-ColorLine "  ccs                       Use default Claude account" "Yellow"
   Write-ColorLine "  ccs glm                   Switch to GLM 4.6 model" "Yellow"
+  Write-ColorLine "  ccs glmt                  Switch to GLM with thinking mode" "Yellow"
   Write-ColorLine "  ccs kimi                  Switch to Kimi for Coding" "Yellow"
   Write-ColorLine "  ccs glm 'debug this code'  Use GLM and run command" "Yellow"
   Write-Host ""
package/package.json
CHANGED
package/scripts/postinstall.js
CHANGED
@@ -33,6 +33,7 @@ function validateConfiguration() {
   const requiredFiles = [
     { path: path.join(ccsDir, 'config.json'), name: 'config.json' },
     { path: path.join(ccsDir, 'glm.settings.json'), name: 'glm.settings.json' },
+    { path: path.join(ccsDir, 'glmt.settings.json'), name: 'glmt.settings.json' },
     { path: path.join(ccsDir, 'kimi.settings.json'), name: 'kimi.settings.json' }
   ];
 
@@ -108,6 +109,7 @@ function createConfigFiles() {
   const config = {
     profiles: {
       glm: '~/.ccs/glm.settings.json',
+      glmt: '~/.ccs/glmt.settings.json',
       kimi: '~/.ccs/kimi.settings.json',
       default: '~/.claude/settings.json'
     }
@@ -120,7 +122,21 @@
 
     console.log('[OK] Created config: ~/.ccs/config.json');
   } else {
-
+    // Update existing config with glmt if missing (migration for v3.x users)
+    const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
+    // Ensure profiles object exists
+    if (!config.profiles) {
+      config.profiles = {};
+    }
+    if (!config.profiles.glmt) {
+      config.profiles.glmt = '~/.ccs/glmt.settings.json';
+      const tmpPath = `${configPath}.tmp`;
+      fs.writeFileSync(tmpPath, JSON.stringify(config, null, 2) + '\n', 'utf8');
+      fs.renameSync(tmpPath, configPath);
+      console.log('[OK] Updated config with GLMT profile');
+    } else {
+      console.log('[OK] Config exists: ~/.ccs/config.json (preserved)');
+    }
   }
 
   // Create glm.settings.json if missing
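The config update above, and the profile writes in the next hunk, all use the same write-to-temp-then-rename pattern, so an interrupted install can leave a stale *.tmp file behind but never a half-written JSON file. Distilled into a standalone helper (hypothetical; postinstall.js inlines the pattern rather than defining one):

const fs = require('fs');

// Hypothetical helper mirroring the inline pattern in postinstall.js.
// rename() is atomic on POSIX filesystems, so readers never observe
// a partially written target file.
function writeJsonAtomic(targetPath, data) {
  const tmpPath = `${targetPath}.tmp`;
  fs.writeFileSync(tmpPath, JSON.stringify(data, null, 2) + '\n', 'utf8');
  fs.renameSync(tmpPath, targetPath);
}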
@@ -152,6 +168,94 @@
     console.log('[OK] GLM profile exists: ~/.ccs/glm.settings.json (preserved)');
   }
 
+  // Create glmt.settings.json if missing
+  const glmtSettingsPath = path.join(ccsDir, 'glmt.settings.json');
+  if (!fs.existsSync(glmtSettingsPath)) {
+    const glmtSettings = {
+      env: {
+        ANTHROPIC_BASE_URL: 'https://api.z.ai/api/coding/paas/v4/chat/completions',
+        ANTHROPIC_AUTH_TOKEN: 'YOUR_GLM_API_KEY_HERE',
+        ANTHROPIC_MODEL: 'glm-4.6',
+        ANTHROPIC_DEFAULT_OPUS_MODEL: 'glm-4.6',
+        ANTHROPIC_DEFAULT_SONNET_MODEL: 'glm-4.6',
+        ANTHROPIC_DEFAULT_HAIKU_MODEL: 'glm-4.6',
+        ANTHROPIC_TEMPERATURE: '0.2',
+        ANTHROPIC_MAX_TOKENS: '65536',
+        MAX_THINKING_TOKENS: '32768',
+        ENABLE_STREAMING: 'true',
+        ANTHROPIC_SAFE_MODE: 'false',
+        API_TIMEOUT_MS: '3000000'
+      },
+      alwaysThinkingEnabled: true
+    };
+
+    // Atomic write
+    const tmpPath = `${glmtSettingsPath}.tmp`;
+    fs.writeFileSync(tmpPath, JSON.stringify(glmtSettings, null, 2) + '\n', 'utf8');
+    fs.renameSync(tmpPath, glmtSettingsPath);
+
+    console.log('[OK] Created GLMT profile: ~/.ccs/glmt.settings.json');
+    console.log('');
+    console.log('  [!] Configure GLMT API key:');
+    console.log('      1. Get key from: https://api.z.ai');
+    console.log('      2. Edit: ~/.ccs/glmt.settings.json');
+    console.log('      3. Replace: YOUR_GLM_API_KEY_HERE');
+    console.log('      Note: GLMT enables GLM thinking mode (reasoning)');
+    console.log('      Defaults: Temperature 0.2, thinking enabled, 50min timeout');
+  } else {
+    console.log('[OK] GLMT profile exists: ~/.ccs/glmt.settings.json (preserved)');
+  }
+
+  // Migrate existing GLMT configs to include new defaults (v3.3.0)
+  if (fs.existsSync(glmtSettingsPath)) {
+    try {
+      const existing = JSON.parse(fs.readFileSync(glmtSettingsPath, 'utf8'));
+      let updated = false;
+
+      // Ensure env object exists
+      if (!existing.env) {
+        existing.env = {};
+        updated = true;
+      }
+
+      // Add missing env vars (preserve existing values)
+      const envDefaults = {
+        ANTHROPIC_TEMPERATURE: '0.2',
+        ANTHROPIC_MAX_TOKENS: '65536',
+        MAX_THINKING_TOKENS: '32768',
+        ENABLE_STREAMING: 'true',
+        ANTHROPIC_SAFE_MODE: 'false',
+        API_TIMEOUT_MS: '3000000'
+      };
+
+      for (const [key, value] of Object.entries(envDefaults)) {
+        if (existing.env[key] === undefined) {
+          existing.env[key] = value;
+          updated = true;
+        }
+      }
+
+      // Add alwaysThinkingEnabled if missing
+      if (existing.alwaysThinkingEnabled === undefined) {
+        existing.alwaysThinkingEnabled = true;
+        updated = true;
+      }
+
+      // Write back if updated
+      if (updated) {
+        const tmpPath = `${glmtSettingsPath}.tmp`;
+        fs.writeFileSync(tmpPath, JSON.stringify(existing, null, 2) + '\n', 'utf8');
+        fs.renameSync(tmpPath, glmtSettingsPath);
+        console.log('[OK] Migrated GLMT config with new defaults (v3.3.0)');
+        console.log('     Added: temperature, max_tokens, thinking settings, alwaysThinkingEnabled');
+      }
+    } catch (err) {
+      console.warn('[!] GLMT config migration failed:', err.message);
+      console.warn('    Existing config preserved, may be missing new defaults');
+      console.warn('    You can manually add fields or delete file to regenerate');
+    }
+  }
+
   // Create kimi.settings.json if missing
   const kimiSettingsPath = path.join(ccsDir, 'kimi.settings.json');
   if (!fs.existsSync(kimiSettingsPath)) {