@kaitranntt/ccs 3.3.0 → 3.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/glmt-proxy.js DELETED
@@ -1,307 +0,0 @@
- #!/usr/bin/env node
- 'use strict';
-
- const http = require('http');
- const https = require('https');
- const GlmtTransformer = require('./glmt-transformer');
-
- /**
-  * GlmtProxy - Embedded HTTP proxy for GLM thinking support
-  *
-  * Architecture:
-  * - Intercepts Claude CLI → Z.AI calls
-  * - Transforms Anthropic format → OpenAI format
-  * - Converts reasoning_content → thinking blocks
-  * - Buffered mode only (streaming not supported)
-  *
-  * Lifecycle:
-  * - Spawned by bin/ccs.js when 'glmt' profile detected
-  * - Binds to 127.0.0.1:random_port (security + avoid conflicts)
-  * - Terminates when parent process exits
-  *
-  * Debugging:
-  * - Verbose: Pass --verbose to see request/response logs
-  * - Debug: Set CCS_DEBUG_LOG=1 to write logs to ~/.ccs/logs/
-  *
-  * Usage:
-  *   const proxy = new GlmtProxy({ verbose: true });
-  *   await proxy.start();
-  */
- class GlmtProxy {
-   constructor(config = {}) {
-     this.transformer = new GlmtTransformer({ verbose: config.verbose });
-     this.upstreamUrl = 'https://api.z.ai/api/coding/paas/v4/chat/completions';
-     this.server = null;
-     this.port = null;
-     this.verbose = config.verbose || false;
-     this.timeout = config.timeout || 120000; // 120s default
-   }
-
-   /**
-    * Start HTTP server on random port
-    * @returns {Promise<number>} Port number
-    */
-   async start() {
-     return new Promise((resolve, reject) => {
-       this.server = http.createServer((req, res) => {
-         this.handleRequest(req, res);
-       });
-
-       // Bind to 127.0.0.1:0 (random port for security + avoid conflicts)
-       this.server.listen(0, '127.0.0.1', () => {
-         this.port = this.server.address().port;
-         // Signal parent process
-         console.log(`PROXY_READY:${this.port}`);
-         // One-time info message (always shown)
-         console.error(`[glmt] Proxy listening on port ${this.port} (buffered mode)`);
-
-         // Debug mode notice
-         if (this.transformer.debugLog) {
-           console.error(`[glmt] Debug logging enabled: ${this.transformer.debugLogDir}`);
-           console.error(`[glmt] WARNING: Debug logs contain full request/response data`);
-         }
-
-         this.log(`Verbose logging enabled`);
-         resolve(this.port);
-       });
-
-       this.server.on('error', (error) => {
-         console.error('[glmt-proxy] Server error:', error);
-         reject(error);
-       });
-     });
-   }
-
-   /**
-    * Handle incoming HTTP request
-    * @param {http.IncomingMessage} req - Request
-    * @param {http.ServerResponse} res - Response
-    */
-   async handleRequest(req, res) {
-     const startTime = Date.now();
-     this.log(`Request: ${req.method} ${req.url}`);
-
-     try {
-       // Only accept POST requests
-       if (req.method !== 'POST') {
-         res.writeHead(405, { 'Content-Type': 'application/json' });
-         res.end(JSON.stringify({ error: 'Method not allowed' }));
-         return;
-       }
-
-       // Read request body
-       const body = await this._readBody(req);
-       this.log(`Request body size: ${body.length} bytes`);
-
-       // Parse JSON with error handling
-       let anthropicRequest;
-       try {
-         anthropicRequest = JSON.parse(body);
-       } catch (jsonError) {
-         res.writeHead(400, { 'Content-Type': 'application/json' });
-         res.end(JSON.stringify({
-           error: {
-             type: 'invalid_request_error',
-             message: 'Invalid JSON in request body: ' + jsonError.message
-           }
-         }));
-         return;
-       }
-
-       // Transform to OpenAI format
-       const { openaiRequest, thinkingConfig } =
-         this.transformer.transformRequest(anthropicRequest);
-
-       this.log(`Transformed request, thinking: ${thinkingConfig.thinking}`);
-
-       // Forward to Z.AI
-       const openaiResponse = await this._forwardToUpstream(
-         openaiRequest,
-         req.headers
-       );
-
-       this.log(`Received response from upstream`);
-
-       // Transform back to Anthropic format
-       const anthropicResponse = this.transformer.transformResponse(
-         openaiResponse,
-         thinkingConfig
-       );
-
-       // Return to Claude CLI
-       res.writeHead(200, {
-         'Content-Type': 'application/json',
-         'Access-Control-Allow-Origin': '*'
-       });
-       res.end(JSON.stringify(anthropicResponse));
-
-       const duration = Date.now() - startTime;
-       this.log(`Request completed in ${duration}ms`);
-
-     } catch (error) {
-       console.error('[glmt-proxy] Request error:', error.message);
-       const duration = Date.now() - startTime;
-       this.log(`Request failed after ${duration}ms: ${error.message}`);
-
-       res.writeHead(500, { 'Content-Type': 'application/json' });
-       res.end(JSON.stringify({
-         error: {
-           type: 'proxy_error',
-           message: error.message
-         }
-       }));
-     }
-   }
-
-   /**
-    * Read request body
-    * @param {http.IncomingMessage} req - Request
-    * @returns {Promise<string>} Body content
-    * @private
-    */
-   _readBody(req) {
-     return new Promise((resolve, reject) => {
-       const chunks = [];
-       const maxSize = 10 * 1024 * 1024; // 10MB limit
-       let totalSize = 0;
-
-       req.on('data', chunk => {
-         totalSize += chunk.length;
-         if (totalSize > maxSize) {
-           reject(new Error('Request body too large (max 10MB)'));
-           return;
-         }
-         chunks.push(chunk);
-       });
-
-       req.on('end', () => resolve(Buffer.concat(chunks).toString()));
-       req.on('error', reject);
-     });
-   }
-
-   /**
-    * Forward request to Z.AI upstream
-    * @param {Object} openaiRequest - OpenAI format request
-    * @param {Object} originalHeaders - Original request headers
-    * @returns {Promise<Object>} OpenAI response
-    * @private
-    */
-   _forwardToUpstream(openaiRequest, originalHeaders) {
-     return new Promise((resolve, reject) => {
-       const url = new URL(this.upstreamUrl);
-       const requestBody = JSON.stringify(openaiRequest);
-
-       const options = {
-         hostname: url.hostname,
-         port: url.port || 443,
-         path: '/api/coding/paas/v4/chat/completions', // OpenAI-compatible endpoint
-         method: 'POST',
-         headers: {
-           'Content-Type': 'application/json',
-           'Content-Length': Buffer.byteLength(requestBody),
-           // Preserve auth header from original request
-           'Authorization': originalHeaders['authorization'] || '',
-           'User-Agent': 'CCS-GLMT-Proxy/1.0'
-         }
-       };
-
-       // Debug logging
-       this.log(`Forwarding to: ${url.hostname}${options.path}`);
-
-       // Set timeout
-       const timeoutHandle = setTimeout(() => {
-         req.destroy();
-         reject(new Error('Upstream request timeout'));
-       }, this.timeout);
-
-       const req = https.request(options, (res) => {
-         clearTimeout(timeoutHandle);
-
-         const chunks = [];
-         res.on('data', chunk => chunks.push(chunk));
-
-         res.on('end', () => {
-           try {
-             const body = Buffer.concat(chunks).toString();
-             this.log(`Upstream response size: ${body.length} bytes`);
-
-             // Check for non-200 status
-             if (res.statusCode !== 200) {
-               reject(new Error(
-                 `Upstream error: ${res.statusCode} ${res.statusMessage}\n${body}`
-               ));
-               return;
-             }
-
-             const response = JSON.parse(body);
-             resolve(response);
-           } catch (error) {
-             reject(new Error('Invalid JSON from upstream: ' + error.message));
-           }
-         });
-       });
-
-       req.on('error', (error) => {
-         clearTimeout(timeoutHandle);
-         reject(error);
-       });
-
-       req.write(requestBody);
-       req.end();
-     });
-   }
-
-   /**
-    * Stop proxy server
-    */
-   stop() {
-     if (this.server) {
-       this.log('Stopping proxy server');
-       this.server.close();
-     }
-   }
-
-   /**
-    * Log message if verbose
-    * @param {string} message - Message to log
-    * @private
-    */
-   log(message) {
-     if (this.verbose) {
-       console.error(`[glmt-proxy] ${message}`);
-     }
-   }
- }
-
- // Main entry point
- if (require.main === module) {
-   const args = process.argv.slice(2);
-   const verbose = args.includes('--verbose') || args.includes('-v');
-
-   const proxy = new GlmtProxy({ verbose });
-
-   proxy.start().catch(error => {
-     console.error('[glmt-proxy] Failed to start:', error);
-     process.exit(1);
-   });
-
-   // Cleanup on signals
-   process.on('SIGTERM', () => {
-     proxy.stop();
-     process.exit(0);
-   });
-
-   process.on('SIGINT', () => {
-     proxy.stop();
-     process.exit(0);
-   });
-
-   // Keep process alive
-   process.on('uncaughtException', (error) => {
-     console.error('[glmt-proxy] Uncaught exception:', error);
-     proxy.stop();
-     process.exit(1);
-   });
- }
-
- module.exports = GlmtProxy;
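
For context on what this deletion removes: the file's lifecycle notes say the proxy was spawned by bin/ccs.js when the 'glmt' profile was detected, and it signaled readiness by printing PROXY_READY:<port> on stdout once bound to 127.0.0.1. The sketch below shows one way a parent process could consume that handshake. It is a minimal illustration, not code from this package: the startGlmtProxy name, the spawn arguments, and the ANTHROPIC_BASE_URL hint are assumptions.

'use strict';

const { spawn } = require('child_process');

function startGlmtProxy() {
  return new Promise((resolve, reject) => {
    // Hypothetical invocation; the real parent was bin/ccs.js in this package.
    const child = spawn(process.execPath, ['bin/glmt-proxy.js', '--verbose']);

    let buffered = '';
    child.stdout.on('data', (chunk) => {
      buffered += chunk.toString();
      // The proxy printed "PROXY_READY:<port>" on stdout once listening;
      // informational messages went to stderr.
      const match = buffered.match(/PROXY_READY:(\d+)/);
      if (match) {
        resolve({ child, port: Number(match[1]) });
      }
    });

    child.on('error', reject);
    child.on('exit', (code) => {
      reject(new Error(`proxy exited before ready (code ${code})`));
    });
  });
}

A caller would then point the Claude CLI at http://127.0.0.1:<port> (for example via ANTHROPIC_BASE_URL) and send SIGTERM to the child on shutdown, which the proxy handled by closing its server and exiting.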