@ebowwa/channel-ssh 1.1.0 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +190 -22
- package/package.json +2 -2
- package/src/index.ts +93 -25
package/dist/index.js
CHANGED
@@ -2,10 +2,10 @@
 // @bun
 
 // src/index.ts
+import { execSync } from "child_process";
 import { existsSync, readFileSync, writeFileSync, mkdirSync, watch } from "fs";
 import { homedir } from "os";
 import { join } from "path";
-import { GLMClient, GLMRateLimitError, GLMTimeoutError, GLMNetworkError } from "@ebowwa/ai";
 function requireEnv(name) {
   const value = process.env[name];
   if (!value) {
@@ -90,33 +90,201 @@ class ConversationMemory {
     this.save();
   }
 }
-var
-
-
-
+var TOOLS = [
+  {
+    name: "read_file",
+    description: "Read a file from the filesystem.",
+    parameters: {
+      type: "object",
+      properties: { path: { type: "string", description: "File path to read" } },
+      required: ["path"]
+    },
+    handler: async (args) => {
+      const path = args.path;
+      try {
+        if (!existsSync(path))
+          return `File not found: ${path}`;
+        const content = readFileSync(path, "utf-8");
+        return content.length > 4000 ? content.slice(0, 4000) + `
+...[truncated]` : content;
+      } catch (e) {
+        return `Error: ${e.message}`;
+      }
+    }
+  },
+  {
+    name: "write_file",
+    description: "Write content to a file.",
+    parameters: {
+      type: "object",
+      properties: {
+        path: { type: "string" },
+        content: { type: "string" }
+      },
+      required: ["path", "content"]
+    },
+    handler: async (args) => {
+      try {
+        writeFileSync(args.path, args.content);
+        return `Wrote ${args.content.length} bytes to ${args.path}`;
+      } catch (e) {
+        return `Error: ${e.message}`;
+      }
+    }
+  },
+  {
+    name: "run_command",
+    description: "Execute a shell command.",
+    parameters: {
+      type: "object",
+      properties: {
+        command: { type: "string" },
+        cwd: { type: "string" }
+      },
+      required: ["command"]
+    },
+    handler: async (args) => {
+      const cmd = args.command;
+      const blocked = ["rm -rf", "mkfs", "dd if=", "> /dev/"];
+      if (blocked.some((b) => cmd.includes(b)))
+        return "Blocked: dangerous command";
+      try {
+        const result = execSync(cmd, { timeout: 1e4, cwd: args.cwd || process.cwd() });
+        return result.toString() || "(no output)";
+      } catch (e) {
+        return e.stdout?.toString() || e.message;
+      }
+    }
+  },
+  {
+    name: "git_status",
+    description: "Check git repository status.",
+    parameters: { type: "object", properties: { cwd: { type: "string" } } },
+    handler: async (args) => {
+      const cwd = args.cwd || process.cwd();
+      try {
+        const status = execSync("git status 2>&1", { cwd }).toString();
+        const branch = execSync("git branch --show-current 2>&1", { cwd }).toString();
+        return `Branch: ${branch}
+
+${status}`;
+      } catch (e) {
+        return `Error: ${e.message}`;
+      }
+    }
+  },
+  {
+    name: "system_info",
+    description: "Get system resource info.",
+    parameters: { type: "object", properties: {} },
+    handler: async () => {
+      try {
+        const cpu = execSync('nproc 2>/dev/null || echo "unknown"').toString().trim();
+        const mem = execSync('free -h 2>/dev/null | grep Mem || echo "unknown"').toString().trim();
+        const disk = execSync('df -h / 2>/dev/null | tail -1 || echo "unknown"').toString().trim();
+        return `CPU: ${cpu} cores
+Memory: ${mem}
+Disk: ${disk}`;
+      } catch (e) {
+        return `Error: ${e.message}`;
+      }
+    }
+  }
+];
+function getGLMTools() {
+  return TOOLS.map((t) => ({
+    type: "function",
+    function: { name: t.name, description: t.description, parameters: t.parameters }
+  }));
+}
+async function executeTool(name, args) {
+  const tool = TOOLS.find((t) => t.name === name);
+  if (tool)
+    return tool.handler(args);
+  return `Unknown tool: ${name}`;
+}
+var GLM_API_ENDPOINT = "https://api.z.ai/api/coding/paas/v4/chat/completions";
+function getAPIKey() {
+  const envKey = process.env.ZAI_API_KEY || process.env.Z_AI_API_KEY || process.env.GLM_API_KEY;
+  if (envKey)
+    return envKey;
+  const keysJson = process.env.ZAI_API_KEYS || process.env.Z_AI_API_KEYS;
+  if (keysJson) {
+    try {
+      const keys = JSON.parse(keysJson);
+      if (Array.isArray(keys) && keys.length > 0) {
+        return keys[Math.floor(Math.random() * keys.length)];
+      }
+    } catch {}
   }
-
+  throw new Error("No API key found. Set ZAI_API_KEY env var.");
+}
+function sleep(ms) {
+  return new Promise((resolve) => setTimeout(resolve, ms));
 }
-
-
+function calculateBackoff(retryCount) {
+  return Math.min(1000 * Math.pow(2, retryCount), 1e4);
+}
+async function callGLM(messages, retryCount = 0) {
+  const apiKey = getAPIKey();
   try {
-const
-
-
-
-
-
+    const controller = new AbortController;
+    const timeoutId = setTimeout(() => controller.abort(), CONFIG.timeout);
+    const response = await fetch(GLM_API_ENDPOINT, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`
+      },
+      signal: controller.signal,
+      body: JSON.stringify({
+        model: CONFIG.model,
+        messages: messages.map((m) => ({ role: m.role, content: m.content })),
+        tools: getGLMTools(),
+        temperature: CONFIG.temperature,
+        max_tokens: CONFIG.maxTokens
+      })
     });
-
-
-
-
+    clearTimeout(timeoutId);
+    if (!response.ok) {
+      const text = await response.text();
+      if ((response.status === 429 || response.status >= 500) && retryCount < CONFIG.maxRetries) {
+        const backoff = calculateBackoff(retryCount);
+        console.log(`GLM API error ${response.status}, retrying in ${backoff}ms (${retryCount + 1}/${CONFIG.maxRetries})`);
+        await sleep(backoff);
+        return callGLM(messages, retryCount + 1);
+      }
+      throw new Error(`GLM API error: ${response.status} - ${text}`);
     }
-
-
+    const data = await response.json();
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new Error("No response from GLM");
     }
-if (
-
+    if (choice.message?.tool_calls && choice.message.tool_calls.length > 0) {
+      const toolResults = [];
+      for (const tc of choice.message.tool_calls) {
+        const toolName = tc.function?.name;
+        const toolArgs = tc.function?.arguments ? JSON.parse(tc.function.arguments) : {};
+        const result = await executeTool(toolName, toolArgs);
+        toolResults.push(`[${toolName}]: ${result}`);
+      }
+      const updatedMessages = [
+        ...messages,
+        { role: "assistant", content: choice.message.content || "", timestamp: Date.now() },
+        { role: "user", content: `Tool results:
+${toolResults.join(`
+`)}`, timestamp: Date.now() }
+      ];
+      return callGLM(updatedMessages, 0);
+    }
+    return choice.message?.content || "(no response)";
+  } catch (error) {
+    if (error instanceof Error && (error.name === "AbortError" || error.message.includes("fetch")) && retryCount < CONFIG.maxRetries) {
+      const backoff = calculateBackoff(retryCount);
+      console.log(`Network error, retrying in ${backoff}ms (${retryCount + 1}/${CONFIG.maxRetries})`);
+      await sleep(backoff);
+      return callGLM(messages, retryCount + 1);
     }
     throw error;
   }
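A note on the retry behavior introduced above: calculateBackoff doubles the delay on each attempt and caps it at 10 seconds (the bundle writes the cap as 1e4). A minimal TypeScript sketch of the resulting schedule, with maxRetries = 5 assumed purely for illustration (the package takes the real limit from CONFIG.maxRetries, which this diff does not show):

// Sketch of the backoff schedule used by callGLM.
function calculateBackoff(retryCount: number): number {
  return Math.min(1000 * Math.pow(2, retryCount), 10000);
}

const maxRetries = 5; // assumption for this demo; the package uses CONFIG.maxRetries
for (let attempt = 0; attempt < maxRetries; attempt++) {
  console.log(`retry ${attempt + 1} waits ${calculateBackoff(attempt)}ms`);
}
// retry 1 waits 1000ms, retry 2 waits 2000ms, retry 3 waits 4000ms,
// retry 4 waits 8000ms, retry 5 waits 10000ms (capped)

Note that a successful tool-call round trip resets the counter: callGLM(updatedMessages, 0) starts the follow-up request with a fresh retry budget.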
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ebowwa/channel-ssh",
-  "version": "1.1.0",
+  "version": "1.1.2",
   "description": "SSH chat channel for GLM AI - configurable via environment variables",
   "type": "module",
   "main": "./dist/index.js",
@@ -14,7 +14,7 @@
     "prepublishOnly": "bun run build"
   },
   "dependencies": {
-    "@ebowwa/ai": "^0.
+    "@ebowwa/ai": "^0.2.2"
   },
   "devDependencies": {
     "@types/bun": "latest",
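Since @ebowwa/ai is still on a 0.x version, the caret in ^0.2.2 admits only patch-level updates within 0.2: it matches 0.2.2 through 0.2.x but never 0.3.0. A quick check with the standard semver package (hypothetical consumer code, not part of this package):

import semver from 'semver';

console.log(semver.satisfies('0.2.9', '^0.2.2')); // true
console.log(semver.satisfies('0.3.0', '^0.2.2')); // false: a 0.x caret stops at the next minor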
package/src/index.ts
CHANGED
@@ -24,7 +24,6 @@ import { execSync } from 'child_process';
 import { existsSync, readFileSync, writeFileSync, mkdirSync, watch } from 'fs';
 import { homedir } from 'os';
 import { join } from 'path';
-import { GLMClient, GLMRateLimitError, GLMTimeoutError, GLMNetworkError } from '@ebowwa/ai';
 
 // ====================================================================
 // Configuration (all via environment variables - REQUIRED)
@@ -258,44 +257,113 @@ async function executeTool(name: string, args: Record<string, unknown>): Promise
 }
 
 // ====================================================================
-// GLM API Client (
+// GLM API Client (direct fetch with tools + retry logic)
 // ====================================================================
 
-
+const GLM_API_ENDPOINT = 'https://api.z.ai/api/coding/paas/v4/chat/completions';
 
-function
-
-
+function getAPIKey(): string {
+  const envKey = process.env.ZAI_API_KEY || process.env.Z_AI_API_KEY || process.env.GLM_API_KEY;
+  if (envKey) return envKey;
+
+  const keysJson = process.env.ZAI_API_KEYS || process.env.Z_AI_API_KEYS;
+  if (keysJson) {
+    try {
+      const keys = JSON.parse(keysJson);
+      if (Array.isArray(keys) && keys.length > 0) {
+        return keys[Math.floor(Math.random() * keys.length)];
+      }
+    } catch {}
   }
-
+
+  throw new Error('No API key found. Set ZAI_API_KEY env var.');
 }
 
-
-
+function sleep(ms: number): Promise<void> {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+function calculateBackoff(retryCount: number): number {
+  return Math.min(1000 * Math.pow(2, retryCount), 10000);
+}
+
+async function callGLM(messages: Message[], retryCount = 0): Promise<string> {
+  const apiKey = getAPIKey();
 
   try {
-const
-
-
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), CONFIG.timeout);
+
+    const response = await fetch(GLM_API_ENDPOINT, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${apiKey}`
+      },
+      signal: controller.signal,
+      body: JSON.stringify({
         model: CONFIG.model,
+        messages: messages.map(m => ({ role: m.role, content: m.content })),
+        tools: getGLMTools(),
         temperature: CONFIG.temperature,
-
-
-
+        max_tokens: CONFIG.maxTokens
+      })
+    });
+
+    clearTimeout(timeoutId);
+
+    if (!response.ok) {
+      const text = await response.text();
+
+      // Retry on 429 (rate limit) or 5xx errors
+      if ((response.status === 429 || response.status >= 500) && retryCount < CONFIG.maxRetries) {
+        const backoff = calculateBackoff(retryCount);
+        console.log(`GLM API error ${response.status}, retrying in ${backoff}ms (${retryCount + 1}/${CONFIG.maxRetries})`);
+        await sleep(backoff);
+        return callGLM(messages, retryCount + 1);
       }
-);
 
-
-
-
-
-
+      throw new Error(`GLM API error: ${response.status} - ${text}`);
+    }
+
+    const data = await response.json();
+    const choice = data.choices?.[0];
+
+    if (!choice) {
+      throw new Error('No response from GLM');
     }
-
-
+
+    // Handle tool calls
+    if (choice.message?.tool_calls && choice.message.tool_calls.length > 0) {
+      const toolResults: string[] = [];
+
+      for (const tc of choice.message.tool_calls) {
+        const toolName = tc.function?.name;
+        const toolArgs = tc.function?.arguments ? JSON.parse(tc.function.arguments) : {};
+        const result = await executeTool(toolName, toolArgs);
+        toolResults.push(`[${toolName}]: ${result}`);
+      }
+
+      // Add assistant message with tool calls and user message with results
+      const updatedMessages = [
+        ...messages,
+        { role: 'assistant' as const, content: choice.message.content || '', timestamp: Date.now() },
+        { role: 'user' as const, content: `Tool results:\n${toolResults.join('\n')}`, timestamp: Date.now() }
+      ];
+
+      // Continue conversation with tool results
+      return callGLM(updatedMessages, 0);
     }
-
-
+
+    return choice.message?.content || '(no response)';
+
+  } catch (error) {
+    // Retry on network errors or timeout
+    if ((error instanceof Error && (error.name === 'AbortError' || error.message.includes('fetch'))) && retryCount < CONFIG.maxRetries) {
+      const backoff = calculateBackoff(retryCount);
+      console.log(`Network error, retrying in ${backoff}ms (${retryCount + 1}/${CONFIG.maxRetries})`);
+      await sleep(backoff);
+      return callGLM(messages, retryCount + 1);
     }
     throw error;
   }
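For reference, the getGLMTools() helper that the new callGLM passes as tools (visible in the dist bundle above) reshapes each local tool definition into the OpenAI-style function-calling format the chat/completions body expects. Reconstructed from the definitions in this diff (not captured from a live request), the tools entry for read_file would look like this:

// Shape produced by getGLMTools() for read_file, per the tool definitions in this diff.
const readFileTool = {
  type: 'function',
  function: {
    name: 'read_file',
    description: 'Read a file from the filesystem.',
    parameters: {
      type: 'object',
      properties: { path: { type: 'string', description: 'File path to read' } },
      required: ['path']
    }
  }
};

The handlers themselves never leave the process: when the model responds with tool_calls, executeTool dispatches by name, and the stringified results are appended as a follow-up user message before callGLM recurses.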