@ebowwa/channel-ssh 1.0.1 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.js +150 -264
  2. package/package.json +4 -6
  3. package/src/index.ts +200 -202
package/dist/index.js CHANGED
@@ -2,20 +2,58 @@
  // @bun

  // src/index.ts
- import { execSync } from "child_process";
- import { existsSync, readFileSync, writeFileSync } from "fs";
+ import { existsSync, readFileSync, writeFileSync, mkdirSync, watch } from "fs";
  import { homedir } from "os";
  import { join } from "path";
- var SESSION_NAME = process.env.SSH_CHAT_SESSION || "ssh-chat";
- var GLM_API_ENDPOINT = "https://api.z.ai/api/coding/paas/v4/chat/completions";
- var MEMORY_FILE = process.env.SSH_MEMORY_FILE || join(homedir(), ".ssh-chat-memory.json");
- var PROMPTS_FILE = process.env.PROMPTS_FILE || join(homedir(), ".ssh-chat-prompts.json");
+ import { GLMClient, GLMRateLimitError, GLMTimeoutError, GLMNetworkError } from "@ebowwa/ai";
+ function requireEnv(name) {
+ const value = process.env[name];
+ if (!value) {
+ throw new Error(`Missing required environment variable: ${name}`);
+ }
+ return value;
+ }
+ function requireEnvInt(name) {
+ return parseInt(requireEnv(name), 10);
+ }
+ function requireEnvFloat(name) {
+ return parseFloat(requireEnv(name));
+ }
+ var CONFIG = {
+ chatDir: process.env.SSH_CHAT_DIR ?? join(homedir(), ".ssh-chat"),
+ model: requireEnv("GLM_MODEL"),
+ maxRetries: requireEnvInt("GLM_MAX_RETRIES"),
+ timeout: requireEnvInt("GLM_TIMEOUT_MS"),
+ temperature: requireEnvFloat("GLM_TEMPERATURE"),
+ maxTokens: requireEnvInt("GLM_MAX_TOKENS"),
+ pollInterval: requireEnvInt("SSH_CHAT_POLL_MS"),
+ memoryLimit: requireEnvInt("SSH_CHAT_MEMORY_LIMIT"),
+ contextLimit: requireEnvInt("SSH_CHAT_CONTEXT_LIMIT")
+ };
+ var IN_FILE = join(CONFIG.chatDir, "in");
+ var OUT_FILE = join(CONFIG.chatDir, "out");
+ var STATUS_FILE = join(CONFIG.chatDir, "status");
+ var MEMORY_FILE = join(CONFIG.chatDir, "memory.json");
+ function ensureDir() {
+ if (!existsSync(CONFIG.chatDir)) {
+ mkdirSync(CONFIG.chatDir, { recursive: true });
+ }
+ }
+ function setStatus(status) {
+ writeFileSync(STATUS_FILE, JSON.stringify({ status, timestamp: Date.now() }));
+ }
+ function writeOutput(text) {
+ const timestamp = new Date().toISOString();
+ writeFileSync(OUT_FILE, `[${timestamp}]
+ ${text}
+ `);
+ }

  class ConversationMemory {
  file;
  messages = [];
  maxMessages;
- constructor(file, maxMessages = 50) {
+ constructor(file, maxMessages = CONFIG.memoryLimit) {
  this.file = file;
  this.maxMessages = maxMessages;
  this.load();
@@ -52,281 +90,129 @@ class ConversationMemory {
  this.save();
  }
  }
- var TOOLS = [
- {
- name: "read_file",
- description: "Read a file from the filesystem.",
- parameters: {
- type: "object",
- properties: { path: { type: "string", description: "File path to read" } },
- required: ["path"]
- },
- handler: async (args) => {
- const path = args.path;
- try {
- if (!existsSync(path))
- return `File not found: ${path}`;
- const content = readFileSync(path, "utf-8");
- return content.length > 4000 ? content.slice(0, 4000) + `
- ...[truncated]` : content;
- } catch (e) {
- return `Error: ${e.message}`;
- }
- }
- },
- {
- name: "write_file",
- description: "Write content to a file.",
- parameters: {
- type: "object",
- properties: {
- path: { type: "string" },
- content: { type: "string" }
- },
- required: ["path", "content"]
- },
- handler: async (args) => {
- try {
- writeFileSync(args.path, args.content);
- return `Wrote ${args.content.length} bytes to ${args.path}`;
- } catch (e) {
- return `Error: ${e.message}`;
- }
- }
- },
- {
- name: "run_command",
- description: "Execute a shell command.",
- parameters: {
- type: "object",
- properties: {
- command: { type: "string" },
- cwd: { type: "string" }
- },
- required: ["command"]
- },
- handler: async (args) => {
- const cmd = args.command;
- const blocked = ["rm -rf", "mkfs", "dd if=", "> /dev/"];
- if (blocked.some((b) => cmd.includes(b)))
- return "Blocked: dangerous command";
- try {
- const result = execSync(cmd, { timeout: 1e4, cwd: args.cwd || process.cwd() });
- return result.toString() || "(no output)";
- } catch (e) {
- return e.stdout?.toString() || e.message;
- }
+ var glmClient = null;
+ function getClient() {
+ if (!glmClient) {
+ glmClient = new GLMClient;
+ }
+ return glmClient;
+ }
+ async function callGLM(messages) {
+ const client = getClient();
+ try {
+ const response = await client.chatCompletion(messages.map((m) => ({ role: m.role, content: m.content })), {
+ model: CONFIG.model,
+ temperature: CONFIG.temperature,
+ maxTokens: CONFIG.maxTokens,
+ maxRetries: CONFIG.maxRetries,
+ timeout: CONFIG.timeout
+ });
+ return response.choices[0]?.message?.content || "(no response)";
+ } catch (error) {
+ if (error instanceof GLMRateLimitError) {
+ throw new Error(`Rate limit exceeded after ${CONFIG.maxRetries} retries. Please try again later or check API credits.`);
  }
- },
- {
- name: "git_status",
- description: "Check git repository status.",
- parameters: { type: "object", properties: { cwd: { type: "string" } } },
- handler: async (args) => {
- const cwd = args.cwd || process.cwd();
- try {
- const status = execSync("git status 2>&1", { cwd }).toString();
- const branch = execSync("git branch --show-current 2>&1", { cwd }).toString();
- return `Branch: ${branch}
-
- ${status}`;
- } catch (e) {
- return `Error: ${e.message}`;
- }
+ if (error instanceof GLMTimeoutError) {
+ throw new Error(`Request timed out after ${CONFIG.maxRetries} retries.`);
  }
- },
- {
- name: "system_info",
- description: "Get system resource info.",
- parameters: { type: "object", properties: {} },
- handler: async () => {
- try {
- const cpu = execSync('nproc 2>/dev/null || echo "unknown"').toString().trim();
- const mem = execSync('free -h 2>/dev/null | grep Mem || echo "unknown"').toString().trim();
- const disk = execSync('df -h / 2>/dev/null | tail -1 || echo "unknown"').toString().trim();
- return `CPU: ${cpu} cores
- Memory: ${mem}
- Disk: ${disk}`;
- } catch (e) {
- return `Error: ${e.message}`;
- }
+ if (error instanceof GLMNetworkError) {
+ throw new Error(`Network error after ${CONFIG.maxRetries} retries: ${error.message}`);
  }
+ throw error;
  }
- ];
- function getGLMTools() {
- return TOOLS.map((t) => ({
- type: "function",
- function: { name: t.name, description: t.description, parameters: t.parameters }
- }));
  }
- async function executeTool(name, args) {
- const tool = TOOLS.find((t) => t.name === name);
- if (tool)
- return tool.handler(args);
- return `Unknown tool: ${name}`;
- }
- function getAPIKey() {
- const envKey = process.env.ZAI_API_KEY || process.env.GLM_API_KEY;
- if (envKey)
- return envKey;
- const keysJson = process.env.ZAI_API_KEYS;
- if (keysJson) {
- try {
- const keys = JSON.parse(keysJson);
- if (Array.isArray(keys) && keys.length > 0) {
- return keys[Math.floor(Math.random() * keys.length)];
- }
- } catch {}
- }
- throw new Error("No API key found. Set ZAI_API_KEY or ZAI_API_KEYS env var.");
- }
- async function callGLM(messages, tools) {
- const apiKey = getAPIKey();
- const response = await fetch(GLM_API_ENDPOINT, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
- },
- body: JSON.stringify({
- model: "glm-4-plus",
- messages: messages.map((m) => ({ role: m.role, content: m.content })),
- tools: getGLMTools(),
- temperature: 0.7,
- max_tokens: 4096
- })
- });
- if (!response.ok) {
- const text = await response.text();
- throw new Error(`GLM API error: ${response.status} - ${text}`);
- }
- const data = await response.json();
- const choice = data.choices?.[0];
- if (!choice) {
- throw new Error("No response from GLM");
- }
- if (choice.message?.tool_calls) {
- const toolResults = [];
- for (const tc of choice.message.tool_calls) {
- const toolName = tc.function?.name;
- const toolArgs = tc.function?.arguments ? JSON.parse(tc.function.arguments) : {};
- const result = await executeTool(toolName, toolArgs);
- toolResults.push(`[${toolName}]: ${result}`);
+ async function processMessage(input, memory) {
+ if (input.startsWith("/")) {
+ if (input === "/clear") {
+ memory.clear();
+ return "Memory cleared.";
  }
- messages.push({ role: "assistant", content: "", timestamp: Date.now() });
- messages.push({ role: "user", content: `Tool results:
- ${toolResults.join(`
- `)}`, timestamp: Date.now() });
- return callGLM(messages, tools);
- }
- return choice.message?.content || "(no response)";
- }
- function tmux(args) {
- try {
- return execSync(`tmux ${args}`, { encoding: "utf-8" }).trim();
- } catch (e) {
- return e.stdout?.toString().trim() || "";
- }
- }
- function sessionExists() {
- const result = tmux(`has-session -t ${SESSION_NAME} 2>/dev/null`);
- return !result.includes("no session");
- }
- function createSession() {
- if (!sessionExists()) {
- tmux(`new-session -d -s ${SESSION_NAME} -x 200 -y 50`);
- tmux(`send-keys -t ${SESSION_NAME} '\uD83E\uDD16 SSH Chat Channel - Type your message and press Enter' Enter`);
- tmux(`send-keys -t ${SESSION_NAME} '\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501' Enter`);
- tmux(`send-keys -t ${SESSION_NAME} '' Enter`);
- console.log(`Created tmux session: ${SESSION_NAME}`);
- }
- }
- function getPaneContent() {
- return tmux(`capture-pane -t ${SESSION_NAME} -p -S -100`);
- }
- function sendToPane(text) {
- const lines = text.split(`
- `);
- tmux(`send-keys -t ${SESSION_NAME} '' Enter`);
- for (const line of lines) {
- const escaped = line.replace(/["'$`\\]/g, "\\$&");
- tmux(`send-keys -t ${SESSION_NAME} '${escaped}' Enter`);
- }
- tmux(`send-keys -t ${SESSION_NAME} '' Enter`);
- tmux(`send-keys -t ${SESSION_NAME} '\uD83D\uDC64 You: '`);
- }
- var lastContent = "";
- function detectNewInput() {
- const currentContent = getPaneContent();
- if (currentContent === lastContent) {
- return null;
- }
- const lastLines = lastContent.split(`
- `);
- const currentLines = currentContent.split(`
- `);
- const newLines = [];
- let foundLast = false;
- for (const line of currentLines) {
- if (!foundLast) {
- if (line === lastLines[lastLines.length - 1]) {
- foundLast = true;
- }
- } else {
- if (!line.includes("\uD83D\uDC64 You:") && line.trim()) {
- newLines.push(line.trim());
- }
+ if (input === "/help") {
+ return `Commands:
+ /clear - Clear conversation memory
+ /help - Show this help
+ /status - Show system status
+
+ Just type a message to chat with AI.`;
+ }
+ if (input === "/status") {
+ return `Status: running
+ Memory file: ${MEMORY_FILE}
+ Chat dir: ${CONFIG.chatDir}`;
  }
+ return `Unknown command: ${input}. Type /help for available commands.`;
  }
- lastContent = currentContent;
- const input = newLines.join(" ").trim();
- return input || null;
+ memory.add("user", input);
+ const messages = memory.getContext(CONFIG.contextLimit);
+ return await callGLM(messages);
  }
  async function main() {
- console.log("\uD83E\uDD16 SSH Chat Channel starting...");
- console.log(`Session: ${SESSION_NAME}`);
+ console.log("SSH Chat Channel starting...");
+ console.log(`Chat dir: ${CONFIG.chatDir}`);
  console.log(`Memory: ${MEMORY_FILE}`);
- createSession();
+ console.log("");
+ console.log("Usage:");
+ console.log(` Write message: echo "your message" > ${IN_FILE}`);
+ console.log(` Read response: cat ${OUT_FILE}`);
+ console.log("");
+ ensureDir();
+ try {
+ getClient();
+ console.log("GLM client initialized with retry support");
+ } catch (e) {
+ console.error("Failed to initialize GLM client:", e.message);
+ process.exit(1);
+ }
  const memory = new ConversationMemory(MEMORY_FILE);
- memory.add("system", `You are an AI assistant accessible via SSH tmux session.
+ memory.add("system", `You are an AI assistant accessible via SSH.
  You are helpful, concise, and can execute tools to help the user.
  This is a private SSH channel separate from any Telegram or other chat interfaces.`);
- console.log("Ready. Monitoring tmux session for input...");
- console.log(`Attach with: tmux attach -t ${SESSION_NAME}`);
- while (true) {
+ if (!existsSync(IN_FILE))
+ writeFileSync(IN_FILE, "");
+ if (!existsSync(OUT_FILE))
+ writeFileSync(OUT_FILE, `Ready. Send a message.
+ `);
+ setStatus("idle");
+ let lastContent = "";
+ console.log("Watching for messages...");
+ const watcher = watch(CONFIG.chatDir, (eventType, filename) => {
+ if (filename === "in" && eventType === "change") {
+ processIncoming();
+ }
+ });
+ async function processIncoming() {
  try {
- const input = detectNewInput();
- if (input && input.length > 0) {
- if (input.startsWith("/")) {
- if (input === "/clear") {
- memory.clear();
- sendToPane("\uD83D\uDDD1\uFE0F Memory cleared.");
- } else if (input === "/exit" || input === "/quit") {
- sendToPane("\uD83D\uDC4B Goodbye!");
- break;
- } else {
- sendToPane(`Unknown command: ${input}`);
- }
- continue;
- }
- console.log(`[${new Date().toISOString()}] Input: ${input.slice(0, 50)}...`);
- memory.add("user", input);
- const messages = memory.getContext(20);
- const response = await callGLM(messages, TOOLS);
- memory.add("assistant", response);
- sendToPane(`\uD83E\uDD16 AI: ${response}`);
- console.log(`[${new Date().toISOString()}] Response sent`);
- }
- await new Promise((r) => setTimeout(r, 500));
+ const content = readFileSync(IN_FILE, "utf-8").trim();
+ if (!content || content === lastContent)
+ return;
+ lastContent = content;
+ setStatus("processing");
+ console.log(`[${new Date().toISOString()}] Processing: ${content.slice(0, 50)}...`);
+ writeFileSync(IN_FILE, "");
+ const response = await processMessage(content, memory);
+ writeOutput(response);
+ memory.add("assistant", response);
+ setStatus("idle");
+ console.log(`[${new Date().toISOString()}] Response sent`);
  } catch (error) {
  console.error("Error:", error);
- await new Promise((r) => setTimeout(r, 2000));
+ setStatus("error");
+ writeOutput(`Error: ${error.message}`);
  }
  }
- }
- process.on("SIGINT", () => {
- console.log(`
+ setInterval(() => {
+ try {
+ const content = readFileSync(IN_FILE, "utf-8").trim();
+ if (content && content !== lastContent) {
+ processIncoming();
+ }
+ } catch {}
+ }, CONFIG.pollInterval);
+ process.on("SIGINT", () => {
+ console.log(`
  Shutting down...`);
- process.exit(0);
- });
+ watcher.close();
+ process.exit(0);
+ });
+ }
  main().catch(console.error);
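
The rebuilt bundle drops the tmux capture-pane polling loop (tmux, sessionExists, createSession, sendToPane, detectNewInput) in favor of watching the chat directory with fs.watch plus a setInterval poll as a fallback, since fs.watch can miss events on some platforms. A minimal sketch of that watch-plus-poll pattern in isolation; the helper name and its return value are illustrative and not part of the package API:

    import { watch, readFileSync } from "fs";
    import { join } from "path";

    // Hypothetical helper: call onChange whenever `file` inside `dir` gains new content,
    // using fs.watch for low latency and a poll timer as a safety net.
    function watchWithPollFallback(
      dir: string,
      file: string,
      onChange: (content: string) => void,
      pollMs: number
    ): () => void {
      let last = "";
      const check = () => {
        try {
          const content = readFileSync(join(dir, file), "utf-8").trim();
          if (content && content !== last) {
            last = content;
            onChange(content);
          }
        } catch {
          // the file may not exist yet; retry on the next event or poll
        }
      };
      const watcher = watch(dir, (eventType, filename) => {
        if (filename === file && eventType === "change") check();
      });
      const timer = setInterval(check, pollMs);
      return () => {
        watcher.close();
        clearInterval(timer);
      };
    }

The package inlines the same logic in main() rather than extracting a helper, with the poll interval coming from SSH_CHAT_POLL_MS.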
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@ebowwa/channel-ssh",
- "version": "1.0.1",
- "description": "SSH tmux channel for GLM Daemon - separate from Telegram",
+ "version": "1.1.0",
+ "description": "SSH chat channel for GLM AI - configurable via environment variables",
  "type": "module",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
@@ -9,13 +9,12 @@
  "channel-ssh": "./dist/index.js"
  },
  "scripts": {
- "build": "bun build src/index.ts --outdir dist --target bun --external '@ebowwa/*' --external 'node-telegram-bot-api'",
+ "build": "bun build src/index.ts --outdir dist --target bun --external '@ebowwa/*'",
  "dev": "bun run src/index.ts",
  "prepublishOnly": "bun run build"
  },
  "dependencies": {
- "@ebowwa/structured-prompts": "^0.3.2",
- "@ebowwa/terminal": "^0.3.0"
+ "@ebowwa/ai": "^0.1.0"
  },
  "devDependencies": {
  "@types/bun": "latest",
@@ -23,7 +22,6 @@
  },
  "keywords": [
  "ssh",
- "tmux",
  "channel",
  "glm",
  "ai"
package/src/index.ts CHANGED
@@ -2,29 +2,85 @@
  /**
  * SSH Channel for GLM Daemon
  *
- * Provides AI chat via SSH tmux session - completely separate from Telegram
+ * Provides AI chat via file-based IPC - works with systemd and tmux
  *
  * Usage:
- * bun run src/index.ts
+ * Direct: bun run src/index.ts
+ * With tmux wrapper: tmux new-session -s ssh-chat "channel-ssh-interactive"
+ *
+ * Communication:
+ * IN_FILE: ~/.ssh-chat/in (user writes messages here)
+ * OUT_FILE: ~/.ssh-chat/out (AI responses here)
+ * STATUS_FILE: ~/.ssh-chat/status (processing/idle)
  *
  * Features:
- * - Creates/attaches to tmux session "ssh-chat"
- * - Monitors pane for user input (lines ending with Enter)
- * - GLM-4.7 AI responses
+ * - File-based IPC for systemd compatibility
+ * - GLM-4.7 AI responses with retry logic (via @ebowwa/ai)
  * - Separate conversation memory from Telegram
  * - Tool support (read_file, run_command, etc.)
  */

- import { execSync, spawn } from 'child_process';
- import { existsSync, readFileSync, writeFileSync, appendFileSync } from 'fs';
- import { getStore } from '@ebowwa/structured-prompts';
+ import { execSync } from 'child_process';
+ import { existsSync, readFileSync, writeFileSync, mkdirSync, watch } from 'fs';
  import { homedir } from 'os';
  import { join } from 'path';
+ import { GLMClient, GLMRateLimitError, GLMTimeoutError, GLMNetworkError } from '@ebowwa/ai';
+
+ // ====================================================================
+ // Configuration (all via environment variables - REQUIRED)
+ // ====================================================================
+
+ function requireEnv(name: string): string {
+ const value = process.env[name];
+ if (!value) {
+ throw new Error(`Missing required environment variable: ${name}`);
+ }
+ return value;
+ }
+
+ function requireEnvInt(name: string): number {
+ return parseInt(requireEnv(name), 10);
+ }
+
+ function requireEnvFloat(name: string): number {
+ return parseFloat(requireEnv(name));
+ }

- const SESSION_NAME = process.env.SSH_CHAT_SESSION || 'ssh-chat';
- const GLM_API_ENDPOINT = 'https://api.z.ai/api/coding/paas/v4/chat/completions';
- const MEMORY_FILE = process.env.SSH_MEMORY_FILE || join(homedir(), '.ssh-chat-memory.json');
- const PROMPTS_FILE = process.env.PROMPTS_FILE || join(homedir(), '.ssh-chat-prompts.json');
+ const CONFIG = {
+ chatDir: process.env.SSH_CHAT_DIR ?? join(homedir(), '.ssh-chat'), // only optional one
+ model: requireEnv('GLM_MODEL'),
+ maxRetries: requireEnvInt('GLM_MAX_RETRIES'),
+ timeout: requireEnvInt('GLM_TIMEOUT_MS'),
+ temperature: requireEnvFloat('GLM_TEMPERATURE'),
+ maxTokens: requireEnvInt('GLM_MAX_TOKENS'),
+ pollInterval: requireEnvInt('SSH_CHAT_POLL_MS'),
+ memoryLimit: requireEnvInt('SSH_CHAT_MEMORY_LIMIT'),
+ contextLimit: requireEnvInt('SSH_CHAT_CONTEXT_LIMIT'),
+ };
+
+ const IN_FILE = join(CONFIG.chatDir, 'in');
+ const OUT_FILE = join(CONFIG.chatDir, 'out');
+ const STATUS_FILE = join(CONFIG.chatDir, 'status');
+ const MEMORY_FILE = join(CONFIG.chatDir, 'memory.json');
+
+ // ====================================================================
+ // Setup
+ // ====================================================================
+
+ function ensureDir(): void {
+ if (!existsSync(CONFIG.chatDir)) {
+ mkdirSync(CONFIG.chatDir, { recursive: true });
+ }
+ }
+
+ function setStatus(status: 'idle' | 'processing' | 'error' | 'retrying'): void {
+ writeFileSync(STATUS_FILE, JSON.stringify({ status, timestamp: Date.now() }));
+ }
+
+ function writeOutput(text: string): void {
+ const timestamp = new Date().toISOString();
+ writeFileSync(OUT_FILE, `[${timestamp}]\n${text}\n`);
+ }

  // ====================================================================
  // Conversation Memory (Separate from Telegram)
@@ -40,7 +96,7 @@ class ConversationMemory {
  private messages: Message[] = [];
  private maxMessages: number;

- constructor(private file: string, maxMessages = 50) {
+ constructor(private file: string, maxMessages = CONFIG.memoryLimit) {
  this.maxMessages = maxMessages;
  this.load();
  }
@@ -202,237 +258,179 @@ async function executeTool(name: string, args: Record<string, unknown>): Promise
  }

  // ====================================================================
- // GLM API Client
+ // GLM API Client (using @ebowwa/ai with retry logic)
  // ====================================================================

- function getAPIKey(): string {
- // Try environment variable first
- const envKey = process.env.ZAI_API_KEY || process.env.GLM_API_KEY;
- if (envKey) return envKey;
+ let glmClient: GLMClient | null = null;

- // Try rolling keys
- const keysJson = process.env.ZAI_API_KEYS;
- if (keysJson) {
- try {
- const keys = JSON.parse(keysJson);
- if (Array.isArray(keys) && keys.length > 0) {
- return keys[Math.floor(Math.random() * keys.length)];
- }
- } catch {}
+ function getClient(): GLMClient {
+ if (!glmClient) {
+ glmClient = new GLMClient();
  }
-
- throw new Error('No API key found. Set ZAI_API_KEY or ZAI_API_KEYS env var.');
+ return glmClient;
  }

- async function callGLM(messages: Message[], tools: typeof TOOLS): Promise<string> {
- const apiKey = getAPIKey();
+ async function callGLM(messages: Message[]): Promise<string> {
+ const client = getClient();

- const response = await fetch(GLM_API_ENDPOINT, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'Authorization': `Bearer ${apiKey}`
- },
- body: JSON.stringify({
- model: 'glm-4-plus',
- messages: messages.map(m => ({ role: m.role, content: m.content })),
- tools: getGLMTools(),
- temperature: 0.7,
- max_tokens: 4096
- })
- });
-
- if (!response.ok) {
- const text = await response.text();
- throw new Error(`GLM API error: ${response.status} - ${text}`);
- }
-
- const data = await response.json();
- const choice = data.choices?.[0];
+ try {
+ const response = await client.chatCompletion(
+ messages.map(m => ({ role: m.role, content: m.content })),
+ {
+ model: CONFIG.model,
+ temperature: CONFIG.temperature,
+ maxTokens: CONFIG.maxTokens,
+ maxRetries: CONFIG.maxRetries,
+ timeout: CONFIG.timeout
+ }
+ );

- if (!choice) {
- throw new Error('No response from GLM');
+ return response.choices[0]?.message?.content || '(no response)';
+ } catch (error) {
+ // Provide better error messages based on error type
+ if (error instanceof GLMRateLimitError) {
+ throw new Error(`Rate limit exceeded after ${CONFIG.maxRetries} retries. Please try again later or check API credits.`);
+ }
+ if (error instanceof GLMTimeoutError) {
+ throw new Error(`Request timed out after ${CONFIG.maxRetries} retries.`);
+ }
+ if (error instanceof GLMNetworkError) {
+ throw new Error(`Network error after ${CONFIG.maxRetries} retries: ${error.message}`);
+ }
+ throw error;
  }
+ }

- // Handle tool calls
- if (choice.message?.tool_calls) {
- const toolResults: string[] = [];
-
- for (const tc of choice.message.tool_calls) {
- const toolName = tc.function?.name;
- const toolArgs = tc.function?.arguments ? JSON.parse(tc.function.arguments) : {};
+ // ====================================================================
+ // Process Message
+ // ====================================================================

- const result = await executeTool(toolName, toolArgs);
- toolResults.push(`[${toolName}]: ${result}`);
+ async function processMessage(input: string, memory: ConversationMemory): Promise<string> {
+ // Handle commands
+ if (input.startsWith('/')) {
+ if (input === '/clear') {
+ memory.clear();
+ return 'Memory cleared.';
  }
+ if (input === '/help') {
+ return `Commands:
+ /clear - Clear conversation memory
+ /help - Show this help
+ /status - Show system status

- // Continue conversation with tool results
- messages.push({ role: 'assistant', content: '', timestamp: Date.now() });
- messages.push({ role: 'user', content: `Tool results:\n${toolResults.join('\n')}`, timestamp: Date.now() });
-
- // Recursive call for final response
- return callGLM(messages, tools);
+ Just type a message to chat with AI.`;
+ }
+ if (input === '/status') {
+ return `Status: running
+ Memory file: ${MEMORY_FILE}
+ Chat dir: ${CONFIG.chatDir}`;
+ }
+ return `Unknown command: ${input}. Type /help for available commands.`;
  }

- return choice.message?.content || '(no response)';
+ // Regular message - get AI response
+ memory.add('user', input);
+ const messages = memory.getContext(CONFIG.contextLimit);
+ return await callGLM(messages);
  }

  // ====================================================================
- // Tmux Interface
+ // Main Loop - File Watcher
  // ====================================================================

- function tmux(args: string): string {
- try {
- return execSync(`tmux ${args}`, { encoding: 'utf-8' }).trim();
- } catch (e: any) {
- return e.stdout?.toString().trim() || '';
- }
- }
-
- function sessionExists(): boolean {
- const result = tmux(`has-session -t ${SESSION_NAME} 2>/dev/null`);
- return !result.includes('no session');
- }
-
- function createSession(): void {
- if (!sessionExists()) {
- tmux(`new-session -d -s ${SESSION_NAME} -x 200 -y 50`);
- tmux(`send-keys -t ${SESSION_NAME} '🤖 SSH Chat Channel - Type your message and press Enter' Enter`);
- tmux(`send-keys -t ${SESSION_NAME} '━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━' Enter`);
- tmux(`send-keys -t ${SESSION_NAME} '' Enter`);
- console.log(`Created tmux session: ${SESSION_NAME}`);
- }
- }
+ async function main() {
+ console.log('SSH Chat Channel starting...');
+ console.log(`Chat dir: ${CONFIG.chatDir}`);
+ console.log(`Memory: ${MEMORY_FILE}`);
+ console.log('');
+ console.log('Usage:');
+ console.log(` Write message: echo "your message" > ${IN_FILE}`);
+ console.log(` Read response: cat ${OUT_FILE}`);
+ console.log('');

- function getPaneContent(): string {
- return tmux(`capture-pane -t ${SESSION_NAME} -p -S -100`);
- }
+ // Ensure directories exist
+ ensureDir();

- function sendToPane(text: string): void {
- // Format and send response
- const lines = text.split('\n');
- tmux(`send-keys -t ${SESSION_NAME} '' Enter`);
- for (const line of lines) {
- // Escape special characters for tmux
- const escaped = line.replace(/["'$`\\]/g, '\\$&');
- tmux(`send-keys -t ${SESSION_NAME} '${escaped}' Enter`);
+ // Initialize GLM client (will throw if no API key)
+ try {
+ getClient();
+ console.log('GLM client initialized with retry support');
+ } catch (e) {
+ console.error('Failed to initialize GLM client:', (e as Error).message);
+ process.exit(1);
  }
- tmux(`send-keys -t ${SESSION_NAME} '' Enter`);
- tmux(`send-keys -t ${SESSION_NAME} '👤 You: '`);
- }

- // Track last seen content to detect new input
- let lastContent = '';
+ // Initialize memory
+ const memory = new ConversationMemory(MEMORY_FILE);
+ memory.add('system', `You are an AI assistant accessible via SSH.
+ You are helpful, concise, and can execute tools to help the user.
+ This is a private SSH channel separate from any Telegram or other chat interfaces.`);

- function detectNewInput(): string | null {
- const currentContent = getPaneContent();
+ // Create empty files if they don't exist
+ if (!existsSync(IN_FILE)) writeFileSync(IN_FILE, '');
+ if (!existsSync(OUT_FILE)) writeFileSync(OUT_FILE, 'Ready. Send a message.\n');

- if (currentContent === lastContent) {
- return null;
- }
+ setStatus('idle');

- // Find new lines
- const lastLines = lastContent.split('\n');
- const currentLines = currentContent.split('\n');
+ // Track last processed content
+ let lastContent = '';

- // Get lines added after last check
- const newLines: string[] = [];
- let foundLast = false;
+ console.log('Watching for messages...');

- for (const line of currentLines) {
- if (!foundLast) {
- if (line === lastLines[lastLines.length - 1]) {
- foundLast = true;
- }
- } else {
- // Skip prompt line
- if (!line.includes('👤 You:') && line.trim()) {
- newLines.push(line.trim());
- }
+ // Watch for file changes
+ const watcher = watch(CONFIG.chatDir, (eventType, filename) => {
+ if (filename === 'in' && eventType === 'change') {
+ processIncoming();
  }
- }
-
- lastContent = currentContent;
-
- // Combine new lines as input
- const input = newLines.join(' ').trim();
- return input || null;
- }
+ });

- // ====================================================================
- // Main Loop
- // ====================================================================
+ async function processIncoming() {
+ try {
+ const content = readFileSync(IN_FILE, 'utf-8').trim();

- async function main() {
- console.log('🤖 SSH Chat Channel starting...');
- console.log(`Session: ${SESSION_NAME}`);
- console.log(`Memory: ${MEMORY_FILE}`);
+ // Skip if same as last or empty
+ if (!content || content === lastContent) return;

- // Create tmux session
- createSession();
+ lastContent = content;
+ setStatus('processing');

- // Initialize memory (separate from Telegram)
- const memory = new ConversationMemory(MEMORY_FILE);
+ console.log(`[${new Date().toISOString()}] Processing: ${content.slice(0, 50)}...`);

- // Add system prompt
- memory.add('system', `You are an AI assistant accessible via SSH tmux session.
- You are helpful, concise, and can execute tools to help the user.
- This is a private SSH channel separate from any Telegram or other chat interfaces.`);
+ // Clear input file after reading
+ writeFileSync(IN_FILE, '');

- console.log('Ready. Monitoring tmux session for input...');
- console.log(`Attach with: tmux attach -t ${SESSION_NAME}`);
+ // Process message
+ const response = await processMessage(content, memory);

- // Main loop
- while (true) {
- try {
- const input = detectNewInput();
-
- if (input && input.length > 0) {
- // Skip commands
- if (input.startsWith('/')) {
- if (input === '/clear') {
- memory.clear();
- sendToPane('🗑️ Memory cleared.');
- } else if (input === '/exit' || input === '/quit') {
- sendToPane('👋 Goodbye!');
- break;
- } else {
- sendToPane(`Unknown command: ${input}`);
- }
- continue;
- }
-
- console.log(`[${new Date().toISOString()}] Input: ${input.slice(0, 50)}...`);
-
- // Add user message to memory
- memory.add('user', input);
-
- // Get AI response
- const messages = memory.getContext(20);
- const response = await callGLM(messages, TOOLS);
-
- // Add response to memory
- memory.add('assistant', response);
-
- // Send to tmux
- sendToPane(`🤖 AI: ${response}`);
-
- console.log(`[${new Date().toISOString()}] Response sent`);
- }
+ // Write response
+ writeOutput(response);
+ memory.add('assistant', response);

- // Poll every 500ms
- await new Promise(r => setTimeout(r, 500));
+ setStatus('idle');
+ console.log(`[${new Date().toISOString()}] Response sent`);
  } catch (error) {
  console.error('Error:', error);
- await new Promise(r => setTimeout(r, 2000));
+ setStatus('error');
+ writeOutput(`Error: ${(error as Error).message}`);
  }
  }
- }

- // Handle shutdown
- process.on('SIGINT', () => {
- console.log('\nShutting down...');
- process.exit(0);
- });
+ // Also poll as backup (watch can be unreliable)
+ setInterval(() => {
+ try {
+ const content = readFileSync(IN_FILE, 'utf-8').trim();
+ if (content && content !== lastContent) {
+ processIncoming();
+ }
+ } catch {}
+ }, CONFIG.pollInterval);
+
+ // Keep running
+ process.on('SIGINT', () => {
+ console.log('\nShutting down...');
+ watcher.close();
+ process.exit(0);
+ });
+ }

  main().catch(console.error);
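
On the consumer side, the protocol is just the three files under the chat directory: write a prompt to "in", wait for "out" to change, and optionally check "status". A small sketch of a client against the default ~/.ssh-chat location; the paths assume SSH_CHAT_DIR is unset, and the script itself is illustrative rather than part of the package:

    import { watch, readFileSync, writeFileSync } from "fs";
    import { homedir } from "os";
    import { join } from "path";

    const chatDir = join(homedir(), ".ssh-chat"); // default SSH_CHAT_DIR
    const inFile = join(chatDir, "in");
    const outFile = join(chatDir, "out");

    // Send one prompt and print the next response the channel writes to `out`.
    function ask(prompt: string): void {
      const watcher = watch(chatDir, (eventType, filename) => {
        if (filename === "out" && eventType === "change") {
          console.log(readFileSync(outFile, "utf-8")); // "[ISO timestamp]" then the reply
          watcher.close();
        }
      });
      writeFileSync(inFile, prompt);
    }

    ask("Summarize the contents of the status file.");

This mirrors the echo/cat usage the channel prints at startup.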