sensorium-mcp 2.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +103 -0
- package/dist/dispatcher.d.ts +82 -0
- package/dist/dispatcher.d.ts.map +1 -0
- package/dist/dispatcher.js +464 -0
- package/dist/dispatcher.js.map +1 -0
- package/dist/index.d.ts +29 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +1186 -0
- package/dist/index.js.map +1 -0
- package/dist/openai.d.ts +71 -0
- package/dist/openai.d.ts.map +1 -0
- package/dist/openai.js +221 -0
- package/dist/openai.js.map +1 -0
- package/dist/scheduler.d.ts +58 -0
- package/dist/scheduler.d.ts.map +1 -0
- package/dist/scheduler.js +191 -0
- package/dist/scheduler.js.map +1 -0
- package/dist/telegram.d.ts +119 -0
- package/dist/telegram.d.ts.map +1 -0
- package/dist/telegram.js +249 -0
- package/dist/telegram.js.map +1 -0
- package/dist/utils.d.ts +38 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +68 -0
- package/dist/utils.js.map +1 -0
- package/package.json +45 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Andriy
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# sensorium-mcp
|
|
2
|
+
|
|
3
|
+
An MCP (Model Context Protocol) server for remote control of AI assistants via Telegram.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
This server exposes four tools that allow an AI assistant (e.g. GitHub Copilot) to be operated remotely through a Telegram bot:
|
|
8
|
+
|
|
9
|
+
| Tool | Description |
|
|
10
|
+
|------|-------------|
|
|
11
|
+
| `start_session` | Begin or resume a sensorium session. Creates a dedicated Telegram topic thread (or resumes an existing one by name or thread ID). |
|
|
12
|
+
| `remote_copilot_wait_for_instructions` | Blocks until a new message (text, photo, document, or voice) arrives in the active topic or the timeout elapses. |
|
|
13
|
+
| `report_progress` | Sends a progress update back to the operator using standard Markdown (auto-converted to Telegram MarkdownV2). |
|
|
14
|
+
| `send_file` | Sends a file or image to the operator via Telegram (base64-encoded). Images are sent as photos; everything else as documents. |
|
|
15
|
+
| `send_voice` | Sends a voice message to the operator via Telegram. Text is converted to speech using OpenAI TTS (max 4096 chars). |
|
|
16
|
+
|
|
17
|
+
## Features
|
|
18
|
+
|
|
19
|
+
- **Concurrent sessions** — Multiple VS Code windows can run independent sessions simultaneously. A shared file-based dispatcher ensures only one process polls Telegram (`getUpdates`), while each session reads from its own per-thread message file. No 409 conflicts, no lost updates.
|
|
20
|
+
- **Named session persistence** — Sessions are mapped by name to Telegram thread IDs in `~/.remote-copilot-mcp-sessions.json`. Calling `start_session({ name: "Fix auth bug" })` always resumes the same thread, even across VS Code restarts.
|
|
21
|
+
- **Image & document support** — Send photos or documents to the agent from Telegram; the agent receives them as native MCP image content blocks or base64 text. The agent can also send files back via the `send_file` tool.
|
|
22
|
+
- **Voice message support** — Send voice messages from Telegram; they are automatically transcribed using OpenAI Whisper and delivered as text to the agent. The agent can also send voice responses back via OpenAI TTS. Requires `OPENAI_API_KEY`.
|
|
23
|
+
- **Automatic Markdown conversion** — Standard Markdown in `report_progress` is automatically converted to Telegram MarkdownV2, including code blocks, tables, blockquotes, and special characters.
|
|
24
|
+
- **Keep-alive pings** — Periodic heartbeat messages to Telegram so the operator knows the agent is still alive during long idle periods.
|
|
25
|
+
|
|
26
|
+
## Prerequisites
|
|
27
|
+
|
|
28
|
+
- Node.js 18 or later (uses native `fetch`)
|
|
29
|
+
- A [Telegram bot token](https://core.telegram.org/bots#botfather) (`TELEGRAM_TOKEN`)
|
|
30
|
+
- A Telegram **forum supergroup** where the bot is an admin with the **Manage Topics** right
|
|
31
|
+
- In Telegram: create a group → *Edit → Topics → Enable*
|
|
32
|
+
- Add your bot as admin and grant it *Manage Topics*
|
|
33
|
+
- Copy the group's chat ID (e.g. `-1001234567890`) as `TELEGRAM_CHAT_ID`
|
|
34
|
+
|
|
35
|
+
## Installation
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
npm install
|
|
39
|
+
npm run build
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
## Configuration
|
|
43
|
+
|
|
44
|
+
Set the following environment variables:
|
|
45
|
+
|
|
46
|
+
| Variable | Required | Default | Description |
|
|
47
|
+
|----------|----------|---------|-------------|
|
|
48
|
+
| `TELEGRAM_TOKEN` | ✅ | — | Telegram Bot API token from @BotFather |
|
|
49
|
+
| `TELEGRAM_CHAT_ID` | ✅ | — | Chat ID of the forum supergroup (e.g. `-1001234567890`). The bot must be admin with Manage Topics right. |
|
|
50
|
+
| `WAIT_TIMEOUT_MINUTES` | ❌ | `120` | Minutes to wait for a message before timing out |
|
|
51
|
+
| `OPENAI_API_KEY` | ❌ | — | OpenAI API key for voice message transcription (Whisper) and TTS (`send_voice`). Without it, voice messages show a placeholder instead of a transcript. |
|
|
52
|
+
| `VOICE_ANALYSIS_URL` | ❌ | — | URL of the voice emotion analysis microservice (e.g. `https://voice-analysis.example.com`). When set, voice messages are analyzed for emotion and the result is included with the transcript. See `voice-analysis/` for the deployable service. |
|
|
53
|
+
|
|
54
|
+
## Usage
|
|
55
|
+
|
|
56
|
+
### Simply use this prompt
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
Start remote copilot session
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
### Configure in MCP client (e.g. VS Code Copilot)
|
|
63
|
+
|
|
64
|
+
Add to your MCP configuration:
|
|
65
|
+
|
|
66
|
+
```json
|
|
67
|
+
{
|
|
68
|
+
"mcpServers": {
|
|
69
|
+
"sensorium-mcp": {
|
|
70
|
+
"command": "npx",
|
|
71
|
+
"args": [
|
|
72
|
+
"sensorium-mcp@latest"
|
|
73
|
+
],
|
|
74
|
+
"env": {
|
|
75
|
+
"TELEGRAM_TOKEN": "${input:TELEGRAM_TOKEN}",
|
|
76
|
+
"TELEGRAM_CHAT_ID": "${input:TELEGRAM_CHAT_ID}",
|
|
77
|
+
"WAIT_TIMEOUT_MINUTES": "30"
|
|
78
|
+
},
|
|
79
|
+
"type": "stdio"
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
## How it works
|
|
86
|
+
|
|
87
|
+
1. The AI calls `start_session`, which creates a new Telegram topic (e.g. *Copilot — 07 Mar 2026, 14:30*) or resumes an existing one by name/thread ID.
|
|
88
|
+
2. A shared **dispatcher** runs a single `getUpdates` poller (elected via a lock file at `~/.remote-copilot-mcp/poller.lock`). It writes incoming messages to per-thread JSONL files under `~/.remote-copilot-mcp/threads/`. Each MCP instance reads from its own thread file — no 409 conflicts between concurrent sessions.
|
|
89
|
+
3. When a message arrives (text, photo, or document), the tool downloads any media, converts it to MCP content blocks (image or text with base64), and instructs the AI to act on it.
|
|
90
|
+
4. The AI calls `report_progress` to post status updates and `send_file` to send files/images back to the operator.
|
|
91
|
+
5. If the timeout elapses with no message, the tool tells the AI to call `remote_copilot_wait_for_instructions` again immediately.
|
|
92
|
+
|
|
93
|
+
### Architecture
|
|
94
|
+
|
|
95
|
+
```
|
|
96
|
+
~/.remote-copilot-mcp/
|
|
97
|
+
poller.lock ← PID + timestamp; first instance becomes the poller
|
|
98
|
+
offset ← shared getUpdates offset
|
|
99
|
+
threads/
|
|
100
|
+
<threadId>.jsonl ← messages for each topic thread
|
|
101
|
+
general.jsonl ← messages with no thread ID
|
|
102
|
+
~/.remote-copilot-mcp-sessions.json ← name → threadId mapping
|
|
103
|
+
```
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared Telegram update dispatcher.
|
|
3
|
+
*
|
|
4
|
+
* Problem: Telegram's getUpdates API is exclusive — only one poller per bot
|
|
5
|
+
* token. When multiple MCP server instances run concurrently (multiple VS Code
|
|
6
|
+
* windows), they fight for the poll lock with 409 Conflict errors and silently
|
|
7
|
+
* lose updates meant for other sessions.
|
|
8
|
+
*
|
|
9
|
+
* Solution: A file-system–based message broker.
|
|
10
|
+
*
|
|
11
|
+
* 1. One MCP instance becomes the **poller** (elected via a lock file).
|
|
12
|
+
* It calls getUpdates and writes incoming messages to per-thread JSON
|
|
13
|
+
* files under ~/.remote-copilot-mcp/threads/<threadId>.jsonl.
|
|
14
|
+
* 2. All MCP instances (including the poller) **read** from their own
|
|
15
|
+
* thread file to retrieve messages. This is contention-free because
|
|
16
|
+
* each instance is scoped to its own thread ID.
|
|
17
|
+
* 3. The lock file is automatically released if the poller process dies
|
|
18
|
+
* (stale-lock detection via PID check). Another instance then takes over.
|
|
19
|
+
*
|
|
20
|
+
* The dispatcher's main public functions (it also exports peekThreadMessages and stopDispatcher):
|
|
21
|
+
* - startDispatcher(telegram, chatId) — call once on MCP server startup.
|
|
22
|
+
* - readThreadMessages(threadId) — non-blocking; returns and clears
|
|
23
|
+
* pending messages for a thread.
|
|
24
|
+
*/
|
|
25
|
+
import type { TelegramClient } from "./telegram.js";
|
|
26
|
+
export interface StoredMessage {
|
|
27
|
+
update_id: number;
|
|
28
|
+
message: {
|
|
29
|
+
message_id: number;
|
|
30
|
+
chat_id: number;
|
|
31
|
+
text?: string;
|
|
32
|
+
caption?: string;
|
|
33
|
+
message_thread_id?: number;
|
|
34
|
+
photo?: Array<{
|
|
35
|
+
file_id: string;
|
|
36
|
+
width: number;
|
|
37
|
+
height: number;
|
|
38
|
+
}>;
|
|
39
|
+
document?: {
|
|
40
|
+
file_id: string;
|
|
41
|
+
file_name?: string;
|
|
42
|
+
mime_type?: string;
|
|
43
|
+
};
|
|
44
|
+
voice?: {
|
|
45
|
+
file_id: string;
|
|
46
|
+
duration: number;
|
|
47
|
+
mime_type?: string;
|
|
48
|
+
};
|
|
49
|
+
video_note?: {
|
|
50
|
+
file_id: string;
|
|
51
|
+
length: number;
|
|
52
|
+
duration: number;
|
|
53
|
+
};
|
|
54
|
+
date: number;
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
/**
|
|
58
|
+
* Read and clear all pending messages for a thread.
|
|
59
|
+
* Uses rename for atomic read-and-clear to prevent message loss.
|
|
60
|
+
*/
|
|
61
|
+
export declare function readThreadMessages(threadId: number | undefined): StoredMessage[];
|
|
62
|
+
/**
|
|
63
|
+
* Non-destructive peek at pending messages for a thread.
|
|
64
|
+
* Unlike readThreadMessages, this does NOT consume the messages — they remain
|
|
65
|
+
* in the thread file for the next readThreadMessages call.
|
|
66
|
+
*/
|
|
67
|
+
export declare function peekThreadMessages(threadId: number | undefined): StoredMessage[];
|
|
68
|
+
/**
|
|
69
|
+
* Start the shared dispatcher.
|
|
70
|
+
* - Ensures directories exist.
|
|
71
|
+
* - Attempts to acquire the poller lock.
|
|
72
|
+
* - If acquired, starts a polling loop that writes to per-thread files.
|
|
73
|
+
* - If not acquired, this instance is a consumer only (reads from thread files).
|
|
74
|
+
*
|
|
75
|
+
* Returns: whether this instance became the poller.
|
|
76
|
+
*/
|
|
77
|
+
export declare function startDispatcher(telegram: TelegramClient, chatId: string): Promise<boolean>;
|
|
78
|
+
/**
|
|
79
|
+
* Stop the poller loop (if this instance is the poller).
|
|
80
|
+
*/
|
|
81
|
+
export declare function stopDispatcher(): void;
|
|
82
|
+
//# sourceMappingURL=dispatcher.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"dispatcher.d.ts","sourceRoot":"","sources":["../src/dispatcher.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;GAuBG;AAaH,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,eAAe,CAAC;AAqKpD,MAAM,WAAW,aAAa;IAC1B,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE;QACL,UAAU,EAAE,MAAM,CAAC;QACnB,OAAO,EAAE,MAAM,CAAC;QAChB,IAAI,CAAC,EAAE,MAAM,CAAC;QACd,OAAO,CAAC,EAAE,MAAM,CAAC;QACjB,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAC3B,KAAK,CAAC,EAAE,KAAK,CAAC;YACV,OAAO,EAAE,MAAM,CAAC;YAChB,KAAK,EAAE,MAAM,CAAC;YACd,MAAM,EAAE,MAAM,CAAC;SAClB,CAAC,CAAC;QACH,QAAQ,CAAC,EAAE;YACP,OAAO,EAAE,MAAM,CAAC;YAChB,SAAS,CAAC,EAAE,MAAM,CAAC;YACnB,SAAS,CAAC,EAAE,MAAM,CAAC;SACtB,CAAC;QACF,KAAK,CAAC,EAAE;YACJ,OAAO,EAAE,MAAM,CAAC;YAChB,QAAQ,EAAE,MAAM,CAAC;YACjB,SAAS,CAAC,EAAE,MAAM,CAAC;SACtB,CAAC;QACF,UAAU,CAAC,EAAE;YACT,OAAO,EAAE,MAAM,CAAC;YAChB,MAAM,EAAE,MAAM,CAAC;YACf,QAAQ,EAAE,MAAM,CAAC;SACpB,CAAC;QACF,IAAI,EAAE,MAAM,CAAC;KAChB,CAAC;CACL;AAgCD;;;GAGG;AACH,wBAAgB,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,SAAS,GAAG,aAAa,EAAE,CAiBhF;AAED;;;;GAIG;AACH,wBAAgB,kBAAkB,CAAC,QAAQ,EAAE,MAAM,GAAG,SAAS,GAAG,aAAa,EAAE,CAShF;AAgID;;;;;;;;GAQG;AACH,wBAAsB,eAAe,CACjC,QAAQ,EAAE,cAAc,EACxB,MAAM,EAAE,MAAM,GACf,OAAO,CAAC,OAAO,CAAC,CAgHlB;AAED;;GAEG;AACH,wBAAgB,cAAc,IAAI,IAAI,CAGrC"}
|
|
@@ -0,0 +1,464 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared Telegram update dispatcher.
|
|
3
|
+
*
|
|
4
|
+
* Problem: Telegram's getUpdates API is exclusive — only one poller per bot
|
|
5
|
+
* token. When multiple MCP server instances run concurrently (multiple VS Code
|
|
6
|
+
* windows), they fight for the poll lock with 409 Conflict errors and silently
|
|
7
|
+
* lose updates meant for other sessions.
|
|
8
|
+
*
|
|
9
|
+
* Solution: A file-system–based message broker.
|
|
10
|
+
*
|
|
11
|
+
* 1. One MCP instance becomes the **poller** (elected via a lock file).
|
|
12
|
+
* It calls getUpdates and writes incoming messages to per-thread JSON
|
|
13
|
+
* files under ~/.remote-copilot-mcp/threads/<threadId>.jsonl.
|
|
14
|
+
* 2. All MCP instances (including the poller) **read** from their own
|
|
15
|
+
* thread file to retrieve messages. This is contention-free because
|
|
16
|
+
* each instance is scoped to its own thread ID.
|
|
17
|
+
* 3. The lock file is automatically released if the poller process dies
|
|
18
|
+
* (stale-lock detection via PID check). Another instance then takes over.
|
|
19
|
+
*
|
|
20
|
+
* The dispatcher's main public functions (it also exports peekThreadMessages and stopDispatcher):
|
|
21
|
+
* - startDispatcher(telegram, chatId) — call once on MCP server startup.
|
|
22
|
+
* - readThreadMessages(threadId) — non-blocking; returns and clears
|
|
23
|
+
* pending messages for a thread.
|
|
24
|
+
*/
|
|
25
|
+
import { existsSync, linkSync, mkdirSync, readFileSync, readdirSync, renameSync, unlinkSync, writeFileSync } from "fs";
|
|
26
|
+
import { homedir } from "os";
|
|
27
|
+
import { join } from "path";
|
|
28
|
+
import { errorMessage } from "./utils.js";
|
|
29
|
+
// ---------------------------------------------------------------------------
|
|
30
|
+
// Paths
|
|
31
|
+
// ---------------------------------------------------------------------------
|
|
32
|
+
const BASE_DIR = join(homedir(), ".remote-copilot-mcp");
|
|
33
|
+
const THREADS_DIR = join(BASE_DIR, "threads");
|
|
34
|
+
const LOCK_FILE = join(BASE_DIR, "poller.lock");
|
|
35
|
+
const OFFSET_FILE = join(BASE_DIR, "offset");
|
|
36
|
+
// Ensure the dispatcher's on-disk layout exists, then sweep for leftovers
// from crashed readers. Safe to call repeatedly.
function ensureDirs() {
    // recursive: true makes this a no-op when the directory already exists,
    // and also creates the parent ~/.remote-copilot-mcp directory.
    mkdirSync(THREADS_DIR, { recursive: true });
    // Must run after mkdir: the recovery sweep lists THREADS_DIR.
    recoverOrphanedReads();
}
|
|
40
|
+
/**
 * Startup sweep: re-attach messages stranded in `.reading.PID` files.
 *
 * readThreadMessages claims a thread file by renaming it to
 * `<name>.reading.<pid>`; if that process hard-crashes before deleting the
 * claimed file, the messages inside would otherwise be lost. Here we append
 * the content of any claim file whose owner PID is no longer alive back onto
 * the original thread file, then delete the claim file. Everything is
 * best-effort; failures are swallowed because recovery must never block
 * startup.
 */
function recoverOrphanedReads() {
    try {
        for (const entry of readdirSync(THREADS_DIR)) {
            const m = /^(.+)\.reading\.(\d+)$/.exec(entry);
            if (!m)
                continue;
            const ownerPid = Number.parseInt(m[2], 10);
            if (isPidAlive(ownerPid))
                continue; // Owner still running — leave its claim alone.
            const orphanPath = join(THREADS_DIR, entry);
            const targetPath = join(THREADS_DIR, m[1]);
            try {
                const pending = readFileSync(orphanPath, "utf8");
                writeFileSync(targetPath, pending, { flag: "a", encoding: "utf8" });
                unlinkSync(orphanPath);
                process.stderr.write(`[dispatcher] Recovered orphaned file: ${entry}\n`);
            }
            catch { /* best effort */ }
        }
    }
    catch { /* non-fatal */ }
}
|
|
68
|
+
// ---------------------------------------------------------------------------
|
|
69
|
+
// Lock helpers
|
|
70
|
+
// ---------------------------------------------------------------------------
|
|
71
|
+
/**
 * Parse the poller lock file.
 * @returns `{ pid, ts }` on success, or null when the file is missing,
 *          unreadable, or does not contain both numeric fields.
 */
function readLock() {
    try {
        const data = JSON.parse(readFileSync(LOCK_FILE, "utf8"));
        const valid = typeof data.pid === "number" && typeof data.ts === "number";
        return valid ? data : null;
    }
    catch {
        return null;
    }
}
|
|
84
|
+
/**
 * Refresh the lock file's timestamp — but only while we still own it.
 *
 * Guards against a TOCTOU race between two would-be pollers: if another
 * process replaced the lock since our last check, overwriting it here would
 * silently produce two concurrent pollers, so we refuse instead.
 *
 * @returns true when the lock was (re)written by us, false when another PID
 *          now owns it and this instance must step down.
 */
function refreshLock() {
    const owner = readLock();
    if (owner !== null && owner.pid !== process.pid) {
        // Another process holds the lock now — do not clobber it.
        return false;
    }
    const payload = JSON.stringify({ pid: process.pid, ts: Date.now() });
    writeFileSync(LOCK_FILE, payload, "utf8");
    return true;
}
|
|
100
|
+
/** Best-effort removal of the poller lock file; a missing file is fine. */
function removeLock() {
    try {
        unlinkSync(LOCK_FILE);
    }
    catch { /* Already gone. */ }
}
|
|
108
|
+
/**
 * Check whether a PID refers to a live process.
 *
 * Signal 0 is an existence check: it delivers nothing but still performs
 * permission/ESRCH validation.
 *
 * Fix: `process.kill(pid, 0)` throws EPERM when the process exists but is
 * owned by another user. The previous blanket `catch → false` misreported
 * such processes as dead, which could let stale-lock recovery evict a live
 * poller and run two pollers at once. EPERM now counts as alive.
 */
function isPidAlive(pid) {
    try {
        process.kill(pid, 0); // Signal 0 = existence check, does not kill.
        return true;
    }
    catch (err) {
        // EPERM: the process exists but we lack permission to signal it.
        // Anything else (ESRCH, ERANGE, ...) means it is not alive.
        return err.code === "EPERM";
    }
}
|
|
118
|
+
/**
 * Try to become the poller using exclusive file creation to prevent TOCTOU races.
 * - If no lock file exists → atomically create it (flag: "wx").
 * - If lock file exists but the PID is dead → remove and retry.
 * - If lock file exists, PID is alive, but lock is stale → remove and retry.
 * - Otherwise → someone else is the poller.
 *
 * Returns true only when this process's PID was written via the exclusive
 * create; any loss of the final create race returns false.
 */
// Lock entries older than this are considered abandoned even if the owning
// PID still exists (e.g. a zombie or a poller stuck in a dead socket).
// The active poller refreshes its timestamp every 15 s, so 90 s gives six
// missed refreshes of slack before takeover.
const STALE_LOCK_MS = 90 * 1000; // 90 seconds
function tryAcquireLock() {
    const existing = readLock();
    if (existing) {
        const alive = isPidAlive(existing.pid);
        const stale = Date.now() - existing.ts > STALE_LOCK_MS;
        if (alive && !stale) {
            return false; // Someone else is actively polling.
        }
        // Dead or stale — remove before attempting exclusive create.
        // Another instance may unlink concurrently; that is harmless because
        // the "wx" create below is the single authoritative arbiter.
        try {
            unlinkSync(LOCK_FILE);
        }
        catch { /* race-ok */ }
    }
    else if (existsSync(LOCK_FILE)) {
        // Lock file exists but is corrupt/empty (readLock returned null).
        // Remove it so the exclusive create below can succeed.
        try {
            unlinkSync(LOCK_FILE);
        }
        catch { /* race-ok */ }
    }
    // Atomic exclusive create: fails if another process created first.
    try {
        writeFileSync(LOCK_FILE, JSON.stringify({ pid: process.pid, ts: Date.now() }), { encoding: "utf8", flag: "wx" });
        return true;
    }
    catch {
        return false; // Another process won the race.
    }
}
|
|
157
|
+
// ---------------------------------------------------------------------------
|
|
158
|
+
// Offset persistence (shared across all instances)
|
|
159
|
+
// ---------------------------------------------------------------------------
|
|
160
|
+
/**
 * Read the shared getUpdates offset from disk.
 * @returns the stored numeric offset, or 0 when the file is missing,
 *          unreadable, or not a finite number.
 */
function readOffset() {
    try {
        const parsed = Number(readFileSync(OFFSET_FILE, "utf8").trim());
        if (Number.isFinite(parsed)) {
            return parsed;
        }
        return 0;
    }
    catch {
        return 0;
    }
}
|
|
170
|
+
/** Persist the shared getUpdates offset; write failures are non-fatal. */
function writeOffset(offset) {
    try {
        writeFileSync(OFFSET_FILE, `${offset}`, "utf8");
    }
    catch { /* Non-fatal. */ }
}
|
|
178
|
+
/** Absolute path of the JSONL message file for a thread key (id or "general"). */
function threadFilePath(threadId) {
    const fileName = `${threadId}.jsonl`;
    return join(THREADS_DIR, fileName);
}
|
|
181
|
+
/**
 * Parse JSONL content into StoredMessage[], skipping corrupt lines.
 * @param raw   Trimmed JSONL text; falsy input yields an empty array.
 * @param label File name used only in the warning written to stderr.
 */
function parseJsonlLines(raw, label) {
    if (!raw)
        return [];
    const out = [];
    raw.split("\n").forEach((line) => {
        try {
            out.push(JSON.parse(line));
        }
        catch {
            // Tolerate torn/garbled lines rather than failing the whole batch.
            process.stderr.write(`[dispatcher] Skipping corrupt JSONL line in ${label}\n`);
        }
    });
    return out;
}
|
|
196
|
+
/**
 * Append a message to a thread's JSONL file.
 * Throws on write failure so the caller can track which messages were persisted.
 */
function appendToThread(threadId, msg) {
    const serialized = `${JSON.stringify(msg)}\n`;
    writeFileSync(threadFilePath(threadId), serialized, { flag: "a", encoding: "utf8" });
}
|
|
205
|
+
/**
 * Read and clear all pending messages for a thread.
 *
 * Claims the thread file via an atomic rename to `<file>.reading.<pid>`:
 * concurrent writers (flag "a") simply start a fresh file, so no message is
 * read twice or torn.
 *
 * Fix: the claimed file is now deleted only AFTER a successful read. The
 * previous implementation unlinked it in a `finally` block, so a transient
 * read error destroyed the claimed messages permanently. On read failure we
 * atomically restore the file via linkSync (which fails if writers already
 * recreated it — in that case the claim file is left on disk for
 * recoverOrphanedReads to merge back on the next startup).
 */
export function readThreadMessages(threadId) {
    const key = threadId ?? "general";
    const file = threadFilePath(key);
    const tmp = file + ".reading." + process.pid;
    try {
        renameSync(file, tmp);
    }
    catch {
        return []; // Nothing pending for this thread.
    }
    let raw;
    try {
        raw = readFileSync(tmp, "utf8").trim();
    }
    catch {
        try {
            // link+unlink restores the name only when `file` does not exist,
            // so we never clobber messages written after our rename.
            linkSync(tmp, file);
            unlinkSync(tmp);
        }
        catch { /* tmp stays; recoverOrphanedReads() merges it back later */ }
        return [];
    }
    // Read succeeded — the claimed file is fully consumed; discard it.
    try {
        unlinkSync(tmp);
    }
    catch { /* already gone */ }
    return parseJsonlLines(raw, `${key}.jsonl`);
}
|
|
233
|
+
/**
 * Non-destructive peek at pending messages for a thread.
 * Unlike readThreadMessages, this does NOT consume the messages — they remain
 * in the thread file for the next readThreadMessages call.
 */
export function peekThreadMessages(threadId) {
    const key = threadId ?? "general";
    try {
        const raw = readFileSync(threadFilePath(key), "utf8").trim();
        return parseJsonlLines(raw, `${key}.jsonl`);
    }
    catch {
        return []; // Missing file means no pending messages.
    }
}
|
|
249
|
+
// ---------------------------------------------------------------------------
|
|
250
|
+
// Poller loop
|
|
251
|
+
// ---------------------------------------------------------------------------
|
|
252
|
+
// True while this process owns the poll loop; cleared to step down.
let pollerRunning = false;
// AbortController for the in-flight getUpdates long poll; aborted on
// shutdown or when lock ownership is lost mid-poll.
let pollAbortController;
/**
 * Perform one getUpdates cycle as the elected poller.
 *
 * Ownership discipline: the lock is re-verified (a) on entry, (b) every 15 s
 * by a side-timer while the long poll is in flight, and (c) right after the
 * long poll returns — any failure clears pollerRunning so the outer loop in
 * startDispatcher steps this instance down.
 *
 * Offset discipline: the shared offset only advances past updates whose
 * messages were durably appended to their thread file, so a failed write is
 * re-fetched on the next poll instead of being skipped.
 *
 * @param telegram - client exposing getUpdates(offset, timeoutSeconds, signal).
 * @param chatId - configured chat id (string); updates from other chats are
 *                 acknowledged but not stored.
 */
async function pollOnce(telegram, chatId) {
    if (!refreshLock()) {
        // We lost lock ownership — another process took over. Step down.
        pollerRunning = false;
        return;
    }
    const POLL_TIMEOUT_SECONDS = 10;
    let offset = readOffset();
    // Refresh the lock periodically during long polls / 409 retries
    // to prevent it from going stale (STALE_LOCK_MS = 90 s).
    const lockRefresher = setInterval(() => {
        if (!refreshLock()) {
            pollerRunning = false;
            pollAbortController?.abort();
        }
    }, 15_000);
    try {
        pollAbortController = new AbortController();
        const updates = await telegram.getUpdates(offset, POLL_TIMEOUT_SECONDS, pollAbortController.signal);
        // Refresh again after the (potentially 10-second) long poll returns.
        if (!refreshLock()) {
            pollerRunning = false;
            return;
        }
        if (updates.length === 0)
            return;
        // Track the highest offset for which ALL messages were successfully written.
        let committedOffset = offset;
        let allSucceeded = true;
        for (const u of updates) {
            if (!u.message) {
                // Non-message update — skip but still advance past it.
                committedOffset = u.update_id + 1;
                continue;
            }
            if (String(u.message.chat.id) !== chatId) {
                // Update for a different chat — acknowledge without storing.
                committedOffset = u.update_id + 1;
                continue;
            }
            // Messages outside a forum topic are filed under "general".
            const threadId = u.message.message_thread_id ?? "general";
            // Project the Telegram update down to the StoredMessage shape
            // (only the fields downstream consumers need).
            const stored = {
                update_id: u.update_id,
                message: {
                    message_id: u.message.message_id,
                    chat_id: u.message.chat.id,
                    text: u.message.text,
                    caption: u.message.caption,
                    message_thread_id: u.message.message_thread_id,
                    photo: u.message.photo?.map((p) => ({
                        file_id: p.file_id,
                        width: p.width,
                        height: p.height,
                    })),
                    document: u.message.document ? {
                        file_id: u.message.document.file_id,
                        file_name: u.message.document.file_name,
                        mime_type: u.message.document.mime_type,
                    } : undefined,
                    voice: u.message.voice ? {
                        file_id: u.message.voice.file_id,
                        duration: u.message.voice.duration,
                        mime_type: u.message.voice.mime_type,
                    } : undefined,
                    video_note: u.message.video_note ? {
                        file_id: u.message.video_note.file_id,
                        length: u.message.video_note.length,
                        duration: u.message.video_note.duration,
                    } : undefined,
                    date: u.message.date,
                },
            };
            try {
                appendToThread(threadId, stored);
                committedOffset = u.update_id + 1;
            }
            catch (writeErr) {
                process.stderr.write(`[dispatcher] Failed to write message ${u.update_id} to thread ${threadId}: ${errorMessage(writeErr)}\n`);
                allSucceeded = false;
                break; // Stop processing — don't skip messages.
            }
        }
        // Only advance offset to the last successfully written message.
        if (committedOffset > offset) {
            writeOffset(committedOffset);
        }
        if (!allSucceeded) {
            process.stderr.write("[dispatcher] Partial batch write. Will retry remaining messages on next poll.\n");
        }
    }
    catch (err) {
        // Ignore abort errors during shutdown.
        // NOTE(review): only DOMException AbortErrors are matched here;
        // presumably telegram.getUpdates surfaces aborts as DOMException —
        // confirm against the client implementation.
        if (err instanceof DOMException && err.name === "AbortError")
            return;
        process.stderr.write(`[dispatcher] Poll error: ${errorMessage(err)}\n`);
    }
    finally {
        clearInterval(lockRefresher);
    }
}
|
|
353
|
+
/**
 * Start the shared dispatcher.
 * - Ensures directories exist.
 * - Attempts to acquire the poller lock.
 * - If acquired, starts a polling loop that writes to per-thread files.
 * - If not acquired, this instance is a consumer only (reads from thread files).
 *
 * Returns: whether this instance became the poller.
 */
export async function startDispatcher(telegram, chatId) {
    ensureDirs();
    const isPoller = tryAcquireLock();
    const CONSUMER_RETRY_MS = 10_000;
    // Shared cleanup + loop helpers.
    let cleanupRegistered = false;
    // Install process-exit hooks exactly once per process, no matter how many
    // times this instance is promoted/demoted between poller and consumer.
    const registerCleanup = () => {
        if (cleanupRegistered)
            return;
        cleanupRegistered = true;
        const cleanup = () => {
            pollerRunning = false;
            pollAbortController?.abort();
            removeLock();
        };
        process.on("exit", cleanup);
        process.on("SIGINT", () => { cleanup(); process.exit(0); });
        process.on("SIGTERM", () => { cleanup(); process.exit(0); });
    };
    // As a consumer, periodically try to take over the lock (the current
    // poller may have died or gone stale). The timer is unref'd so it never
    // keeps the process alive on its own.
    const installConsumerRetry = () => {
        const timer = setInterval(() => {
            if (tryAcquireLock()) {
                clearInterval(timer);
                process.stderr.write("[dispatcher] Promoted to poller (previous poller seems inactive).\n");
                pollerRunning = true;
                startLoop();
                registerCleanup();
            }
        }, CONSUMER_RETRY_MS);
        timer.unref();
    };
    // Fire-and-forget poll loop. Each iteration re-checks lock ownership and
    // steps down to consumer mode if another PID holds the lock.
    const startLoop = () => {
        const loop = async () => {
            while (pollerRunning) {
                const currentLock = readLock();
                if (currentLock && currentLock.pid !== process.pid) {
                    process.stderr.write(`[dispatcher] Lock taken by PID ${currentLock.pid}. Stepping down to consumer.\n`);
                    pollerRunning = false;
                    installConsumerRetry();
                    break;
                }
                try {
                    await pollOnce(telegram, chatId);
                }
                catch (err) {
                    // pollOnce handles its own errors; this is a safety net,
                    // backing off 5 s so a persistent failure cannot spin.
                    process.stderr.write(`[dispatcher] Unexpected poll error: ${errorMessage(err)}\n`);
                    await new Promise((r) => setTimeout(r, 5000));
                }
                await new Promise((r) => setTimeout(r, 500));
            }
        };
        void loop(); // Intentionally not awaited — runs for the process lifetime.
    };
    if (isPoller) {
        process.stderr.write("[dispatcher] This instance is the poller.\n");
        // On first run with no offset file, skip all old updates in one call
        // by fetching the latest update_id and setting the offset past it.
        // Awaited so the poll loop never sees offset=0.
        if (!existsSync(OFFSET_FILE)) {
            process.stderr.write("[dispatcher] No offset file. Skipping old updates...\n");
            try {
                // Use a short timeout to prevent blocking startup if another
                // poller is active (409 retry loop could stall for 60+ seconds).
                const drainAbort = new AbortController();
                const drainTimeout = setTimeout(() => drainAbort.abort(), 10_000);
                // offset -1 asks Telegram for only the most recent update.
                const latest = await telegram.getUpdates(-1, 0, drainAbort.signal);
                clearTimeout(drainTimeout);
                if (latest.length > 0) {
                    const skipTo = latest[latest.length - 1].update_id + 1;
                    writeOffset(skipTo);
                    process.stderr.write(`[dispatcher] Skipped to offset ${skipTo}.\n`);
                }
                else {
                    writeOffset(0);
                }
            }
            catch (err) {
                // Don't write offset=0 — leave it at whatever the current value is.
                // If the file still doesn't exist, readOffset() returns 0 which is
                // acceptable because the drain simply failed to optimise.
                process.stderr.write(`[dispatcher] Warning: drain failed: ${errorMessage(err)}. Poll loop will start from offset 0.\n`);
            }
        }
        pollerRunning = true;
        startLoop();
        registerCleanup();
    }
    else {
        process.stderr.write("[dispatcher] Another instance is the poller. This instance is a consumer only.\n");
        // Periodically try to become the poller in case the current one dies
        // but its PID remains alive (zombie process, stuck socket, etc.).
        installConsumerRetry();
    }
    return isPoller;
}
|
|
457
|
+
/**
 * Stop the poller loop (if this instance is the poller).
 * Clearing pollerRunning lets the loop in startDispatcher exit after its
 * current iteration; removing the lock lets another instance take over
 * immediately instead of waiting for stale-lock detection.
 */
export function stopDispatcher() {
    pollerRunning = false;
    removeLock();
}
|
|
464
|
+
//# sourceMappingURL=dispatcher.js.map
|