multis 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +19 -0
- package/CLAUDE.md +66 -0
- package/README.md +98 -0
- package/package.json +32 -0
- package/skills/capture.md +60 -0
- package/skills/files.md +38 -0
- package/skills/shell.md +53 -0
- package/skills/weather.md +32 -0
- package/src/bot/handlers.js +712 -0
- package/src/bot/telegram.js +51 -0
- package/src/cli/setup-beeper.js +239 -0
- package/src/config.js +157 -0
- package/src/governance/audit.js +95 -0
- package/src/governance/validate.js +99 -0
- package/src/index.js +71 -0
- package/src/indexer/chunk.js +68 -0
- package/src/indexer/chunker.js +87 -0
- package/src/indexer/index.js +150 -0
- package/src/indexer/parsers.js +299 -0
- package/src/indexer/store.js +256 -0
- package/src/llm/anthropic.js +106 -0
- package/src/llm/base.js +38 -0
- package/src/llm/client.js +34 -0
- package/src/llm/ollama.js +148 -0
- package/src/llm/openai.js +107 -0
- package/src/llm/prompts.js +71 -0
- package/src/memory/capture.js +85 -0
- package/src/memory/manager.js +123 -0
- package/src/platforms/base.js +38 -0
- package/src/platforms/beeper.js +238 -0
- package/src/platforms/message.js +61 -0
- package/src/platforms/telegram.js +95 -0
- package/src/skills/executor.js +125 -0
package/src/llm/prompts.js
@@ -0,0 +1,71 @@
+/**
+ * RAG prompt builder — formats search chunks into LLM prompts.
+ */
+
+const SYSTEM_PROMPT = `You are multis, a personal assistant. Answer based on the provided documents. Cite sources as [filename, page X] or [filename, section]. If the documents don't contain the answer, say so clearly.`;
+
+/**
+ * Build a RAG prompt from a question and search chunks.
+ * @param {string} question - The user's question
+ * @param {Array} chunks - Search result chunks from the indexer
+ * @returns {{ system: string, user: string }}
+ */
+function buildRAGPrompt(question, chunks) {
+  if (!chunks || chunks.length === 0) {
+    return {
+      system: SYSTEM_PROMPT,
+      user: `No matching documents found.\n\nQuestion: ${question}`
+    };
+  }
+
+  const formattedChunks = chunks.map((chunk, i) => {
+    const source = chunk.name || 'unknown';
+    const section = chunk.sectionPath?.join(' > ') || '';
+    const pages = chunk.pageStart != null
+      ? chunk.pageEnd && chunk.pageEnd !== chunk.pageStart
+        ? `pages ${chunk.pageStart}-${chunk.pageEnd}`
+        : `page ${chunk.pageStart}`
+      : '';
+    const meta = [source, section, pages].filter(Boolean).join(', ');
+    return `--- Document ${i + 1} [${meta}] ---\n${chunk.content}`;
+  });
+
+  return {
+    system: SYSTEM_PROMPT,
+    user: `${formattedChunks.join('\n\n')}\n\n---\nQuestion: ${question}`
+  };
+}
+
+/**
+ * Build a system prompt that includes durable memory and optional RAG chunks.
+ * Used by the memory-aware conversation flow.
+ * @param {string} memoryMd - Contents of memory.md (durable notes)
+ * @param {Array} chunks - Optional RAG search chunks
+ * @returns {string} - Combined system prompt
+ */
+function buildMemorySystemPrompt(memoryMd, chunks) {
+  const parts = [SYSTEM_PROMPT];
+
+  if (memoryMd && memoryMd.trim()) {
+    parts.push(`\n## Memory (durable notes about this conversation)\n${memoryMd.trim()}`);
+  }
+
+  if (chunks && chunks.length > 0) {
+    const formattedChunks = chunks.map((chunk, i) => {
+      const source = chunk.name || 'unknown';
+      const section = chunk.sectionPath?.join(' > ') || '';
+      const pages = chunk.pageStart != null
+        ? chunk.pageEnd && chunk.pageEnd !== chunk.pageStart
+          ? `pages ${chunk.pageStart}-${chunk.pageEnd}`
+          : `page ${chunk.pageStart}`
+        : '';
+      const meta = [source, section, pages].filter(Boolean).join(', ');
+      return `--- Document ${i + 1} [${meta}] ---\n${chunk.content}`;
+    });
+    parts.push(`\n## Relevant documents\n${formattedChunks.join('\n\n')}`);
+  }
+
+  return parts.join('\n');
+}
+
+module.exports = { buildRAGPrompt, buildMemorySystemPrompt };

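
For orientation, a minimal sketch of how these two builders might be wired to a retriever and an LLM client. The `answer()` helper and the `indexer.search()` call are assumptions based on the shapes referenced elsewhere in this diff, not verbatim package code; only `llm.generate(prompt, { system, ... })` matches a signature shown in this release (see capture.js below).

// Hypothetical wiring; chunk fields (name, sectionPath, pageStart, content) are
// the ones buildRAGPrompt reads above.
const { buildRAGPrompt } = require('./prompts');

async function answer(question, indexer, llm) {
  // Assumed: the indexer exposes a search() returning chunk objects.
  const chunks = await indexer.search(question);
  const { system, user } = buildRAGPrompt(question, chunks);
  return llm.generate(user, { system, maxTokens: 1024 });
}
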
package/src/memory/capture.js
@@ -0,0 +1,85 @@
+/**
+ * Memory capture orchestrator — summarizes conversation when rolling window overflows.
+ * Runs fire-and-forget after responding (recent.json + daily log preserve raw data).
+ */
+
+const CAPTURE_SYSTEM = `You are a memory capture assistant. Your job is to extract durable, useful notes from a conversation.
+
+Rules:
+- Extract facts, preferences, decisions, and action items
+- Use concise bullet points
+- Include names, dates, and specific details
+- Skip greetings, small talk, and meta-conversation
+- If nothing noteworthy, respond with "No notable information."
+- Do NOT repeat information that already exists in the memory section below`;
+
+/**
+ * Run memory capture: summarize recent messages and store as durable notes.
+ * @param {string} chatId - Chat identifier
+ * @param {import('./manager').ChatMemoryManager} mem - Memory manager instance
+ * @param {import('../llm/base').LLMProvider} llm - LLM provider
+ * @param {import('../indexer/index').DocumentIndexer} indexer - Document indexer
+ * @param {Object} options - { keepLast: 5 }
+ */
+async function runCapture(chatId, mem, llm, indexer, options = {}) {
+  const keepLast = options.keepLast || 5;
+
+  try {
+    const recent = mem.loadRecent();
+    if (recent.length === 0) return;
+
+    const existingMemory = mem.loadMemory();
+
+    // Format conversation for LLM
+    const conversationText = recent
+      .map(m => `[${m.role}]: ${m.content}`)
+      .join('\n');
+
+    const system = existingMemory.trim()
+      ? `${CAPTURE_SYSTEM}\n\n## Existing memory\n${existingMemory}`
+      : CAPTURE_SYSTEM;
+
+    const summary = await llm.generate(
+      `Summarize the notable information from this conversation:\n\n${conversationText}`,
+      { system, maxTokens: 512, temperature: 0.3 }
+    );
+
+    // Append to durable memory (skip if nothing notable)
+    if (summary && !summary.toLowerCase().includes('no notable information')) {
+      mem.appendMemory(summary);
+    }
+
+    // Index raw messages as conversation chunks
+    for (const m of recent.slice(0, -keepLast)) {
+      try {
+        indexer.store.saveChunk({
+          chunk_id: `conv-${chatId}-${m.timestamp}`,
+          file_path: `memory/chats/${chatId}`,
+          page_start: null,
+          page_end: null,
+          element_type: 'conversation',
+          name: `${m.role} @ ${m.timestamp}`,
+          content: m.content,
+          parent_chunk_id: null,
+          section_path: JSON.stringify([chatId, m.role]),
+          section_level: 0,
+          document_type: 'conversation',
+          metadata: JSON.stringify({ role: m.role, chatId }),
+          created_at: m.timestamp,
+          updated_at: m.timestamp
+        });
+      } catch {
+        // Duplicate chunk_id or store error — not critical
+      }
+    }
+
+    // Trim recent to keep last N messages
+    mem.trimRecent(keepLast);
+
+    console.log(`[capture] Chat ${chatId}: captured ${recent.length - keepLast} messages`);
+  } catch (err) {
+    console.error(`[capture] Error for chat ${chatId}: ${err.message}`);
+  }
+}
+
+module.exports = { runCapture };

package/src/memory/manager.js
@@ -0,0 +1,123 @@
+const fs = require('fs');
+const path = require('path');
+
+const MEMORY_BASE = path.join(
+  process.env.HOME || process.env.USERPROFILE,
+  '.multis', 'memory', 'chats'
+);
+
+/**
+ * Per-chat memory manager — handles profile, recent messages, durable memory, and daily logs.
+ * All I/O is synchronous (single process, no concurrency).
+ */
+class ChatMemoryManager {
+  constructor(chatId) {
+    this.chatId = String(chatId);
+    this.dir = path.join(MEMORY_BASE, this.chatId);
+    this.profilePath = path.join(this.dir, 'profile.json');
+    this.recentPath = path.join(this.dir, 'recent.json');
+    this.memoryPath = path.join(this.dir, 'memory.md');
+    this.logDir = path.join(this.dir, 'log');
+    this.ensureDirectories();
+  }
+
+  ensureDirectories() {
+    if (!fs.existsSync(this.dir)) fs.mkdirSync(this.dir, { recursive: true });
+    if (!fs.existsSync(this.logDir)) fs.mkdirSync(this.logDir, { recursive: true });
+  }
+
+  // --- Profile ---
+
+  loadProfile() {
+    if (!fs.existsSync(this.profilePath)) {
+      return { mode: 'personal', platform: null, lastActive: null, created: new Date().toISOString() };
+    }
+    return JSON.parse(fs.readFileSync(this.profilePath, 'utf-8'));
+  }
+
+  saveProfile(profile) {
+    profile.lastActive = new Date().toISOString();
+    fs.writeFileSync(this.profilePath, JSON.stringify(profile, null, 2));
+  }
+
+  // --- Recent messages (rolling window) ---
+
+  loadRecent() {
+    if (!fs.existsSync(this.recentPath)) return [];
+    try {
+      return JSON.parse(fs.readFileSync(this.recentPath, 'utf-8'));
+    } catch {
+      return [];
+    }
+  }
+
+  saveRecent(messages) {
+    fs.writeFileSync(this.recentPath, JSON.stringify(messages, null, 2));
+  }
+
+  appendMessage(role, content, timestamp = null) {
+    const messages = this.loadRecent();
+    messages.push({
+      role,
+      content,
+      timestamp: timestamp || new Date().toISOString()
+    });
+    this.saveRecent(messages);
+    return messages;
+  }
+
+  trimRecent(keepLast = 5) {
+    const messages = this.loadRecent();
+    if (messages.length <= keepLast) return messages;
+    const trimmed = messages.slice(-keepLast);
+    this.saveRecent(trimmed);
+    return trimmed;
+  }
+
+  // --- Durable memory (memory.md) ---
+
+  loadMemory() {
+    if (!fs.existsSync(this.memoryPath)) return '';
+    return fs.readFileSync(this.memoryPath, 'utf-8');
+  }
+
+  appendMemory(notes) {
+    const header = `\n## ${new Date().toISOString()}\n\n`;
+    fs.appendFileSync(this.memoryPath, header + notes.trim() + '\n');
+  }
+
+  clearMemory() {
+    fs.writeFileSync(this.memoryPath, '');
+  }
+
+  // --- Daily log (append-only) ---
+
+  appendToLog(role, content) {
+    const now = new Date();
+    const dateStr = now.toISOString().slice(0, 10);
+    const timeStr = now.toISOString().slice(11, 19);
+    const logFile = path.join(this.logDir, `${dateStr}.md`);
+    const entry = `### ${timeStr} [${role}]\n${content}\n\n`;
+    fs.appendFileSync(logFile, entry);
+  }
+
+  // --- Capture threshold ---
+
+  shouldCapture(threshold = 20) {
+    const messages = this.loadRecent();
+    return messages.length >= threshold;
+  }
+}
+
+/**
+ * Get or create a ChatMemoryManager for a chatId.
+ * Uses a Map cache to avoid re-creating managers.
+ */
+function getMemoryManager(cache, chatId) {
+  if (!cache.has(chatId)) {
+    cache.set(chatId, new ChatMemoryManager(chatId));
+  }
+  return cache.get(chatId);
+}
+
+module.exports = { ChatMemoryManager, getMemoryManager };

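
A short usage sketch of the per-chat memory flow defined by manager.js and capture.js. The `onUserMessage()` handler and the `managers` cache are illustrative names, not package code; every method called is one shown in this diff.

const { getMemoryManager } = require('./manager');
const { runCapture } = require('./capture');

const managers = new Map(); // chatId -> ChatMemoryManager cache

async function onUserMessage(chatId, text, llm, indexer) {
  const mem = getMemoryManager(managers, chatId);
  mem.appendMessage('user', text);   // rolling window (recent.json)
  mem.appendToLog('user', text);     // append-only daily log

  // ... generate and send a reply, then record it with mem.appendMessage('assistant', reply) ...

  // Once the rolling window reaches the threshold, summarize old messages and trim.
  if (mem.shouldCapture(20)) {
    runCapture(chatId, mem, llm, indexer, { keepLast: 5 }); // fire-and-forget, per capture.js
  }
}
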
package/src/platforms/base.js
@@ -0,0 +1,38 @@
+/**
+ * Abstract platform base class.
+ * All platform adapters (Telegram, Beeper, etc.) extend this.
+ */
+class Platform {
+  constructor(name, config) {
+    this.name = name;
+    this.config = config;
+    this._messageCallback = null;
+  }
+
+  async start() {
+    throw new Error(`${this.name}: start() not implemented`);
+  }
+
+  async stop() {
+    throw new Error(`${this.name}: stop() not implemented`);
+  }
+
+  /**
+   * Send a text message to a chat.
+   * @param {string} chatId
+   * @param {string} text
+   */
+  async send(chatId, text) {
+    throw new Error(`${this.name}: send() not implemented`);
+  }
+
+  /**
+   * Register callback for incoming messages.
+   * Callback receives Message objects.
+   */
+  onMessage(callback) {
+    this._messageCallback = callback;
+  }
+}
+
+module.exports = { Platform };

package/src/platforms/beeper.js
@@ -0,0 +1,238 @@
+const fs = require('fs');
+const path = require('path');
+const { Platform } = require('./base');
+const { Message } = require('./message');
+const { logAudit } = require('../governance/audit');
+
+const DEFAULT_URL = 'http://localhost:23373';
+const DEFAULT_POLL_INTERVAL = 3000;
+const TOKEN_FILE = path.join(process.env.HOME || process.env.USERPROFILE, '.multis', 'beeper-token.json');
+
+/**
+ * Beeper Desktop API platform adapter.
+ * Polls localhost:23373 for new messages, processes // commands from self.
+ */
+class BeeperPlatform extends Platform {
+  constructor(config) {
+    super('beeper', config);
+    const bc = config.platforms?.beeper || {};
+    this.baseUrl = bc.url || DEFAULT_URL;
+    this.pollInterval = bc.poll_interval || DEFAULT_POLL_INTERVAL;
+    this.commandPrefix = bc.command_prefix || '//';
+    this.token = null;
+    this.selfIds = new Set(); // account user IDs to detect self-messages
+    this._pollTimer = null;
+    this._lastSeen = {}; // chatId -> last message ID (numeric) we processed
+    this._initialized = false; // first poll seeds _lastSeen without processing
+    this._personalChats = new Set(); // chatIds that are self/note-to-self chats
+  }
+
+  async start() {
+    this.token = this._loadToken();
+    if (!this.token) {
+      console.error('Beeper: no token found. Run: node src/cli/setup-beeper.js');
+      return;
+    }
+
+    // Verify token and discover self IDs
+    try {
+      const accounts = await this._api('GET', '/v1/accounts');
+      const list = Array.isArray(accounts) ? accounts : accounts.items || [];
+      for (const acc of list) {
+        if (acc.user?.id) this.selfIds.add(acc.user.id);
+        if (acc.accountID) this.selfIds.add(acc.accountID);
+      }
+      console.log(`Beeper: connected (${list.length} accounts)`);
+    } catch (err) {
+      console.error(`Beeper: token invalid or Desktop not running — ${err.message}`);
+      return;
+    }
+
+    // Seed _lastSeen with current message IDs so we don't process old messages
+    await this._seedLastSeen();
+    this._initialized = true;
+
+    // Start polling
+    this._pollTimer = setInterval(() => this._poll(), this.pollInterval);
+    console.log(`Beeper: polling every ${this.pollInterval}ms for ${this.commandPrefix} commands`);
+  }
+
+  async stop() {
+    if (this._pollTimer) {
+      clearInterval(this._pollTimer);
+      this._pollTimer = null;
+    }
+  }
+
+  async send(chatId, text) {
+    // Prefix responses so they're distinguishable from user's own messages
+    const prefixed = `[multis] ${text}`;
+    await this._api('POST', `/v1/chats/${encodeURIComponent(chatId)}/messages`, { text: prefixed });
+  }
+
+  async _seedLastSeen() {
+    try {
+      const data = await this._api('GET', '/v1/chats?limit=20');
+      const chats = data.items || [];
+      for (const chat of chats) {
+        const chatId = chat.id || chat.chatID;
+        if (!chatId) continue;
+
+        // Detect self/note-to-self chats by type or participant count
+        const chatType = chat.type || '';
+        const participants = chat.participants || chat.members || [];
+        if (chatType === 'single' && participants.length <= 1) {
+          this._personalChats.add(chatId);
+        }
+
+        const msgData = await this._api('GET', `/v1/chats/${encodeURIComponent(chatId)}/messages?limit=1`);
+        const messages = msgData.items || [];
+        if (messages.length > 0) {
+          const id = messages[0].id || messages[0].messageID;
+          if (id) this._lastSeen[chatId] = Number(id);
+        }
+      }
+      if (this._personalChats.size > 0) {
+        console.log(`Beeper: detected ${this._personalChats.size} personal/self chat(s)`);
+      }
+    } catch (err) {
+      console.error(`Beeper: seed error — ${err.message}`);
+    }
+  }
+
+  async _poll() {
+    if (!this._initialized) return;
+    try {
+      const data = await this._api('GET', '/v1/chats?limit=20');
+      const chats = data.items || [];
+
+      for (const chat of chats) {
+        const chatId = chat.id || chat.chatID;
+        if (!chatId) continue;
+
+        const msgData = await this._api('GET', `/v1/chats/${encodeURIComponent(chatId)}/messages?limit=5`);
+        const messages = msgData.items || [];
+
+        // Messages are newest-first; process in reverse (oldest-first) for correct ordering
+        for (let i = messages.length - 1; i >= 0; i--) {
+          const msg = messages[i];
+          const msgId = Number(msg.id || msg.messageID);
+          if (!msgId) continue;
+
+          // Skip already-seen messages
+          const lastSeen = this._lastSeen[chatId] || 0;
+          if (msgId <= lastSeen) continue;
+
+          // Update last seen
+          this._lastSeen[chatId] = msgId;
+
+          const isSelf = this._isSelf(msg);
+          const text = msg.text || '';
+
+          // Skip our own responses to avoid cascade
+          if (text.startsWith('[multis]')) continue;
+
+          if (!this._messageCallback) continue;
+
+          // Detect self/personal chats (single participant or DM type)
+          const isPersonalChat = this._personalChats.has(chatId);
+          const mode = this._getChatMode(chatId);
+
+          // Determine how to route this message
+          let routeAs = null;
+          let shouldProcess = false;
+
+          if (isSelf && text.startsWith(this.commandPrefix)) {
+            // Explicit command: //ask, //mode, etc.
+            shouldProcess = true;
+          } else if (isSelf && isPersonalChat && !text.startsWith(this.commandPrefix)) {
+            // Self-message in personal/note-to-self chat → natural language ask
+            routeAs = 'natural';
+            shouldProcess = true;
+          } else if (!isSelf && mode === 'business') {
+            // Incoming message in a business-mode chat → auto-respond
+            routeAs = 'business';
+            shouldProcess = true;
+          }
+
+          if (shouldProcess) {
+            console.log(`Beeper: ${routeAs || 'command'} from ${chat.title || chatId}: ${text.slice(0, 80)}`);
+
+            const normalized = new Message({
+              id: msgId,
+              platform: 'beeper',
+              chatId,
+              chatName: chat.title || chat.name || '',
+              senderId: msg.senderID || msg.sender || '',
+              senderName: msg.senderName || '',
+              isSelf,
+              text,
+              raw: msg,
+              routeAs,
+            });
+
+            try {
+              await this._messageCallback(normalized, this);
+            } catch (err) {
+              console.error(`Beeper: handler error — ${err.message}`);
+            }
+          }
+        }
+      }
+
+      // Clear poll error flag on success
+      this._pollErrorLogged = false;
+    } catch (err) {
+      if (!this._pollErrorLogged) {
+        console.error(`Beeper: poll error — ${err.message}`);
+        this._pollErrorLogged = true;
+      }
+    }
+  }
+
+  _isSelf(msg) {
+    const sender = msg.senderID || msg.sender || '';
+    return this.selfIds.has(sender);
+  }
+
+  _getChatMode(chatId) {
+    const modes = this.config.platforms?.beeper?.chat_modes;
+    if (modes && modes[chatId]) return modes[chatId];
+    return this.config.platforms?.beeper?.default_mode || 'personal';
+  }
+
+  _loadToken() {
+    try {
+      const data = JSON.parse(fs.readFileSync(TOKEN_FILE, 'utf8'));
+      return data.access_token;
+    } catch {
+      // Also check legacy location
+      const legacyPath = path.join(__dirname, '..', '..', '.beeper-storage', 'desktop-token.json');
+      try {
+        const data = JSON.parse(fs.readFileSync(legacyPath, 'utf8'));
+        return data.access_token;
+      } catch {
+        return null;
+      }
+    }
+  }
+
+  async _api(method, apiPath, body) {
+    const opts = {
+      method,
+      headers: {
+        'Authorization': `Bearer ${this.token}`,
+        'Content-Type': 'application/json',
+      },
+    };
+    if (body) opts.body = JSON.stringify(body);
+    const res = await fetch(`${this.baseUrl}${apiPath}`, opts);
+    if (!res.ok) {
+      const text = await res.text();
+      throw new Error(`${res.status} ${res.statusText}: ${text}`);
+    }
+    return res.json();
+  }
+}
+
+module.exports = { BeeperPlatform };

package/src/platforms/message.js
@@ -0,0 +1,61 @@
+/**
+ * Normalized message across all platforms.
+ * Telegram bot messages are always commands.
+ * Beeper messages are commands only when prefixed with //.
+ */
+class Message {
+  constructor({ id, platform, chatId, chatName, senderId, senderName, isSelf, text, raw, routeAs }) {
+    this.id = id;
+    this.platform = platform;
+    this.chatId = chatId;
+    this.chatName = chatName || '';
+    this.senderId = senderId;
+    this.senderName = senderName || '';
+    this.isSelf = isSelf || false;
+    this.text = text || '';
+    this.raw = raw || null;
+    /** @type {'natural'|'business'|null} Set by platform for non-command routing */
+    this.routeAs = routeAs || null;
+  }
+
+  /**
+   * Is this message a command for multis?
+   * Telegram: all messages to the bot are commands (it's a dedicated bot).
+   * Beeper: only messages starting with // are commands.
+   */
+  isCommand() {
+    if (this.platform === 'telegram') return true;
+    if (this.platform === 'beeper') return this.isSelf && this.text.startsWith('//');
+    return false;
+  }
+
+  /**
+   * Get the command text with platform prefix stripped.
+   * Telegram: "/exec ls" -> "exec ls", plain text -> text as-is
+   * Beeper: "//exec ls" -> "exec ls"
+   */
+  commandText() {
+    if (this.platform === 'telegram') {
+      return this.text.startsWith('/') ? this.text.slice(1) : this.text;
+    }
+    if (this.platform === 'beeper') {
+      return this.text.startsWith('//') ? this.text.slice(2).trimStart() : this.text;
+    }
+    return this.text;
+  }
+
+  /**
+   * Parse command name and arguments.
+   * Returns { command, args } or null if not a command.
+   */
+  parseCommand() {
+    if (!this.isCommand()) return null;
+    const text = this.commandText();
+    // Handle Telegram @bot suffix: /command@botname args
+    const match = text.match(/^([^\s@]+)(?:@\S+)?(?:\s+(.*)|$)/s);
+    if (!match) return null;
+    return { command: match[1].toLowerCase(), args: match[2] };
+  }
+}
+
+module.exports = { Message };

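
To illustrate the routing rules above, here are two hypothetical messages (the IDs, chat IDs, and the "@multis_bot" suffix are made up for the example) and what parseCommand() returns for them:

const { Message } = require('./message');

// Beeper self-message with the // prefix: treated as a command.
const beeperMsg = new Message({
  id: 1, platform: 'beeper', chatId: 'c1', senderId: 'me', isSelf: true,
  text: '//exec ls -la'
});
console.log(beeperMsg.parseCommand()); // { command: 'exec', args: 'ls -la' }

// Telegram message with an @bot suffix: the suffix is stripped by the regex.
const tgMsg = new Message({
  id: 2, platform: 'telegram', chatId: 'c2', senderId: 'u1',
  text: '/ask@multis_bot what is on my calendar?'
});
console.log(tgMsg.parseCommand()); // { command: 'ask', args: 'what is on my calendar?' }
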