kernelbot 1.0.33 → 1.0.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/.env.example +11 -0
  2. package/README.md +76 -341
  3. package/bin/kernel.js +134 -15
  4. package/config.example.yaml +2 -1
  5. package/goals.md +20 -0
  6. package/knowledge_base/index.md +11 -0
  7. package/package.json +2 -1
  8. package/src/agent.js +166 -19
  9. package/src/automation/automation-manager.js +16 -0
  10. package/src/automation/automation.js +6 -2
  11. package/src/bot.js +295 -163
  12. package/src/conversation.js +70 -3
  13. package/src/life/engine.js +87 -68
  14. package/src/life/evolution.js +4 -8
  15. package/src/life/improvements.js +2 -6
  16. package/src/life/journal.js +3 -6
  17. package/src/life/memory.js +3 -10
  18. package/src/life/share-queue.js +4 -9
  19. package/src/prompts/orchestrator.js +21 -12
  20. package/src/prompts/persona.md +27 -0
  21. package/src/providers/base.js +51 -8
  22. package/src/providers/google-genai.js +198 -0
  23. package/src/providers/index.js +6 -1
  24. package/src/providers/models.js +6 -2
  25. package/src/providers/openai-compat.js +25 -11
  26. package/src/security/auth.js +38 -1
  27. package/src/services/stt.js +10 -1
  28. package/src/tools/docker.js +37 -15
  29. package/src/tools/git.js +6 -0
  30. package/src/tools/github.js +6 -0
  31. package/src/tools/jira.js +5 -0
  32. package/src/tools/monitor.js +13 -15
  33. package/src/tools/network.js +22 -18
  34. package/src/tools/os.js +37 -2
  35. package/src/tools/process.js +21 -14
  36. package/src/utils/config.js +66 -0
  37. package/src/utils/date.js +19 -0
  38. package/src/utils/display.js +1 -1
  39. package/src/utils/ids.js +12 -0
  40. package/src/utils/shell.js +31 -0
  41. package/src/utils/temporal-awareness.js +199 -0
  42. package/src/utils/timeUtils.js +110 -0
  43. package/src/utils/truncate.js +42 -0
  44. package/src/worker.js +2 -18
package/src/bot.js CHANGED
@@ -1,6 +1,6 @@
1
1
  import TelegramBot from 'node-telegram-bot-api';
2
2
  import { createReadStream, readFileSync } from 'fs';
3
- import { isAllowedUser, getUnauthorizedMessage } from './security/auth.js';
3
+ import { isAllowedUser, getUnauthorizedMessage, alertAdmin } from './security/auth.js';
4
4
  import { getLogger } from './utils/logger.js';
5
5
  import { PROVIDERS } from './providers/models.js';
6
6
  import {
@@ -15,6 +15,55 @@ import {
15
15
  import { TTSService } from './services/tts.js';
16
16
  import { STTService } from './services/stt.js';
17
17
  import { getClaudeAuthStatus, claudeLogout } from './claude-auth.js';
18
+ import { isQuietHours } from './utils/timeUtils.js';
19
+
20
+ /**
21
+ * Simulate a human-like typing delay based on response length.
22
+ * Short replies (casual chat) get a brief pause; longer replies get more.
23
+ * Keeps the typing indicator alive during the delay so the user sees "typing...".
24
+ *
25
+ * @param {TelegramBot} bot - Telegram bot instance
26
+ * @param {number} chatId - Chat to show typing in
27
+ * @param {string} text - The reply text (used to calculate delay)
28
+ * @returns {Promise<void>}
29
+ */
30
+ async function simulateTypingDelay(bot, chatId, text) {
31
+ const length = (text || '').length;
32
+
33
+ // ~25ms per character, clamped between 0.4s and 4s
34
+ // Short "hey ❤️" (~6 chars) → 0.4s | Medium reply (~120 chars) → 3s | Long reply → 4s cap
35
+ const delay = Math.min(4000, Math.max(400, length * 25));
36
+
37
+ // Add a small random jitter (±15%) so it doesn't feel mechanical
38
+ const jitter = delay * (0.85 + Math.random() * 0.3);
39
+ const finalDelay = Math.round(jitter);
40
+
41
+ // Keep the typing indicator alive during the delay
42
+ bot.sendChatAction(chatId, 'typing').catch(() => {});
43
+
44
+ return new Promise((resolve) => setTimeout(resolve, finalDelay));
45
+ }
46
+
47
+ /**
48
+ * Simulate a brief pause between consecutive message chunks.
49
+ * When a long reply is split into multiple Telegram messages, firing them
50
+ * all instantly feels robotic. This adds a short, natural delay with a
51
+ * typing indicator so multi-part replies feel more human.
52
+ *
53
+ * @param {TelegramBot} bot - Telegram bot instance
54
+ * @param {number} chatId - Chat to show typing in
55
+ * @param {string} nextChunk - The upcoming chunk (used to scale the pause)
56
+ * @returns {Promise<void>}
57
+ */
58
+ async function simulateInterChunkDelay(bot, chatId, nextChunk) {
59
+ // Shorter than the initial typing delay: 0.3s – 1.5s based on chunk length
60
+ const length = (nextChunk || '').length;
61
+ const base = Math.min(1500, Math.max(300, length * 8));
62
+ const jitter = base * (0.85 + Math.random() * 0.3);
63
+
64
+ bot.sendChatAction(chatId, 'typing').catch(() => {});
65
+ return new Promise((resolve) => setTimeout(resolve, Math.round(jitter)));
66
+ }
18
67
 
19
68
  function splitMessage(text, maxLength = 4096) {
20
69
  if (text.length <= maxLength) return [text];
@@ -35,9 +84,90 @@ function splitMessage(text, maxLength = 4096) {
35
84
  return chunks;
36
85
  }
37
86
 
87
+ /**
88
+ * Create an onUpdate callback that sends or edits Telegram messages.
89
+ * Tries Markdown first, falls back to plain text.
90
+ */
91
+ function createOnUpdate(bot, chatId) {
92
+ const logger = getLogger();
93
+ return async (update, opts = {}) => {
94
+ if (opts.editMessageId) {
95
+ try {
96
+ const edited = await bot.editMessageText(update, {
97
+ chat_id: chatId,
98
+ message_id: opts.editMessageId,
99
+ parse_mode: 'Markdown',
100
+ });
101
+ return edited.message_id;
102
+ } catch (mdErr) {
103
+ logger.debug(`[Bot] Markdown edit failed for chat ${chatId}, retrying plain: ${mdErr.message}`);
104
+ try {
105
+ const edited = await bot.editMessageText(update, {
106
+ chat_id: chatId,
107
+ message_id: opts.editMessageId,
108
+ });
109
+ return edited.message_id;
110
+ } catch (plainErr) {
111
+ logger.debug(`[Bot] Plain-text edit also failed for chat ${chatId}, sending new message: ${plainErr.message}`);
112
+ }
113
+ }
114
+ }
115
+ const parts = splitMessage(update);
116
+ let lastMsgId = null;
117
+ for (const part of parts) {
118
+ try {
119
+ const sent = await bot.sendMessage(chatId, part, { parse_mode: 'Markdown' });
120
+ lastMsgId = sent.message_id;
121
+ } catch (mdErr) {
122
+ logger.debug(`[Bot] Markdown send failed for chat ${chatId}, falling back to plain: ${mdErr.message}`);
123
+ const sent = await bot.sendMessage(chatId, part);
124
+ lastMsgId = sent.message_id;
125
+ }
126
+ }
127
+ return lastMsgId;
128
+ };
129
+ }
130
+
131
+ /**
132
+ * Create a sendPhoto callback that sends a photo with optional caption.
133
+ * Tries Markdown caption first, falls back to plain caption.
134
+ */
135
+ function createSendPhoto(bot, chatId, logger) {
136
+ return async (filePath, caption) => {
137
+ const fileOpts = { contentType: 'image/png' };
138
+ try {
139
+ await bot.sendPhoto(chatId, createReadStream(filePath), {
140
+ caption: caption || '',
141
+ parse_mode: 'Markdown',
142
+ }, fileOpts);
143
+ } catch {
144
+ try {
145
+ await bot.sendPhoto(chatId, createReadStream(filePath), {
146
+ caption: caption || '',
147
+ }, fileOpts);
148
+ } catch (err) {
149
+ logger.error(`Failed to send photo: ${err.message}`);
150
+ }
151
+ }
152
+ };
153
+ }
154
+
155
+ /**
156
+ * Create a sendReaction callback for reacting to messages with emoji.
157
+ */
158
+ function createSendReaction(bot) {
159
+ return async (targetChatId, targetMsgId, emoji, isBig = false) => {
160
+ await bot.setMessageReaction(targetChatId, targetMsgId, {
161
+ reaction: [{ type: 'emoji', emoji }],
162
+ is_big: isBig,
163
+ });
164
+ };
165
+ }
166
+
38
167
  /**
39
168
  * Simple per-chat queue to serialize agent processing.
40
169
  * Each chat gets its own promise chain so messages are processed in order.
170
+ * Automatically cleans up finished queues to avoid unbounded Map growth.
41
171
  */
42
172
  class ChatQueue {
43
173
  constructor() {
@@ -45,9 +175,21 @@ class ChatQueue {
45
175
  }
46
176
 
47
177
  enqueue(chatId, fn) {
178
+ const logger = getLogger();
48
179
  const key = String(chatId);
49
180
  const prev = this.queues.get(key) || Promise.resolve();
50
- const next = prev.then(() => fn()).catch(() => {});
181
+ const next = prev
182
+ .then(() => fn())
183
+ .catch((err) => {
184
+ logger.error(`[ChatQueue] Error processing message for chat ${key}: ${err.message}`);
185
+ })
186
+ .finally(() => {
187
+ // Clean up the queue entry once this is the last item in the chain,
188
+ // preventing the Map from growing unboundedly over long-running sessions.
189
+ if (this.queues.get(key) === next) {
190
+ this.queues.delete(key);
191
+ }
192
+ });
51
193
  this.queues.set(key, next);
52
194
  return next;
53
195
  }
@@ -119,54 +261,8 @@ export function startBot(config, agent, conversationManager, jobManager, automat
119
261
  const sendAction = (chatId, action) => bot.sendChatAction(chatId, action).catch(() => {});
120
262
 
121
263
  const agentFactory = (chatId) => {
122
- const onUpdate = async (update, opts = {}) => {
123
- if (opts.editMessageId) {
124
- try {
125
- const edited = await bot.editMessageText(update, {
126
- chat_id: chatId,
127
- message_id: opts.editMessageId,
128
- parse_mode: 'Markdown',
129
- });
130
- return edited.message_id;
131
- } catch {
132
- try {
133
- const edited = await bot.editMessageText(update, {
134
- chat_id: chatId,
135
- message_id: opts.editMessageId,
136
- });
137
- return edited.message_id;
138
- } catch {
139
- // Edit failed — fall through to send new message
140
- }
141
- }
142
- }
143
- const parts = splitMessage(update);
144
- let lastMsgId = null;
145
- for (const part of parts) {
146
- try {
147
- const sent = await bot.sendMessage(chatId, part, { parse_mode: 'Markdown' });
148
- lastMsgId = sent.message_id;
149
- } catch {
150
- const sent = await bot.sendMessage(chatId, part);
151
- lastMsgId = sent.message_id;
152
- }
153
- }
154
- return lastMsgId;
155
- };
156
-
157
- const sendPhoto = async (filePath, caption) => {
158
- const fileOpts = { contentType: 'image/png' };
159
- try {
160
- await bot.sendPhoto(chatId, createReadStream(filePath), { caption: caption || '', parse_mode: 'Markdown' }, fileOpts);
161
- } catch {
162
- try {
163
- await bot.sendPhoto(chatId, createReadStream(filePath), { caption: caption || '' }, fileOpts);
164
- } catch (err) {
165
- logger.error(`[Automation] Failed to send photo: ${err.message}`);
166
- }
167
- }
168
- };
169
-
264
+ const onUpdate = createOnUpdate(bot, chatId);
265
+ const sendPhoto = createSendPhoto(bot, chatId, logger);
170
266
  return { agent, onUpdate, sendPhoto };
171
267
  };
172
268
 
@@ -194,6 +290,13 @@ export function startBot(config, agent, conversationManager, jobManager, automat
194
290
 
195
291
  if (!isAllowedUser(query.from.id, config)) {
196
292
  await bot.answerCallbackQuery(query.id, { text: 'Unauthorized' });
293
+ await alertAdmin(bot, {
294
+ userId: query.from.id,
295
+ username: query.from.username,
296
+ firstName: query.from.first_name,
297
+ text: `🔘 زر: ${query.data || 'unknown'}`,
298
+ type: 'callback',
299
+ });
197
300
  return;
198
301
  }
199
302
 
@@ -683,6 +786,13 @@ export function startBot(config, agent, conversationManager, jobManager, automat
683
786
  if (msg.text || msg.document) {
684
787
  logger.warn(`Unauthorized access attempt from ${username} (${userId})`);
685
788
  await bot.sendMessage(chatId, getUnauthorizedMessage());
789
+ await alertAdmin(bot, {
790
+ userId,
791
+ username: msg.from.username,
792
+ firstName: msg.from.first_name,
793
+ text: msg.text || (msg.document ? `📎 ملف: ${msg.document.file_name || 'unknown'}` : undefined),
794
+ type: 'رسالة',
795
+ });
686
796
  }
687
797
  return;
688
798
  }
@@ -748,6 +858,37 @@ export function startBot(config, agent, conversationManager, jobManager, automat
748
858
  }
749
859
  }
750
860
 
861
+ // Handle photo messages — download, convert to base64, and pass to LLM for vision analysis
862
+ let imageAttachment = null;
863
+ if (msg.photo && msg.photo.length > 0) {
864
+ logger.info(`[Bot] Photo message from ${username} (${userId}) in chat ${chatId}`);
865
+ try {
866
+ // Use highest resolution (last item in array)
867
+ const photo = msg.photo[msg.photo.length - 1];
868
+ const fileLink = await bot.getFileLink(photo.file_id);
869
+ const response = await fetch(fileLink);
870
+ if (!response.ok) throw new Error(`Failed to download photo: ${response.statusText}`);
871
+ const buffer = Buffer.from(await response.arrayBuffer());
872
+ const base64Data = buffer.toString('base64');
873
+
874
+ // Determine media type from URL extension, default to jpeg
875
+ const ext = fileLink.split('.').pop().split('?')[0].toLowerCase();
876
+ const extToMime = { jpg: 'image/jpeg', jpeg: 'image/jpeg', png: 'image/png', gif: 'image/gif', webp: 'image/webp' };
877
+ const mediaType = extToMime[ext] || 'image/jpeg';
878
+
879
+ imageAttachment = { type: 'base64', media_type: mediaType, data: base64Data };
880
+ // Use caption as text, or default prompt
881
+ if (!msg.text) {
882
+ msg.text = msg.caption || 'What do you see in this image? Describe it in detail.';
883
+ }
884
+ logger.info(`[Bot] Photo downloaded and encoded (${Math.round(base64Data.length / 1024)}KB base64, ${mediaType})`);
885
+ } catch (err) {
886
+ logger.error(`[Bot] Photo processing failed: ${err.message}`);
887
+ await bot.sendMessage(chatId, 'Failed to process the image. Please try again.');
888
+ return;
889
+ }
890
+ }
891
+
751
892
  if (!msg.text) return; // ignore non-text (and non-document) messages
752
893
 
753
894
  let text = msg.text.trim();
@@ -1580,90 +1721,38 @@ export function startBot(config, agent, conversationManager, jobManager, automat
1580
1721
  bot.sendChatAction(chatId, 'typing').catch(() => {});
1581
1722
 
1582
1723
  try {
1583
- const onUpdate = async (update, opts = {}) => {
1584
- // Edit an existing message instead of sending a new one
1585
- if (opts.editMessageId) {
1586
- try {
1587
- const edited = await bot.editMessageText(update, {
1588
- chat_id: chatId,
1589
- message_id: opts.editMessageId,
1590
- parse_mode: 'Markdown',
1591
- });
1592
- return edited.message_id;
1593
- } catch {
1594
- try {
1595
- const edited = await bot.editMessageText(update, {
1596
- chat_id: chatId,
1597
- message_id: opts.editMessageId,
1598
- });
1599
- return edited.message_id;
1600
- } catch {
1601
- // Edit failed — fall through to send new message
1602
- }
1603
- }
1604
- }
1605
-
1606
- // Send new message(s) — also reached when edit fails
1607
- const parts = splitMessage(update);
1608
- let lastMsgId = null;
1609
- for (const part of parts) {
1610
- try {
1611
- const sent = await bot.sendMessage(chatId, part, { parse_mode: 'Markdown' });
1612
- lastMsgId = sent.message_id;
1613
- } catch {
1614
- const sent = await bot.sendMessage(chatId, part);
1615
- lastMsgId = sent.message_id;
1616
- }
1617
- }
1618
- return lastMsgId;
1619
- };
1620
-
1621
- const sendPhoto = async (filePath, caption) => {
1622
- const fileOpts = { contentType: 'image/png' };
1623
- try {
1624
- await bot.sendPhoto(chatId, createReadStream(filePath), {
1625
- caption: caption || '',
1626
- parse_mode: 'Markdown',
1627
- }, fileOpts);
1628
- } catch {
1629
- try {
1630
- await bot.sendPhoto(chatId, createReadStream(filePath), {
1631
- caption: caption || '',
1632
- }, fileOpts);
1633
- } catch (err) {
1634
- logger.error(`Failed to send photo: ${err.message}`);
1635
- }
1636
- }
1637
- };
1638
-
1639
- const sendReaction = async (targetChatId, targetMsgId, emoji, isBig = false) => {
1640
- await bot.setMessageReaction(targetChatId, targetMsgId, {
1641
- reaction: [{ type: 'emoji', emoji }],
1642
- is_big: isBig,
1643
- });
1644
- };
1724
+ const onUpdate = createOnUpdate(bot, chatId);
1725
+ const sendPhoto = createSendPhoto(bot, chatId, logger);
1726
+ const sendReaction = createSendReaction(bot);
1645
1727
 
1646
1728
  logger.debug(`[Bot] Sending to orchestrator: chat ${chatId}, text="${mergedText.slice(0, 80)}"`);
1647
1729
  const reply = await agent.processMessage(chatId, mergedText, {
1648
1730
  id: userId,
1649
1731
  username,
1650
- }, onUpdate, sendPhoto, { sendReaction, messageId: msg.message_id });
1732
+ }, onUpdate, sendPhoto, { sendReaction, messageId: msg.message_id, imageAttachment });
1651
1733
 
1652
1734
  clearInterval(typingInterval);
1653
1735
 
1736
+ // Simulate human-like typing delay before sending the reply
1737
+ await simulateTypingDelay(bot, chatId, reply || '');
1738
+
1654
1739
  logger.info(`[Bot] Reply for chat ${chatId}: ${(reply || '').length} chars`);
1655
1740
  const chunks = splitMessage(reply || 'Done.');
1656
- for (const chunk of chunks) {
1741
+ for (let i = 0; i < chunks.length; i++) {
1742
+ // Brief pause between consecutive chunks so multi-part replies feel natural
1743
+ if (i > 0) await simulateInterChunkDelay(bot, chatId, chunks[i]);
1657
1744
  try {
1658
- await bot.sendMessage(chatId, chunk, { parse_mode: 'Markdown' });
1745
+ await bot.sendMessage(chatId, chunks[i], { parse_mode: 'Markdown' });
1659
1746
  } catch {
1660
1747
  // Fallback to plain text if Markdown fails
1661
- await bot.sendMessage(chatId, chunk);
1748
+ await bot.sendMessage(chatId, chunks[i]);
1662
1749
  }
1663
1750
  }
1664
1751
 
1665
- // Send voice reply if TTS is available and the reply isn't too short
1666
- if (ttsService.isAvailable() && reply && reply.length > 5) {
1752
+ // Send voice reply only when the user explicitly requests it
1753
+ const voiceKeywords = ['صوت', 'صوتك', 'صوتية', 'صوتي', 'voice', 'speak', 'hear you'];
1754
+ const wantsVoice = voiceKeywords.some((kw) => mergedText.toLowerCase().includes(kw));
1755
+ if (wantsVoice && ttsService.isAvailable() && reply && reply.length > 5) {
1667
1756
  try {
1668
1757
  const audioPath = await ttsService.synthesize(reply);
1669
1758
  if (audioPath) {
@@ -1687,7 +1776,18 @@ export function startBot(config, agent, conversationManager, jobManager, automat
1687
1776
  const userId = reaction.user?.id;
1688
1777
  const username = reaction.user?.username || reaction.user?.first_name || 'unknown';
1689
1778
 
1690
- if (!userId || !isAllowedUser(userId, config)) return;
1779
+ if (!userId || !isAllowedUser(userId, config)) {
1780
+ if (userId) {
1781
+ await alertAdmin(bot, {
1782
+ userId,
1783
+ username: reaction.user?.username,
1784
+ firstName: reaction.user?.first_name,
1785
+ text: `${(reaction.new_reaction || []).filter(r => r.type === 'emoji').map(r => r.emoji).join(' ') || 'reaction'}`,
1786
+ type: 'تفاعل',
1787
+ });
1788
+ }
1789
+ return;
1790
+ }
1691
1791
 
1692
1792
  const newReactions = reaction.new_reaction || [];
1693
1793
  const emojis = newReactions
@@ -1701,65 +1801,39 @@ export function startBot(config, agent, conversationManager, jobManager, automat
1701
1801
  const reactionText = `[User reacted with ${emojis.join(' ')} to your message]`;
1702
1802
 
1703
1803
  chatQueue.enqueue(chatId, async () => {
1704
- try {
1705
- const onUpdate = async (update, opts = {}) => {
1706
- if (opts.editMessageId) {
1707
- try {
1708
- const edited = await bot.editMessageText(update, {
1709
- chat_id: chatId,
1710
- message_id: opts.editMessageId,
1711
- parse_mode: 'Markdown',
1712
- });
1713
- return edited.message_id;
1714
- } catch {
1715
- try {
1716
- const edited = await bot.editMessageText(update, {
1717
- chat_id: chatId,
1718
- message_id: opts.editMessageId,
1719
- });
1720
- return edited.message_id;
1721
- } catch {
1722
- // fall through
1723
- }
1724
- }
1725
- }
1726
- const parts = splitMessage(update);
1727
- let lastMsgId = null;
1728
- for (const part of parts) {
1729
- try {
1730
- const sent = await bot.sendMessage(chatId, part, { parse_mode: 'Markdown' });
1731
- lastMsgId = sent.message_id;
1732
- } catch {
1733
- const sent = await bot.sendMessage(chatId, part);
1734
- lastMsgId = sent.message_id;
1735
- }
1736
- }
1737
- return lastMsgId;
1738
- };
1804
+ // Show typing indicator while processing the reaction
1805
+ const typingInterval = setInterval(() => {
1806
+ bot.sendChatAction(chatId, 'typing').catch(() => {});
1807
+ }, 4000);
1808
+ bot.sendChatAction(chatId, 'typing').catch(() => {});
1739
1809
 
1740
- const sendReaction = async (targetChatId, targetMsgId, emoji, isBig = false) => {
1741
- await bot.setMessageReaction(targetChatId, targetMsgId, {
1742
- reaction: [{ type: 'emoji', emoji }],
1743
- is_big: isBig,
1744
- });
1745
- };
1810
+ try {
1811
+ const onUpdate = createOnUpdate(bot, chatId);
1812
+ const sendReaction = createSendReaction(bot);
1746
1813
 
1747
1814
  const reply = await agent.processMessage(chatId, reactionText, {
1748
1815
  id: userId,
1749
1816
  username,
1750
1817
  }, onUpdate, null, { sendReaction, messageId: reaction.message_id });
1751
1818
 
1819
+ clearInterval(typingInterval);
1820
+
1752
1821
  if (reply && reply.trim()) {
1822
+ // Simulate human-like typing delay before responding to the reaction
1823
+ await simulateTypingDelay(bot, chatId, reply);
1824
+
1753
1825
  const chunks = splitMessage(reply);
1754
- for (const chunk of chunks) {
1826
+ for (let i = 0; i < chunks.length; i++) {
1827
+ if (i > 0) await simulateInterChunkDelay(bot, chatId, chunks[i]);
1755
1828
  try {
1756
- await bot.sendMessage(chatId, chunk, { parse_mode: 'Markdown' });
1829
+ await bot.sendMessage(chatId, chunks[i], { parse_mode: 'Markdown' });
1757
1830
  } catch {
1758
- await bot.sendMessage(chatId, chunk);
1831
+ await bot.sendMessage(chatId, chunks[i]);
1759
1832
  }
1760
1833
  }
1761
1834
  }
1762
1835
  } catch (err) {
1836
+ clearInterval(typingInterval);
1763
1837
  logger.error(`[Bot] Error processing reaction in chat ${chatId}: ${err.message}`);
1764
1838
  }
1765
1839
  });
@@ -1769,5 +1843,63 @@ export function startBot(config, agent, conversationManager, jobManager, automat
1769
1843
  logger.error(`Telegram polling error: ${err.message}`);
1770
1844
  });
1771
1845
 
1846
+ // ── Resume active chats after restart ────────────────────────
1847
+ setTimeout(async () => {
1848
+ const sendMsg = async (chatId, text) => {
1849
+ try {
1850
+ await bot.sendMessage(chatId, text, { parse_mode: 'Markdown' });
1851
+ } catch {
1852
+ await bot.sendMessage(chatId, text);
1853
+ }
1854
+ };
1855
+ try {
1856
+ await agent.resumeActiveChats(sendMsg);
1857
+ } catch (err) {
1858
+ logger.error(`[Bot] Resume active chats failed: ${err.message}`);
1859
+ }
1860
+ }, 5000);
1861
+
1862
+ // ── Proactive share delivery (randomized, self-rearming) ────
1863
+ const armShareDelivery = (delivered) => {
1864
+ // If we just delivered something, wait longer (1–4h) before next check
1865
+ // If nothing was delivered, check again sooner (10–45min) in case new shares appear
1866
+ const minMin = delivered ? 60 : 10;
1867
+ const maxMin = delivered ? 240 : 45;
1868
+ const delayMs = (minMin + Math.random() * (maxMin - minMin)) * 60_000;
1869
+
1870
+ logger.debug(`[Bot] Next share check in ${Math.round(delayMs / 60_000)}m`);
1871
+
1872
+ setTimeout(async () => {
1873
+ // Respect quiet hours (env vars → YAML config → defaults 02:00–06:00)
1874
+ if (isQuietHours(config.life)) {
1875
+ armShareDelivery(false);
1876
+ return;
1877
+ }
1878
+
1879
+ const sendMsg = async (chatId, text) => {
1880
+ try {
1881
+ await bot.sendMessage(chatId, text, { parse_mode: 'Markdown' });
1882
+ } catch {
1883
+ await bot.sendMessage(chatId, text);
1884
+ }
1885
+ };
1886
+
1887
+ let didDeliver = false;
1888
+ try {
1889
+ const before = shareQueue ? shareQueue.getPending(null, 1).length : 0;
1890
+ await agent.deliverPendingShares(sendMsg);
1891
+ const after = shareQueue ? shareQueue.getPending(null, 1).length : 0;
1892
+ didDeliver = before > 0 && after < before;
1893
+ } catch (err) {
1894
+ logger.error(`[Bot] Proactive share delivery failed: ${err.message}`);
1895
+ }
1896
+
1897
+ armShareDelivery(didDeliver);
1898
+ }, delayMs);
1899
+ };
1900
+
1901
+ // Start the first check after a random 10–30 min
1902
+ armShareDelivery(false);
1903
+
1772
1904
  return bot;
1773
1905
  }