@poolzin/pool-bot 2026.4.36 → 2026.4.37

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "2026.4.36",
3
- "commit": "713ea950b901906b93c4bd9d8a313731671f40e5",
4
- "builtAt": "2026-04-07T11:46:07.534Z"
2
+ "version": "2026.4.37",
3
+ "commit": "10cf8ee6ea482359cdb4eb35103b39d6e2f10a55",
4
+ "builtAt": "2026-04-07T16:52:50.808Z"
5
5
  }
@@ -1 +1 @@
1
- {"version":3,"file":"mcp-cli.d.ts","sourceRoot":"","sources":["../../src/cli/mcp-cli.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,WAAW,CAAC;AASzC,wBAAgB,cAAc,CAAC,OAAO,EAAE,OAAO,QAqD9C"}
1
+ {"version":3,"file":"mcp-cli.d.ts","sourceRoot":"","sources":["../../src/cli/mcp-cli.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,WAAW,CAAC;AASzC,wBAAgB,cAAc,CAAC,OAAO,EAAE,OAAO,QA2D9C"}
@@ -0,0 +1,30 @@
1
/**
 * Heartbeat health monitoring and auto-recovery.
 * Detects when heartbeat stops running and triggers recovery.
 */
interface MonitorState {
    /** Epoch ms of the last real health check (Date.now()); 0 before any check. */
    lastCheck: number;
    /** Epoch ms of the last recovery attempt; 0 if none has been made. */
    lastRecovery: number;
    /** Count of consecutive checks that found the heartbeat stale. */
    consecutiveFailures: number;
}
/**
 * Monitor heartbeat health.
 * Call this periodically (e.g., every 15 minutes).
 *
 * Returns `healthy: false` with an `action` of 'monitoring',
 * 'recovery_pending', or 'recovery_triggered' when the heartbeat state is
 * stale; `lastAudit` echoes the last recorded audit timestamp when known.
 */
export declare function monitorHeartbeatHealth(workspaceDir: string): {
    healthy: boolean;
    lastAudit?: string;
    action?: string;
};
/**
 * Get monitor status for debugging.
 * `lastCheckAgo` is a human-readable minutes-since-last-check string.
 */
export declare function getMonitorStatus(): MonitorState & {
    lastCheckAgo: string;
};
/**
 * Reset monitor state (for testing/manual recovery).
 */
export declare function resetMonitorState(): void;
export {};
//# sourceMappingURL=heartbeat-monitor.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"heartbeat-monitor.d.ts","sourceRoot":"","sources":["../../src/infra/heartbeat-monitor.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAsBH,UAAU,YAAY;IACpB,SAAS,EAAE,MAAM,CAAC;IAClB,YAAY,EAAE,MAAM,CAAC;IACrB,mBAAmB,EAAE,MAAM,CAAC;CAC7B;AAyDD;;;GAGG;AACH,wBAAgB,sBAAsB,CAAC,YAAY,EAAE,MAAM,GAAG;IAC5D,OAAO,EAAE,OAAO,CAAC;IACjB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB,CAsDA;AAED;;GAEG;AACH,wBAAgB,gBAAgB,IAAI,YAAY,GAAG;IAAE,YAAY,EAAE,MAAM,CAAA;CAAE,CAQ1E;AAED;;GAEG;AACH,wBAAgB,iBAAiB,IAAI,IAAI,CAOxC"}
@@ -0,0 +1,128 @@
1
+ /**
2
+ * Heartbeat health monitoring and auto-recovery.
3
+ * Detects when heartbeat stops running and triggers recovery.
4
+ */
5
+ import { readFileSync, existsSync } from 'node:fs';
6
+ import { join } from 'node:path';
7
+ import { createSubsystemLogger } from '../logging/subsystem.js';
8
+ const log = createSubsystemLogger('heartbeat-monitor');
9
+ const HEARTBEAT_MAX_AGE_MS = 90 * 60 * 1000; // 90 minutes (should run every 30min)
10
+ const RECOVERY_COOLDOWN_MS = 60 * 60 * 1000; // 1 hour between recovery attempts
11
+ let monitorState = {
12
+ lastCheck: 0,
13
+ lastRecovery: 0,
14
+ consecutiveFailures: 0,
15
+ };
16
/**
 * Read and parse `<workspace>/memory/heartbeat-state.json`.
 *
 * @param workspaceDir Root of the bot workspace.
 * @returns The parsed state object, or null when the file is missing or
 *          contains invalid JSON.
 */
function loadHeartbeatState(workspaceDir) {
    const file = join(workspaceDir, 'memory', 'heartbeat-state.json');
    if (!existsSync(file)) {
        return null;
    }
    try {
        return JSON.parse(readFileSync(file, 'utf-8'));
    }
    catch {
        // A corrupt or partially written file is treated like a missing one.
        return null;
    }
}
32
/**
 * Check whether the heartbeat state is stale.
 *
 * Stale means any of: no state at all, no `lastSecurityAudit` timestamp,
 * an unparseable timestamp, or a timestamp older than HEARTBEAT_MAX_AGE_MS.
 *
 * @param state Parsed heartbeat-state.json contents, or null when unavailable.
 * @returns true when the heartbeat appears to have stopped running.
 */
function isHeartbeatStale(state) {
    if (!state)
        return true;
    const lastAudit = state.lastSecurityAudit;
    if (!lastAudit)
        return true;
    const lastAuditTime = new Date(lastAudit).getTime();
    // Bug fix: an unparseable timestamp yields NaN, and `now - NaN > max`
    // is always false, so a garbled state file was previously reported as
    // HEALTHY (the old try/catch never fired — Date() does not throw).
    // Treat it as stale instead so recovery can kick in.
    if (Number.isNaN(lastAuditTime)) {
        return true;
    }
    return Date.now() - lastAuditTime > HEARTBEAT_MAX_AGE_MS;
}
50
/**
 * Record a recovery attempt.
 *
 * NOTE(review): this is currently a stub. A full implementation would send
 * an alert to the admin channel and try to restart the heartbeat cron job;
 * today it only stamps the attempt time and clears the failure counter.
 */
async function triggerRecovery() {
    log.info('[heartbeat-monitor] Triggering recovery action...');
    const now = Date.now();
    monitorState.lastRecovery = now;
    monitorState.consecutiveFailures = 0;
}
62
/**
 * Monitor heartbeat health.
 * Call this periodically (e.g., every 15 minutes).
 *
 * Checks are throttled: at most one real check per 15 minutes; throttled
 * calls report healthy without inspecting state. When the heartbeat state
 * is stale, consecutive misses are counted and — after 2+ misses, once the
 * recovery cooldown has elapsed — a recovery attempt is fired.
 *
 * @param workspaceDir Workspace root containing memory/heartbeat-state.json.
 * @returns Health verdict, the last audit timestamp when known, and the
 *          action taken ('monitoring' | 'recovery_pending' | 'recovery_triggered').
 */
export function monitorHeartbeatHealth(workspaceDir) {
    const now = Date.now();
    const sinceLastCheck = now - monitorState.lastCheck;
    // Throttle: skip the real check if one ran within the last 15 minutes.
    if (sinceLastCheck < 15 * 60 * 1000) {
        return { healthy: true };
    }
    monitorState.lastCheck = now;
    const state = loadHeartbeatState(workspaceDir);
    if (!isHeartbeatStale(state)) {
        // Healthy: clear any accumulated failure streak.
        if (monitorState.consecutiveFailures > 0) {
            log.info(`[heartbeat-monitor] Heartbeat recovered after ${monitorState.consecutiveFailures} failures`);
            monitorState.consecutiveFailures = 0;
        }
        return {
            healthy: true,
            lastAudit: state?.lastSecurityAudit,
        };
    }
    // Stale path: count the miss and decide what to do about it.
    monitorState.consecutiveFailures++;
    const lastAudit = state?.lastSecurityAudit || 'never';
    log.warn(`[heartbeat-monitor] Heartbeat is STALE! Last audit: ${lastAudit}. Failures: ${monitorState.consecutiveFailures}`);
    const missedTwice = monitorState.consecutiveFailures >= 2;
    const cooledDown = now - monitorState.lastRecovery > RECOVERY_COOLDOWN_MS;
    if (missedTwice && cooledDown) {
        // Fire-and-forget; the outcome is reflected in monitorState.
        void triggerRecovery();
        return {
            healthy: false,
            lastAudit,
            action: 'recovery_triggered',
        };
    }
    return {
        healthy: false,
        lastAudit,
        action: missedTwice ? 'recovery_pending' : 'monitoring',
    };
}
107
/**
 * Get monitor status for debugging.
 *
 * @returns A snapshot of the monitor state plus a human-readable
 *          "minutes since last check" string.
 */
export function getMonitorStatus() {
    const minutesSinceCheck = Math.round((Date.now() - monitorState.lastCheck) / 60000);
    return {
        ...monitorState,
        lastCheckAgo: `${minutesSinceCheck}m ago`,
    };
}
118
/**
 * Reset monitor state (for testing/manual recovery).
 * Swaps in a fresh zeroed state object and logs the reset.
 */
export function resetMonitorState() {
    const fresh = {
        lastCheck: 0,
        lastRecovery: 0,
        consecutiveFailures: 0,
    };
    monitorState = fresh;
    log.info('[heartbeat-monitor] Monitor state reset');
}
@@ -0,0 +1,25 @@
1
/**
 * Telegram API retry logic with exponential backoff.
 * Prevents transient network errors from breaking message delivery.
 */
import { Context } from 'grammy';
/**
 * Execute a Telegram API call with retry logic.
 *
 * @param operation The API call to run; re-invoked on each retry attempt.
 * @param context   Log context: `action` names the call; `chatId` is optional.
 * @param log       Optional sink for retry/failure messages.
 * @returns Resolves with the operation result; rejects when the error is
 *          non-retryable or all attempts are exhausted.
 */
export declare function withRetry<T>(operation: () => Promise<T>, context: {
    action: string;
    chatId?: string | number;
}, log?: (msg: string) => void): Promise<T>;
/**
 * Send message with retry logic.
 */
export declare function sendMessageWithRetry(ctx: Context, text: string, log?: (msg: string) => void): Promise<void>;
/**
 * Send chat action with retry logic.
 */
export declare function sendChatActionWithRetry(ctx: Context, action: 'typing' | 'upload_photo' | 'record_video' | 'upload_video' | 'record_voice' | 'upload_voice' | 'upload_document' | 'find_location' | 'record_video_note', log?: (msg: string) => void): Promise<void>;
/**
 * Edit message with retry logic.
 * NOTE(review): the implementation is currently a stub that always rejects
 * with "Not implemented" — confirm before relying on this.
 */
export declare function editMessageWithRetry(_ctx: Context, _messageId: number, _text: string, _log?: (msg: string) => void): Promise<void>;
//# sourceMappingURL=retry-logic.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"retry-logic.d.ts","sourceRoot":"","sources":["../../src/telegram/retry-logic.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAAE,OAAO,EAAE,MAAM,QAAQ,CAAC;AAMjC;;GAEG;AACH,wBAAsB,SAAS,CAAC,CAAC,EAC/B,SAAS,EAAE,MAAM,OAAO,CAAC,CAAC,CAAC,EAC3B,OAAO,EAAE;IAAE,MAAM,EAAE,MAAM,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,CAAA;CAAE,EACrD,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,GAC1B,OAAO,CAAC,CAAC,CAAC,CA6DZ;AAED;;GAEG;AACH,wBAAsB,oBAAoB,CACxC,GAAG,EAAE,OAAO,EACZ,IAAI,EAAE,MAAM,EACZ,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,GAC1B,OAAO,CAAC,IAAI,CAAC,CAMf;AAED;;GAEG;AACH,wBAAsB,uBAAuB,CAC3C,GAAG,EAAE,OAAO,EACZ,MAAM,EAAE,QAAQ,GAAG,cAAc,GAAG,cAAc,GAAG,cAAc,GAAG,cAAc,GAAG,cAAc,GAAG,iBAAiB,GAAG,eAAe,GAAG,mBAAmB,EACjK,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,GAC1B,OAAO,CAAC,IAAI,CAAC,CAMf;AAED;;GAEG;AACH,wBAAsB,oBAAoB,CACxC,IAAI,EAAE,OAAO,EACb,UAAU,EAAE,MAAM,EAClB,KAAK,EAAE,MAAM,EACb,IAAI,CAAC,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,GAC3B,OAAO,CAAC,IAAI,CAAC,CAGf"}
@@ -0,0 +1,75 @@
1
+ /**
2
+ * Telegram API retry logic with exponential backoff.
3
+ * Prevents transient network errors from breaking message delivery.
4
+ */
5
+ const MAX_RETRIES = 3;
6
+ const BASE_DELAY_MS = 1000; // 1 second
7
+ const MAX_DELAY_MS = 10000; // 10 seconds
8
/**
 * Execute a Telegram API call with retry logic.
 *
 * Retries transient failures (network errors, timeouts, rate limits,
 * HTTP 5xx) up to MAX_RETRIES times with exponential backoff plus jitter.
 * Permanent Telegram errors (blocked bot, missing chat/user, unmodified
 * message) and unrecognized errors are rethrown immediately.
 *
 * Fixes vs. previous version:
 *  - 5xx detection used `includes('50')`, which matched "50" anywhere in a
 *    message (chat ids, item counts, ...) and so retried permanent errors;
 *    it now matches whole 500/502/503/504 tokens only.
 *  - No longer logs "Retrying..." and sleeps after the final attempt.
 *
 * @param operation The API call to run; re-invoked on each attempt.
 * @param context   Log context ({ action, chatId? }).
 * @param log       Optional logger; retries fall back to console.warn.
 * @returns The operation's result.
 * @throws The original error when non-retryable, or a summary Error after
 *         all attempts are exhausted.
 */
export async function withRetry(operation, context, log) {
    let lastError = null;
    for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
        try {
            return await operation();
        }
        catch (error) {
            lastError = error instanceof Error ? error : new Error(String(error));
            const errorMsg = lastError.message.toLowerCase();
            // Permanent Telegram errors — retrying can never succeed.
            if (errorMsg.includes('bot was blocked') ||
                errorMsg.includes('chat not found') ||
                errorMsg.includes('user not found') ||
                errorMsg.includes('message not modified')) {
                throw lastError;
            }
            // Transient conditions worth retrying.
            const isRetryable = errorMsg.includes('network') ||
                errorMsg.includes('fetch') ||
                errorMsg.includes('timeout') ||
                errorMsg.includes('rate limit') ||
                errorMsg.includes('429') ||
                /\b(500|502|503|504)\b/.test(errorMsg) || // HTTP 5xx, whole tokens only
                errorMsg.includes('sendchataction');
            if (!isRetryable) {
                throw lastError;
            }
            // No point sleeping after the last attempt — go straight to the
            // final throw below.
            if (attempt === MAX_RETRIES) {
                break;
            }
            // Exponential backoff with jitter, capped at MAX_DELAY_MS.
            const delay = Math.min(BASE_DELAY_MS * Math.pow(2, attempt - 1) + Math.random() * 1000, MAX_DELAY_MS);
            const logMsg = `[telegram/retry] ${context.action} failed (attempt ${attempt}/${MAX_RETRIES}): ${lastError.message}. Retrying in ${Math.round(delay)}ms...`;
            if (log) {
                log(logMsg);
            }
            else {
                console.warn(logMsg);
            }
            await new Promise(resolve => setTimeout(resolve, delay));
        }
    }
    // All retries exhausted.
    const finalError = new Error(`[telegram/retry] ${context.action} failed after ${MAX_RETRIES} attempts: ${lastError?.message}`);
    if (log) {
        log(finalError.message);
    }
    throw finalError;
}
57
/**
 * Send message with retry logic.
 * Thin wrapper around ctx.reply; all retry behavior lives in withRetry.
 */
export async function sendMessageWithRetry(ctx, text, log) {
    const retryContext = { action: 'sendMessage', chatId: ctx.chat?.id };
    await withRetry(() => ctx.reply(text), retryContext, log);
}
63
/**
 * Send chat action with retry logic.
 *
 * NOTE(review): the operation reads ctx.chat.id without the optional
 * chaining used for the log context — if ctx.chat is undefined this throws
 * a TypeError inside withRetry rather than failing up front; confirm intended.
 */
export async function sendChatActionWithRetry(ctx, action, log) {
    const retryContext = { action: 'sendChatAction', chatId: ctx.chat?.id };
    await withRetry(() => ctx.api.sendChatAction(ctx.chat.id, action), retryContext, log);
}
69
/**
 * Edit message with retry logic.
 *
 * Placeholder — the real edit call is not wired up yet, so every invocation
 * rejects. Parameters are accepted (and ignored) so the final signature is
 * already in place for callers.
 */
export async function editMessageWithRetry(_ctx, _messageId, _text, _log) {
    return Promise.reject(new Error('Not implemented'));
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@poolzin/pool-bot",
3
- "version": "2026.4.36",
3
+ "version": "2026.4.37",
4
4
  "description": "🎱 Pool Bot - AI assistant with PLCODE integrations",
5
5
  "keywords": [],
6
6
  "license": "MIT",