nexus-fca 3.1.4 → 3.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docs/memory-usage.md +50 -0
- package/package.json +1 -1
- package/src/listenMqtt.js +45 -13
package/docs/memory-usage.md
ADDED
@@ -0,0 +1,50 @@
+# Nexus-FCA Memory Usage Guide
+
+## Built-in guardrails
+
+- **Outbound MQTT buffers** (`src/sendMessageMqtt.js`, `src/listenMqtt.js`)
+  - `_pendingOutbound` tracks only outstanding message IDs and is pruned immediately when an ACK lands.
+  - Health metrics (`lib/health/HealthMetrics.js`) cap ACK samples at 50 entries and keep averages via exponential smoothing, so telemetry never balloons in RAM.
+- **Pending edit queue** (`src/editMessage.js`)
+  - `globalOptions.editSettings.maxPendingEdits` defaults to 200; the oldest edits are dropped once the cap is hit, and TTL checks clear stale entries.
+  - Every ACK removes the corresponding entry, and `HealthMetrics` mirrors the size so you can alert if it starts climbing.
+- **Group send queue** (`index.js`)
+  - Each group’s queue is capped (default 100 messages), and sweeps run every 5 minutes to drop idle or over-capacity queues.
+  - Sweeper stats surface through `ctx.health.recordGroupQueuePrune`, so you can confirm that cleanup is happening.
+- **Performance caches** (`lib/performance/PerformanceManager.js`)
+  - Cache maps are bounded by `cacheSize` (default 1000) and enforce TTL, while request time windows keep only the last 100 samples.
+  - `PerformanceOptimizer` trims its request history to 1000 entries and halves the buffer on each cleanup pass.
+- **Database write queue** (`lib/database/EnhancedDatabase.js`)
+  - Writes batch in chunks of 100, and the queue processor re-runs every second. Long outages are the only way to accumulate large queues.
+- **Safety timers** (`lib/safety/FacebookSafety.js`)
+  - All recurring timers are stored and cleared before new ones are scheduled, preventing runaway intervals during reconnect churn.
+
+## Situations that can increase memory
+
+| Area | Why it grows | Mitigation |
+| --- | --- | --- |
+| Database write queue | Target SQLite/SQL server offline → `writeQueue` keeps buffering | Monitor `writeQueue.length` or add alerts around `EnhancedDatabase.processQueue`; if storage is optional, disable DB integration entirely. |
+| Multiple PerformanceManager / PerformanceOptimizer instances | Each instance spawns its own metrics intervals and caches | Treat both managers as singletons; share them via dependency injection instead of `new`-ing one per feature. |
+| Elevated group queue caps | Setting `setGroupQueueCapacity` well above 100 multiplies per-thread memory | Keep caps small; rely on `_flushGroupQueue` for bursts instead of raising the ceiling. |
+| Pending edit saturation | Frequent edit retries without ACKs hit the 200-item cap | Investigate upstream failures (usually edit rights or MQTT drops). `api.getMemoryMetrics()` will show `pendingEditsDropped` climbing when this happens. |
+| Large custom caches | If you override `cacheSize` or TTLs with very large values, the Map will grow | Pick realistic TTLs; if the workload is mostly transient, disable the cache (`enableCache: false`). |
+
+## Monitoring checklist
+
+1. **Runtime snapshot** – call `api.getMemoryMetrics()` to read pending edit counts, outbound depth, and memory guard actions.
+2. **Health dashboard** – `ctx.health.snapshot()` (or the API wrapper `getHealthMetrics`) exposes ACK latency samples and queue stats.
+3. **Performance events** – `PerformanceManager` emits `metricsUpdate` every 30s; attach a listener and pipe it to your logger or Prometheus bridge.
+4. **Node heap checks** – pair the built-in metrics with `process.memoryUsage()` or `--inspect` tooling if you suspect leaks from user code.
+
+## Configuration tips
+
+- Tune `globalOptions.editSettings` if your bot edits aggressively; lower `maxPendingEdits` to 100 and `editTTLms` to 2–3 minutes for tighter control.
+- Use `api.setGroupQueueCapacity(n)` to keep per-thread queues bounded; the sweeper already limits idle queues to 30 minutes, but lower values (10–20) reduce burst memory further.
+- If you don’t need database analytics, avoid initializing `EnhancedDatabase`/`DatabaseManager`; the rest of the stack runs without it.
+- Disable extra instrumentation when running on low-memory hardware:
+  ```js
+  const perfManager = new PerformanceManager({ enableMetrics: false, enableCache: false });
+  ```
+- Always reuse the same `PerformanceOptimizer`/`PerformanceManager` instead of instantiating one per request handler, so their intervals remain singular.
+
+Following the defaults keeps Nexus-FCA comfortably under a few hundred megabytes even on small VPS nodes. When memory spikes appear, start by sampling `api.getMemoryMetrics()` and check the table above to see which subsystem is accumulating work. Adjust the related caps or temporarily disable the optional feature until the upstream issue (DB outage, repeated edit failures, etc.) is resolved.
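The guide's monitoring checklist can be wired together in a few lines. In the sketch below, `api` is assumed to be a logged-in Nexus-FCA handle and `perfManager` a shared `PerformanceManager` instance; the method, event, and field names (`getMemoryMetrics`, `metricsUpdate`, `pendingEditsDropped`) are taken from the guide itself, while the surrounding wiring is illustrative rather than part of the package:

```js
// Illustrative wiring only; method, event, and field names come from the guide above.
setInterval(() => {
  const mem = api.getMemoryMetrics(); // step 1: runtime snapshot
  if (mem.pendingEditsDropped > 0) {
    console.warn('Pending edit cap is being hit; investigate edit rights / MQTT drops.');
  }
  const heapMb = process.memoryUsage().heapUsed / 1024 / 1024; // step 4: heap check
  console.log(`heapUsed=${heapMb.toFixed(1)} MB`, mem);
}, 60 * 1000);

// Step 3: forward the periodic PerformanceManager metrics to your logger.
perfManager.on('metricsUpdate', (metrics) => console.log('nexus-fca perf', metrics));
```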
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "nexus-fca",
-  "version": "3.1.4",
+  "version": "3.1.5",
   "description": "Nexus-FCA 3.1 – THE BEST, SAFEST, MOST STABLE Facebook Messenger API! Email/password + appState login, proxy support (HTTP/HTTPS/SOCKS5), random user agent, proactive cookie refresh, MQTT stability, session protection, and TypeScript support.",
   "main": "index.js",
   "repository": {
package/src/listenMqtt.js
CHANGED
@@ -93,19 +93,34 @@ function fetchSeqID(defaultFuncs, api, ctx, callback) {
 
 // Adaptive backoff state (per-process singleton like) - tie to ctx
 function getBackoffState(ctx){
+  const envBase = parseInt(process.env.NEXUS_MQTT_BACKOFF_BASE, 10) || 1000;
+  const envMax = parseInt(process.env.NEXUS_MQTT_BACKOFF_MAX, 10) || (5 * 60 * 1000);
+  const envFactor = parseFloat(process.env.NEXUS_MQTT_BACKOFF_FACTOR) || 2;
+  const backoffOverrides = (ctx.globalOptions && ctx.globalOptions.backoff) || {};
+  const resolved = {
+    base: Number.isFinite(backoffOverrides.baseMs) ? backoffOverrides.baseMs : envBase,
+    max: Number.isFinite(backoffOverrides.maxMs) ? backoffOverrides.maxMs : envMax,
+    factor: Number.isFinite(backoffOverrides.factor) ? backoffOverrides.factor : envFactor,
+    jitter: typeof backoffOverrides.jitter === 'number' ? backoffOverrides.jitter : 0.25,
+    resetAfterMs: Number.isFinite(backoffOverrides.resetAfterMs) ? backoffOverrides.resetAfterMs : (3 * 60 * 1000)
+  };
   if(!ctx._adaptiveReconnect){
-    const envBase = parseInt(process.env.NEXUS_MQTT_BACKOFF_BASE, 10) || 1000;
-    const envMax = parseInt(process.env.NEXUS_MQTT_BACKOFF_MAX, 10) || (5 * 60 * 1000);
-    const envFactor = parseFloat(process.env.NEXUS_MQTT_BACKOFF_FACTOR) || 2;
     ctx._adaptiveReconnect = {
-      base: envBase,
-      max: envMax,
-      factor: envFactor,
-      jitter: 0.25,
+      base: resolved.base,
+      max: resolved.max,
+      factor: resolved.factor,
+      jitter: resolved.jitter,
+      resetAfterMs: resolved.resetAfterMs,
       current: 0,
       lastResetTs: 0,
       consecutiveFails: 0 // Track consecutive failures
     };
+  } else {
+    ctx._adaptiveReconnect.base = resolved.base;
+    ctx._adaptiveReconnect.max = resolved.max;
+    ctx._adaptiveReconnect.factor = resolved.factor;
+    ctx._adaptiveReconnect.jitter = resolved.jitter;
+    ctx._adaptiveReconnect.resetAfterMs = resolved.resetAfterMs;
   }
   return ctx._adaptiveReconnect;
 }
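With this change, `getBackoffState` re-resolves its settings on every call, so `ctx.globalOptions.backoff` overrides and the `NEXUS_MQTT_BACKOFF_*` environment variables take effect even after the reconnect state object already exists. A minimal sketch of the knobs this code reads; the option names (`baseMs`, `maxMs`, `factor`, `jitter`, `resetAfterMs`) come straight from the hunk above, while the assumption that login options land on `ctx.globalOptions` is inferred, not verified:

```js
// Option A: environment variables, read by getBackoffState (milliseconds).
process.env.NEXUS_MQTT_BACKOFF_BASE = '2000';                 // first retry after ~2 s
process.env.NEXUS_MQTT_BACKOFF_MAX = String(10 * 60 * 1000);  // cap delays at 10 min
process.env.NEXUS_MQTT_BACKOFF_FACTOR = '1.5';                // gentler growth curve

// Option B: per-login overrides; these win over the env values above
// (assumes these options flow into ctx.globalOptions via the usual login options).
const loginOptions = {
  autoReconnect: true,
  backoff: {
    baseMs: 2000,
    maxMs: 10 * 60 * 1000,
    factor: 1.5,
    jitter: 0.25,                // the built-in default randomization
    resetAfterMs: 3 * 60 * 1000  // treat 3 min of uptime as a healthy session
  }
};
```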
@@ -388,7 +403,7 @@ function listenMqtt(defaultFuncs, api, ctx, globalCallback) {
       fetchSeqID(defaultFuncs, api, ctx, (err) => {
         if (err) {
           log.warn("listenMqtt", "Failed to refresh SeqID on error, falling back to adaptive reconnect...");
-          scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback);
+          scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback, 'seqid-refresh-failed');
         } else {
           listenMqtt(defaultFuncs, api, ctx, globalCallback);
         }
@@ -418,7 +433,15 @@ function listenMqtt(defaultFuncs, api, ctx, globalCallback) {
 
       if (!ctx.loggedIn) return; // avoid loops if logged out
       if (ctx.globalOptions.autoReconnect) {
-        scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback);
+        const backoffState = getBackoffState(ctx);
+        const resetThreshold = backoffState.resetAfterMs || (3 * 60 * 1000);
+        let reconnectReason = 'close';
+        if (duration >= resetThreshold) {
+          log.info('listenMqtt', `Resetting MQTT backoff after ${seconds}s healthy session.`);
+          resetBackoff(backoffState);
+          reconnectReason = 'close-long';
+        }
+        scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback, reconnectReason);
       }
     });
 
@@ -439,7 +462,15 @@ function listenMqtt(defaultFuncs, api, ctx, globalCallback) {
 
       if (!ctx.loggedIn) return;
       if (ctx.globalOptions.autoReconnect) {
-        scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback);
+        const backoffState = getBackoffState(ctx);
+        const resetThreshold = backoffState.resetAfterMs || (3 * 60 * 1000);
+        let reconnectReason = 'disconnect';
+        if (duration >= resetThreshold) {
+          log.info('listenMqtt', `Resetting MQTT backoff after ${seconds}s healthy session.`);
+          resetBackoff(backoffState);
+          reconnectReason = 'disconnect-long';
+        }
+        scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback, reconnectReason);
       }
     });
 
@@ -522,7 +553,7 @@ function listenMqtt(defaultFuncs, api, ctx, globalCallback) {
     const rTimeout = setTimeout(function () {
       ctx.health.onError('timeout_no_t_ms');
       mqttClient.end();
-      scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback);
+      scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback, 'tms-timeout');
     }, 5000);
     ctx.tmsWait = function () {
       clearTimeout(rTimeout);
@@ -619,11 +650,12 @@ function listenMqtt(defaultFuncs, api, ctx, globalCallback) {
     }, 55000 + Math.floor(Math.random()*20000));
   }
 }
-function scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback){
+function scheduleAdaptiveReconnect(defaultFuncs, api, ctx, globalCallback, reason){
   const state = getBackoffState(ctx);
   const delay = computeNextDelay(state);
   ctx.health.onReconnectScheduled(delay);
-  log.warn('listenMqtt', `Reconnecting in ${delay} ms (adaptive backoff)`);
+  const suffix = reason ? ` (${reason})` : '';
+  log.warn('listenMqtt', `Reconnecting in ${delay} ms (adaptive backoff)${suffix}`);
   setTimeout(()=>listenMqtt(defaultFuncs, api, ctx, globalCallback), delay);
 }
 function getTaskResponseData(taskType, payload) {
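The hunk above threads a `reason` label into `scheduleAdaptiveReconnect`, which still delegates the delay math to `computeNextDelay` and the reset to `resetBackoff`; neither helper appears in this diff. A hypothetical sketch of what such helpers typically look like, consistent with the state fields initialized in `getBackoffState` (`base`, `max`, `factor`, `jitter`, `current`, `consecutiveFails`, `lastResetTs`); the package's real implementations may differ:

```js
// Hypothetical sketch, not the package source: exponential backoff with jitter.
function computeNextDelay(state) {
  // Grow from `base` by `factor` on each consecutive attempt, capped at `max`.
  const raw = state.current === 0
    ? state.base
    : Math.min(state.current * state.factor, state.max);
  state.current = raw;
  state.consecutiveFails += 1;
  // Apply +/- `jitter` randomization so mass reconnects de-synchronize.
  const spread = raw * state.jitter;
  return Math.round(raw - spread + Math.random() * 2 * spread);
}

function resetBackoff(state) {
  // Called after a session stays healthy longer than `resetAfterMs`.
  state.current = 0;
  state.consecutiveFails = 0;
  state.lastResetTs = Date.now();
}
```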
|