nodejs-poolcontroller 7.6.1 → 7.7.0
This diff reflects the changes between the publicly available contents of these two package versions as published to the public registry, and is provided for informational purposes only.
- package/.eslintrc.json +44 -44
- package/.github/ISSUE_TEMPLATE/1-bug-report.yml +84 -0
- package/.github/ISSUE_TEMPLATE/2-docs.md +12 -0
- package/.github/ISSUE_TEMPLATE/3-proposal.md +28 -0
- package/.github/ISSUE_TEMPLATE/config.yml +8 -0
- package/CONTRIBUTING.md +74 -74
- package/Changelog +220 -215
- package/Dockerfile +17 -17
- package/Gruntfile.js +40 -40
- package/LICENSE +661 -661
- package/README.md +191 -191
- package/app.ts +1 -1
- package/config/Config.ts +14 -0
- package/config/VersionCheck.ts +2 -2
- package/controller/Constants.ts +2 -1
- package/controller/Equipment.ts +2484 -2459
- package/controller/Errors.ts +180 -180
- package/controller/Lockouts.ts +502 -436
- package/controller/State.ts +106 -30
- package/controller/boards/AquaLinkBoard.ts +1000 -0
- package/controller/boards/BoardFactory.ts +49 -45
- package/controller/boards/EasyTouchBoard.ts +2859 -2653
- package/controller/boards/IntelliCenterBoard.ts +4198 -4230
- package/controller/boards/IntelliComBoard.ts +63 -63
- package/controller/boards/IntelliTouchBoard.ts +273 -241
- package/controller/boards/NixieBoard.ts +1728 -1675
- package/controller/boards/SystemBoard.ts +4925 -4697
- package/controller/comms/Comms.ts +442 -479
- package/controller/comms/messages/Messages.ts +171 -25
- package/controller/comms/messages/config/ChlorinatorMessage.ts +5 -2
- package/controller/comms/messages/config/CircuitGroupMessage.ts +0 -0
- package/controller/comms/messages/config/CircuitMessage.ts +1 -0
- package/controller/comms/messages/config/ConfigMessage.ts +0 -0
- package/controller/comms/messages/config/CoverMessage.ts +0 -0
- package/controller/comms/messages/config/CustomNameMessage.ts +30 -30
- package/controller/comms/messages/config/EquipmentMessage.ts +0 -0
- package/controller/comms/messages/config/ExternalMessage.ts +0 -0
- package/controller/comms/messages/config/FeatureMessage.ts +0 -0
- package/controller/comms/messages/config/GeneralMessage.ts +0 -0
- package/controller/comms/messages/config/HeaterMessage.ts +142 -10
- package/controller/comms/messages/config/IntellichemMessage.ts +0 -0
- package/controller/comms/messages/config/OptionsMessage.ts +4 -21
- package/controller/comms/messages/config/PumpMessage.ts +53 -35
- package/controller/comms/messages/config/RemoteMessage.ts +0 -0
- package/controller/comms/messages/config/ScheduleMessage.ts +350 -347
- package/controller/comms/messages/config/SecurityMessage.ts +0 -0
- package/controller/comms/messages/config/ValveMessage.ts +1 -1
- package/controller/comms/messages/status/ChlorinatorStateMessage.ts +38 -86
- package/controller/comms/messages/status/EquipmentStateMessage.ts +58 -22
- package/controller/comms/messages/status/HeaterStateMessage.ts +116 -86
- package/controller/comms/messages/status/IntelliChemStateMessage.ts +445 -445
- package/controller/comms/messages/status/IntelliValveStateMessage.ts +35 -35
- package/controller/comms/messages/status/PumpStateMessage.ts +23 -1
- package/controller/comms/messages/status/VersionMessage.ts +0 -0
- package/controller/nixie/Nixie.ts +162 -162
- package/controller/nixie/NixieEquipment.ts +103 -103
- package/controller/nixie/bodies/Body.ts +120 -120
- package/controller/nixie/bodies/Filter.ts +135 -135
- package/controller/nixie/chemistry/ChemController.ts +2511 -2498
- package/controller/nixie/chemistry/Chlorinator.ts +363 -314
- package/controller/nixie/circuits/Circuit.ts +261 -248
- package/controller/nixie/heaters/Heater.ts +650 -648
- package/controller/nixie/pumps/Pump.ts +906 -661
- package/controller/nixie/schedules/Schedule.ts +313 -257
- package/controller/nixie/valves/Valve.ts +170 -170
- package/defaultConfig.json +306 -286
- package/logger/DataLogger.ts +448 -448
- package/logger/Logger.ts +0 -0
- package/package.json +56 -56
- package/tsconfig.json +25 -25
- package/web/Server.ts +92 -47
- package/web/bindings/aqualinkD.json +505 -0
- package/web/bindings/influxDB.json +1051 -1021
- package/web/bindings/mqtt.json +702 -654
- package/web/bindings/mqttAlt.json +731 -684
- package/web/bindings/rulesManager.json +54 -54
- package/web/bindings/smartThings-Hubitat.json +31 -31
- package/web/bindings/valveRelays.json +20 -20
- package/web/bindings/vera.json +25 -25
- package/web/interfaces/baseInterface.ts +137 -136
- package/web/interfaces/httpInterface.ts +145 -124
- package/web/interfaces/influxInterface.ts +276 -245
- package/web/interfaces/mqttInterface.ts +535 -475
- package/web/services/config/Config.ts +39 -18
- package/web/services/config/ConfigSocket.ts +0 -0
- package/web/services/state/State.ts +10 -0
- package/web/services/state/StateSocket.ts +4 -4
- package/web/services/utilities/Utilities.ts +44 -42
- package/.github/ISSUE_TEMPLATE/bug_report.md +0 -52
- package/config copy.json +0 -300
- package/issue_template.md +0 -52
package/controller/comms/Comms.ts

@@ -22,39 +22,67 @@ import { logger } from '../../logger/Logger';
 import * as net from 'net';
 import { setTimeout, setInterval } from 'timers';
 import { Message, Outbound, Inbound, Response } from './messages/Messages';
-import { InvalidOperationError, MessageError, OutboundMessageError } from '../Errors';
+import { InvalidEquipmentDataError, InvalidOperationError, MessageError, OutboundMessageError } from '../Errors';
 import { utils } from "../Constants";
+import { sys } from "../Equipment";
 import { webApp } from "../../web/Server";
 const extend = require("extend");
 export class Connection {
-    constructor() {
-
+    constructor() {}
+    public rs485Ports: RS485Port[] = [];
+    public get mockPort(): boolean {
+        let port = this.findPortById(0);
+        return typeof port !== 'undefined' && port.mockPort ? true : false;
     }
-    public …
-
-
-    private _port: any;
-    public mockPort: boolean = false;
-    private isPaused: boolean = false;
-    public buffer: SendRecieveBuffer;
-    private connTimer: NodeJS.Timeout;
-    protected resetConnTimer(...args) {
-        //console.log(`resetting connection timer`);
-        if (conn.connTimer !== null) clearTimeout(conn.connTimer);
-        if (!conn._cfg.mockPort && conn._cfg.inactivityRetry > 0 && !conn._closing) conn.connTimer = setTimeout(async () => {
-            try {
-                await conn.openAsync();
-            }
-            catch (err) { logger.error(`Error resetting RS485 port on inactivity: ${err.message}`); };
-        }, conn._cfg.inactivityRetry * 1000);
+    public isPortEnabled(portId: number) {
+        let port: RS485Port = this.findPortById(portId);
+        return typeof port === 'undefined' ? false : port.enabled;
     }
-    public …
-    public emitter: EventEmitter;
-    public get enabled(): boolean { return typeof this._cfg !== 'undefined' && this._cfg.enabled; }
-    public async setPortAsync(data: any) : Promise<any> {
+    public async deleteAuxPort(data: any): Promise<any> {
         try {
+            let portId = parseInt(data.portId, 10);
+            if (isNaN(portId)) return Promise.reject(new InvalidEquipmentDataError(`A valid port id was not provided to be deleted`, 'RS485Port', data.id));
+            if (portId === 0) return Promise.reject(new InvalidEquipmentDataError(`You may not delete the primart RS485 Port`, 'RS485Port', data.id));
+            let port = this.findPortById(portId);
+            this.removePortById(portId);
+            let section = `controller.comms` + (portId === 0 ? '' : portId);
+            let cfg = config.getSection(section, {});
+            config.removeSection(section);
+            return cfg;
+        } catch (err) { logger.error(`Error deleting aux port`) }
+    }
+    public async setPortAsync(data: any): Promise<any> {
+        try {
+            let ccfg = config.getSection('controller');
+            let pConfig;
+            let portId;
+            let maxId = -1;
+            for (let sec in ccfg) {
+                if (sec.startsWith('comms')) {
+                    let p = ccfg[sec];
+                    maxId = Math.max(p.portId, maxId);
+                    if (p.portId === data.portId) pConfig = p;
+                }
+            }
+            if (typeof pConfig === 'undefined') {
+                // We are adding a new one.
+                if (data.portId === -1 || typeof data.portId === 'undefined') portId = maxId + 1;
+                else portId = data.portId;
+            }
+            else portId = pConfig.portId;
+            if (isNaN(portId) || portId < 0) return Promise.reject(new InvalidEquipmentDataError(`Invalid port id defined ${portId}`, 'RS485Port', data.portId));
+            let section = `controller.comms` + (portId === 0 ? '' : portId);
             // Lets set the config data.
-            let pdata = config.getSection( …
+            let pdata = config.getSection(section, {
+                portId: portId,
+                rs485Port: "/dev/ttyUSB0",
+                portSettings: { baudRate: 9600, dataBits: 8, parity: 'none', stopBits: 1, flowControl: false, autoOpen: false, lock: false },
+                mockPort: false,
+                netConnect: false,
+                netHost: "raspberrypi",
+                netPort: 9801,
+                inactivityRetry: 10
+            });
             pdata.enabled = typeof data.enabled !== 'undefined' ? utils.makeBool(data.enabled) : utils.makeBool(pdata.enabled);
             pdata.netConnect = typeof data.netConnect !== 'undefined' ? utils.makeBool(data.netConnect) : utils.makeBool(pdata.netConnect);
             pdata.rs485Port = typeof data.rs485Port !== 'undefined' ? data.rs485Port : pdata.rs485Port;
@@ -63,11 +91,17 @@ export class Connection {
             pdata.netHost = typeof data.netHost !== 'undefined' ? data.netHost : pdata.netHost;
             pdata.netPort = typeof data.netPort === 'number' ? data.netPort : pdata.netPort;
             }
-            if ( …
-
+            if (typeof data.portSettings !== 'undefined') {
+                pdata.portSettings = extend(true, { baudRate: 9600, dataBits: 8, parity: 'none', stopBits: 1, flowControl: false, autoOpen: false, lock: false }, pdata.portSettings, data.portSettings);
             }
-
-
+            let existing = this.findPortById(portId);
+            if (typeof existing !== 'undefined') {
+                if (!await existing.closeAsync()) {
+                    return Promise.reject(new InvalidOperationError(`Unable to close the current RS485 port`, 'setPortAsync'));
+                }
+            }
+            config.setSection(section, pdata);
+            let cfg = config.getSection(section, {
                 rs485Port: "/dev/ttyUSB0",
                 portSettings: { baudRate: 9600, dataBits: 8, parity: 'none', stopBits: 1, flowControl: false, autoOpen: false, lock: false },
                 mockPort: false,
@@ -76,164 +110,205 @@ export class Connection {
                 netPort: 9801,
                 inactivityRetry: 10
             });
-
-
+            existing = this.getPortById(cfg);
+            if (typeof existing !== 'undefined') {
+                existing.reconnects = 0;
+                if (!await existing.openAsync(cfg)) {
+                    return Promise.reject(new InvalidOperationError(`Unable to open RS485 port ${pdata.rs485Port}`, 'setPortAsync'));
+                }
             }
-            return …
+            return cfg;
         } catch (err) { return Promise.reject(err); }
     }
-
-    // are issues related to the event listeners and the construction of a socket. We want an implementation that awaits until the socket
-    // is completely open before continuing.
-    // We also need to be able to destroy the port without it restarting on its own when tearing the port down or deliberately closing it. This means
-    // that the listeners need to be removed before closing via method but re-open the port when the close is hit prematurely.
-    protected async openNetSerialPort(): Promise<boolean> {
+    public async stopAsync() {
         try {
-            let …
-
-            port …
-            }
-
-
-            let ret = await new Promise<boolean>((resolve, _) => {
-                let nc = net.createConnection(opts, () => {
-                    nc.on('connect', () => { logger.info(`Net connect (socat) connected to: ${this._cfg.netHost}:${this._cfg.netPort}`); }); // Socket is opened but not yet ready.
-                    nc.on('ready', () => {
-                        this.isOpen = true;
-                        this.isRTS = true;
-                        logger.info(`Net connect (socat) ready and communicating: ${this._cfg.netHost}:${this._cfg.netPort}`);
-                        nc.on('data', (data) => {
-                            if (data.length > 0 && !this.isPaused) this.emitter.emit('packetread', data);
-                        });
-                        this._port = nc;
-                        // After the port is fully opened, set an inactivity timeout to restart it when it stops communicating.
-                        nc.setTimeout(Math.max(this._cfg.inactivityRetry, 10) * 1000, async () => {
-                            logger.warn(`Net connect (socat) connection idle: ${this._cfg.netHost}:${this._cfg.netPort} retrying connection.`);
-                            try {
-                                await conn.endAsync();
-                                await conn.openAsync();
-                            } catch (err) { logger.error(`Net connect (socat) error retrying connection ${err.message}`); }
-                        });
-                        resolve(true);
-                    });
-                    nc.on('close', (hadError: boolean) => {
-                        this.isOpen = false;
-                        if (typeof this._port !== 'undefined') {
-                            this._port.destroy();
-                            this._port.removeAllListeners();
-                        }
-                        this._port = undefined;
-                        this.buffer.clearOutbound();
-                        logger.info(`Net connect (socat) closed ${hadError === true ? 'due to error' : ''}: ${this._cfg.netHost}:${this._cfg.netPort}`);
-                        if (!this._closing) {
-                            // If we are closing manually this event should have been cleared already and should never be called. If this is fired out
-                            // of sequence then we will check the closing flag to ensure we are not forcibly closing the socket.
-                            if (typeof this.connTimer !== 'undefined' && this.connTimer) {
-                                clearTimeout(this.connTimer);
-                                this.connTimer = null;
-                            }
-                            this.connTimer = setTimeout(async () => {
-                                try {
-                                    // We are already closed so give some inactivity retry and try again.
-                                    await conn.openAsync();
-                                } catch (err) { }
-                            }, this._cfg.inactivityRetry * 1000);
-                        }
-                    });
-                    nc.on('end', () => { // Happens when the other end of the socket closes.
-                        this.isOpen = false;
-                        logger.info(`Net connect (socat) end event was fired`);
-                    });
-                });
-                nc.once('error', (err) => {
-                    // if the promise has already been fulfilled, but the error happens later, we don't want to call the promise again.
-                    if (this._cfg.inactivityRetry > 0) {
-                        logger.error(`Net connect (socat) connection error: ${err}. Retry in ${this._cfg.inactivityRetry} seconds`);
-                        this.connTimer = setTimeout(async () => {
-                            try {
-                                await conn.closeAsync();
-                                await conn.openAsync();
-                            } catch (err) { }
-                        }, this._cfg.inactivityRetry * 1000);
-                    }
-                    else logger.error(`Net connect (socat) connection error: ${err}. Never retrying -- No retry time set`);
-                    resolve(false);
-                });
-            });
-            return ret;
-        } catch (err) { logger.error(`Error opening net serial port. ${this._cfg.netHost}:${this._cfg.netPort}`); }
+            for (let i = this.rs485Ports.length - 1; i >= 0; i--) {
+                let port = this.rs485Ports[i];
+                await port.closeAsync();
+            }
+            logger.info(`Closed all serial communications connection.`);
+        } catch (err) { logger.error(`Error closing comms connection: ${err.message} `); }
     }
-
+    public async initAsync() {
         try {
-            …
-            if (err) {
-                logger.error(`Net connect (socat) error closing ${this._cfg.netHost}:${this._cfg.netPort}: ${err}`);
-                resolve(false);
-            }
-            else {
-                conn._port = undefined;
-                this.isOpen = false;
-                logger.info(`Successfully closed (socat) port ${this._cfg.netHost}:${this._cfg.netPort}`);
-                resolve(true);
-            }
-            });
-            this._port.once('close', (p) => {
-                this.isOpen = false;
-                this._port = undefined;
-                logger.info(`Net connect (socat) successfully closed: ${this._cfg.netHost}:${this._cfg.netPort}`);
-                resolve(true);
-            });
-            this._port.destroy();
-            }
-            else {
-                resolve(true);
-                conn._port = undefined;
-            }
-            });
-            if (success) {
-                if (typeof conn.buffer !== 'undefined') conn.buffer.close();
+            // So now that we are now allowing multiple comm ports we need to initialize each one. We are keeping the comms section from the config.json
+            // simply because I have no idea what the Docker folks do with this. So the default comms will be the one with an OCP or if there are no aux ports.
+            let cfg = config.getSection('controller');
+            for (let section in cfg) {
+                if (section.startsWith('comms')) {
+                    let port = new RS485Port(cfg[section]);
+                    this.rs485Ports.push(port);
+                    await port.openAsync();
             }
-            return success;
         }
-
-
+        } catch (err) { logger.error(`Error initializing RS485 ports ${err.message}`); }
+    }
+    public findPortById(portId?: number): RS485Port { return this.rs485Ports.find(elem => elem.portId === (portId || 0)); }
+    public async removePortById(portId: number) {
+        for (let i = this.rs485Ports.length - 1; i >= 0; i--) {
+            let port = this.rs485Ports[i];
+            if (port.portId === portId) {
+                await port.closeAsync();
+                // Don't remove the primary port. You cannot delete this one.
+                if(portId !== 0) this.rs485Ports.splice(i, 1);
+            }
+        }
+    }
+    public getPortById(cfg: any) {
+        let port = this.findPortById(cfg.portId || 0);
+        if (typeof port === 'undefined') {
+            port = new RS485Port(cfg);
+            this.rs485Ports.push(port);
+        }
+        return port;
+    }
+    public async listInstalledPorts(): Promise<any> {
+        try {
+            let ports = [];
+            // So now that we are now allowing multiple comm ports we need to initialize each one. We are keeping the comms section from the config.json
+            // simply because I have no idea what the Docker folks do with this. So the default comms will be the one with an OCP or if there are no aux ports.
+            let cfg = config.getSection('controller');
+            for (let section in cfg) {
+                if (section.startsWith('comms')) {
+                    let port = config.getSection(`controller.${section}`);
+                    if (port.portId === 0) port.name = 'Primary';
+                    else port.name = `Aux${port.portId}`;
+                    let p = this.findPortById(port.portId);
+                    port.isOpen = typeof p !== 'undefined' ? p.isOpen : false;
+                    ports.push(port);
+                }
+            }
+            return ports;
+        } catch (err) { logger.error(`Error listing installed RS485 ports ${err.message}`); }

     }
-    public …
-    …
+    public queueSendMessage(msg: Outbound) {
+        let port = this.findPortById(msg.portId);
+        if (typeof port !== 'undefined')
+            port.emitter.emit('messagewrite', msg);
+        else
+            logger.error(`queueSendMessage: Message was targeted for undefined port ${msg.portId || 0}`);
+    }
+    public pauseAll() {
+        for (let i = 0; i < this.rs485Ports.length; i++) {
+            let port = this.rs485Ports[i];
+            port.pause();
+        }
+    }
+    public resumeAll() {
+        for (let i = 0; i < this.rs485Ports.length; i++) {
+            let port = this.rs485Ports[i];
+            port.resume();
         }
+    }
+    public async getLocalPortsAsync(): Promise<any> {
+        try {
+            return await SerialPort.list();
+        } catch (err) { logger.error(`Error retrieving local ports ${err.message}`); }
+    }
+}
+export class Counter {
+    constructor() {
+        this.bytesReceived = 0;
+        this.recSuccess = 0;
+        this.recFailed = 0;
+        this.recCollisions = 0;
+        this.bytesSent = 0;
+        this.sndAborted = 0;
+        this.sndRetries = 0;
+        this.sndSuccess = 0;
+        this.recFailureRate = 0;
+        this.sndFailureRate = 0;
+        this.recRewinds = 0;
+    }
+    public bytesReceived: number;
+    public bytesSent: number;
+    public recSuccess: number;
+    public recFailed: number;
+    public recCollisions: number;
+    public recFailureRate: number;
+    public sndSuccess: number;
+    public sndAborted: number;
+    public sndRetries: number;
+    public sndFailureRate: number;
+    public recRewinds: number;
+    public updatefailureRate(): void {
+        this.recFailureRate = (this.recFailed + this.recSuccess) !== 0 ? (this.recFailed / (this.recFailed + this.recSuccess) * 100) : 0;
+        this.sndFailureRate = (this.sndAborted + this.sndSuccess) !== 0 ? (this.sndAborted / (this.sndAborted + this.sndSuccess) * 100) : 0;
+    }
+    public toLog(): string {
+        return `{ "bytesReceived": ${this.bytesReceived} "success": ${this.recSuccess}, "failed": ${this.recFailed}, "bytesSent": ${this.bytesSent}, "collisions": ${this.recCollisions}, "failureRate": ${this.recFailureRate.toFixed(2)}% }`;
+    }
+}
+// The following class allows njsPC to have multiple RS485 buses. Each port has its own buffer and message processor
+// so that devices on the bus can be isolated to a particular port. By doing this the communications are such that multiple
+// ports can be used to accommodate differing port speeds and fixed port addresses. If an …
+export class RS485Port {
+    constructor(cfg: any) {
+        this._cfg = cfg;
+
+        this.emitter = new EventEmitter();
+        this._inBuffer = [];
+        this._outBuffer = [];
+        this.procTimer = null;
+        this.emitter.on('messagewrite', (msg) => { this.pushOut(msg); });
+    }
+    public isRTS: boolean = true;
+    public reconnects:number = 0;
+    public emitter: EventEmitter;
+    public get portId() { return typeof this._cfg !== 'undefined' && typeof this._cfg.portId !== 'undefined' ? this._cfg.portId : 0; }
+    public isOpen: boolean = false;
+    private _closing: boolean = false;
+    private _cfg: any;
+    private _port: any;
+    public mockPort: boolean = false;
+    private isPaused: boolean = false;
+    private connTimer: NodeJS.Timeout;
+    //public buffer: SendRecieveBuffer;
+    public get enabled(): boolean { return typeof this._cfg !== 'undefined' && this._cfg.enabled; }
+    public counter: Counter = new Counter();
+    private procTimer: NodeJS.Timeout;
+    private _processing: boolean = false;
+    private _inBytes: number[] = [];
+    private _inBuffer: number[] = [];
+    private _outBuffer: Outbound[] = [];
+    private _waitingPacket: Outbound;
+    private _msg: Inbound;
+    // Connection management functions
+    public async openAsync(cfg?: any): Promise<boolean> {
+        if (this.isOpen) await this.closeAsync();
+        if (typeof cfg !== 'undefined') this._cfg = cfg;
+        if (!this._cfg.enabled) return true;
         if (this._cfg.netConnect && !this._cfg.mockPort) {
-
+            let sock: net.Socket = this._port as net.Socket;
+            if (typeof this._port !== 'undefined' && this.isOpen) {
                 // This used to try to reconnect and recreate events even though the socket was already connected. This resulted in
-                // instances where multiple event processors were present.
-
+                // instances where multiple event processors were present. Node doesn't give us any indication that the socket is
+                // still viable or if it is closing from either end.
+                return true;
+            }
+            else if (typeof this._port !== 'undefined') {
+                // We need to kill the existing connection by ending it.
+                this._port.end();
             }
             let nc: net.Socket = new net.Socket();
-            nc. …
-            nc. …
+            nc.once('connect', () => { logger.info(`Net connect (socat) ${this._cfg.portId} connected to: ${this._cfg.netHost}:${this._cfg.netPort}`); }); // Socket is opened but not yet ready.
+            nc.once('ready', () => {
                 this.isOpen = true;
                 this.isRTS = true;
-                logger.info(`Net connect (socat) ready and communicating: ${this._cfg.netHost}:${this._cfg.netPort}`);
+                logger.info(`Net connect (socat) ${this._cfg.portId} ready and communicating: ${this._cfg.netHost}:${this._cfg.netPort}`);
                 nc.on('data', (data) => {
                     //this.resetConnTimer();
-                    if (data.length > 0 && !this.isPaused) this. …
+                    if (data.length > 0 && !this.isPaused) this.pushIn(data);
                 });
+                this.emitPortStats();
             });
-            nc. …
+            nc.once('close', (p) => {
                 this.isOpen = false;
-                if (typeof this._port !== 'undefined') this._port.destroy();
+                if (typeof this._port !== 'undefined' && !this._port.destroyed) this._port.destroy();
                 this._port = undefined;
-                this. …
+                this.clearOutboundBuffer();
+                this.emitPortStats();
                 if (!this._closing) {
                     // If we are closing manually this event should have been cleared already and should never be called. If this is fired out
                     // of sequence then we will check the closing flag to ensure we are not forcibly closing the socket.
@@ -244,16 +319,15 @@ export class Connection {
                     this.connTimer = setTimeout(async () => {
                         try {
                             // We are already closed so give some inactivity retry and try again.
-                            await …
+                            await this.openAsync();
                         } catch (err) { }
                     }, this._cfg.inactivityRetry * 1000);
                 }
-                logger.info(`Net connect (socat) closed ${p === true ? 'due to error' : ''}: ${this._cfg.netHost}:${this._cfg.netPort}`);
+                logger.info(`Net connect (socat) ${this._cfg.portId} closed ${p === true ? 'due to error' : ''}: ${this._cfg.netHost}:${this._cfg.netPort}`);
             });
             nc.on('end', () => { // Happens when the other end of the socket closes.
                 this.isOpen = false;
-
-                logger.info(`Net connect (socat) end event was fired`);
+                logger.info(`Net connect (socat) ${this.portId} end event was fired`);
             });
             //nc.on('drain', () => { logger.info(`The drain event was fired.`); });
             //nc.on('lookup', (o) => { logger.info(`The lookup event was fired ${o}`); });
@@ -261,13 +335,15 @@ export class Connection {
             // left the connection in a weird state where the previous connection was processing events and the new connection was
             // doing so as well. This isn't an error it is a warning as the RS485 bus will most likely be communicating at all times.
             //nc.on('timeout', () => { logger.warn(`Net connect (socat) Connection Idle: ${this._cfg.netHost}:${this._cfg.netPort}`); });
-            …
+            if (this._cfg.inactivityRetry > 0) {
+                nc.setTimeout(Math.max(this._cfg.inactivityRetry, 10) * 1000, async () => {
+                    logger.warn(`Net connect (socat) connection idle: ${this._cfg.netHost}:${this._cfg.netPort} retrying connection.`);
+                    try {
+                        await this.closeAsync();
+                        await this.openAsync();
+                    } catch (err) { logger.error(`Net connect (socat) ${this.portId} error retrying connection ${err.message}`); }
+                });
+            }

             return await new Promise<boolean>((resolve, _) => {
                 // We only connect an error once as we will destroy this connection on error then recreate a new socket on failure.
@@ -275,19 +351,21 @@ export class Connection {
                     //logger.error(`Net connect (socat) Connection: ${err}. ${this._cfg.inactivityRetry > 0 ? `Retry in ${this._cfg.inactivityRetry} seconds` : `Never retrying; inactivityRetry set to ${this._cfg.inactivityRetry}`}`);
                     //this.resetConnTimer();
                     this.isOpen = false;
+                    this.emitPortStats();
                     // if the promise has already been fulfilled, but the error happens later, we don't want to call the promise again.
                     if (typeof resolve !== 'undefined') { resolve(false); }
                     if (this._cfg.inactivityRetry > 0) {
-                        logger.error(`Net connect (socat) connection error: ${err}. Retry in ${this._cfg.inactivityRetry} seconds`);
-                        setTimeout(async () => { try { await …
+                        logger.error(`Net connect (socat) connection ${this.portId} error: ${err}. Retry in ${this._cfg.inactivityRetry} seconds`);
+                        setTimeout(async () => { try { await this.openAsync(); } catch (err) { } }, this._cfg.inactivityRetry * 1000);
                     }
-                    else logger.error(`Net connect (socat) connection error: ${err}. Never retrying -- No retry time set`);
+                    else logger.error(`Net connect (socat) connection ${this.portId} error: ${err}. Never retrying -- No retry time set`);
                 });
-                nc.connect( …
-                if (typeof this._port !== 'undefined') logger.warn( …
-                logger.info(`Net connect (socat) Connection connected`);
+                nc.connect(this._cfg.netPort, this._cfg.netHost, () => {
+                    if (typeof this._port !== 'undefined') logger.warn(`Net connect (socat) ${this.portId} recovered from lost connection.`);
+                    logger.info(`Net connect (socat) Connection ${this.portId} connected`);
                     this._port = nc;
                     this.isOpen = true;
+                    this.emitPortStats();
                     resolve(true);
                     resolve = undefined;
                 });
@@ -296,9 +374,9 @@ export class Connection {
         else {
             if (typeof this._port !== 'undefined' && this._port.isOpen) {
                 // This used to try to reconnect even though the serial port was already connected. This resulted in
-                // instances where an access denied error was emitted.
+                // instances where an access denied error was emitted. So if the port is open we will simply return.
                 this.resetConnTimer();
-                return …
+                return true;
             }
             let sp: SerialPort = null;
             if (this._cfg.mockPort) {
@@ -310,9 +388,9 @@ export class Connection {
             }
             else {
                 this.mockPort = false;
-                sp = new SerialPort( …
+                sp = new SerialPort(this._cfg.rs485Port, this._cfg.portSettings);
             }
-            return new Promise<boolean>((resolve, _) => {
+            return await new Promise<boolean>((resolve, _) => {
                 // The serial port open method calls the callback just once. Unfortunately that is not the case for
                 // network serial port connections. There really isn't a way to make it syncronous. The openAsync will truly
                 // be open if a hardware interface is used and this method returns.
@@ -320,7 +398,7 @@ export class Connection {
                     if (err) {
                         this.resetConnTimer();
                         this.isOpen = false;
-                        logger.error(`Error opening port: ${err.message}. ${this._cfg.inactivityRetry > 0 ? `Retry in ${this._cfg.inactivityRetry} seconds` : `Never retrying; inactivityRetry set to ${this._cfg.inactivityRetry}`}`);
+                        logger.error(`Error opening port ${this.portId}: ${err.message}. ${this._cfg.inactivityRetry > 0 ? `Retry in ${this._cfg.inactivityRetry} seconds` : `Never retrying; inactivityRetry set to ${this._cfg.inactivityRetry}`}`);
                         resolve(false);
                     }
                     else resolve(true);
@@ -330,28 +408,31 @@ export class Connection {
                 // won't be called until long after the promise is resolved above. Yes we should never reject this promise. The resolution is true
                 // for a successul connect and false otherwise.
                 sp.on('open', () => {
-                    if (typeof …
-                    else logger.info(`Serial port: ${this._cfg.rs485Port} request to open successful`);
+                    if (typeof this._port !== 'undefined') logger.info(`Serial Port ${this.portId}: ${this._cfg.rs485Port} recovered from lost connection.`)
+                    else logger.info(`Serial port: ${this._cfg.rs485Port} request to open successful ${this._cfg.portSettings.baudRate}b ${this._cfg.portSettings.dataBits}-${this._cfg.portSettings.parity}-${this._cfg.portSettings.stopBits}`);
                     this._port = sp;
                     this.isOpen = true;
-                    sp.on('data', (data) => { if (!this.mockPort && !this.isPaused) this. …
+                    sp.on('data', (data) => { if (!this.mockPort && !this.isPaused) this.resetConnTimer(); this.pushIn(data); });
                     this.resetConnTimer();
+                    this.emitPortStats();
                 });
                 sp.on('close', (err) => {
                     this.isOpen = false;
-                    logger.info(`Serial Port has been closed: ${err ? JSON.stringify(err) : ''}`);
+                    logger.info(`Serial Port ${this.portId} has been closed ${this.portId}: ${err ? JSON.stringify(err) : ''}`);
                 });
                 sp.on('error', (err) => {
                     this.isOpen = false;
                     if (sp.isOpen) sp.close((err) => { }); // call this with the error callback so that it doesn't emit to the error again.
                     this.resetConnTimer();
-                    logger.error(`Serial Port: An error occurred : ${this._cfg.rs485Port}: ${JSON.stringify(err)}`);
+                    logger.error(`Serial Port ${this.portId}: An error occurred : ${this._cfg.rs485Port}: ${JSON.stringify(err)}`);
+                    this.emitPortStats();
                 });
             });
         }
     }
     public async closeAsync(): Promise<boolean> {
         try {
+            if (this._closing) return false;
             this._closing = true;
             if (this.connTimer) clearTimeout(this.connTimer);
             if (typeof this._port !== 'undefined' && this.isOpen) {
@@ -360,33 +441,39 @@ export class Connection {
                 this._port.removeAllListeners();
                 this._port.once('error', (err) => {
                     if (err) {
-                        logger.error(`Error closing ${this._cfg.netHost} …
+                        logger.error(`Error closing ${this.portId} ${ this._cfg.netHost }: ${ this._cfg.netPort } / ${ this._cfg.rs485Port }: ${ err }`);
                         resolve(false);
                     }
                     else {
-
+                        this._port = undefined;
                         this.isOpen = false;
-                        logger.info(`Successfully closed (socat) port ${this._cfg.netHost}:${this._cfg.netPort} …
+                        logger.info(`Successfully closed (socat) ${this.portId} port ${this._cfg.netHost}:${this._cfg.netPort} / ${this._cfg.rs485Port}`);
                         resolve(true);
                     }
                 });
+                this._port.once('end', () => {
+                    logger.info(`Net connect (socat) ${this.portId} closing: ${this._cfg.netHost}:${this._cfg.netPort}`);
+                });
                 this._port.once('close', (p) => {
                     this.isOpen = false;
                     this._port = undefined;
-                    logger.info(`Net connect (socat) successfully closed: ${this._cfg.netHost}:${this._cfg.netPort}`);
+                    logger.info(`Net connect (socat) ${this.portId} successfully closed: ${this._cfg.netHost}:${this._cfg.netPort}`);
                     resolve(true);
                 });
+                logger.info(`Net connect (socat) ${this.portId} request close: ${this._cfg.netHost}:${this._cfg.netPort}`);
+                // Unfortunately the end call does not actually work in node. It will simply not return anything so we are going to
+                // just call destroy and forcibly close it.
                 this._port.destroy();
             }
-            else if (typeof …
-
+            else if (typeof this._port.close === 'function') {
+                this._port.close((err) => {
                     if (err) {
-                        logger.error(`Error closing ${this._cfg.rs485Port}: ${err}`);
+                        logger.error(`Error closing ${this.portId} serial port ${this._cfg.rs485Port}: ${err}`);
                         resolve(false);
                     }
                     else {
-
-                        logger.info(`Successfully closed …
+                        this._port = undefined;
+                        logger.info(`Successfully closed ${this.portId} serial port ${this._cfg.rs485Port}`);
                         resolve(true);
                         this.isOpen = false;
                     }
@@ -394,269 +481,167 @@ export class Connection {
                 });
             }
             else {
                 resolve(true);
-
+                this._port = undefined;
             }
             });
-            if (success) {
-                if (typeof conn.buffer !== 'undefined') conn.buffer.close();
-            }
+            if (success) { this.closeBuffer(); }
             return success;
         }
         return true;
-        } catch (err) { logger.error(`Error closing comms connection: ${err.message}`); return Promise.resolve(false); }
+        } catch (err) { logger.error(`Error closing comms connection ${this.portId}: ${err.message}`); return Promise.resolve(false); }
+        finally { this._closing = false; this.emitPortStats(); }
     }
-    public …
-    …
-            logger.info(`Successfully closed (socat) port ${this._cfg.netHost}:${this._cfg.netPort}/${this._cfg.rs485Port}`);
-            resolve(true);
-            }
-        });
-        this._port.once('close', (p) => {
-            this.isOpen = false;
-            this._port = undefined;
-            logger.info(`Net connect (socat) successfully closed: ${this._cfg.netHost}:${this._cfg.netPort}`);
-            resolve(true);
-        });
-        this._port.destroy();
-        }
-        else if (typeof conn._port.close === 'function') {
-            conn._port.close((err) => {
-                if (err) {
-                    logger.error(`Error closing ${this._cfg.rs485Port}: ${err}`);
-                    resolve(false);
-                }
-                else {
-                    conn._port = undefined;
-                    logger.info(`Successfully closed seral port ${this._cfg.rs485Port}`);
-                    resolve(true);
-                    this.isOpen = false;
-                }
-            });
-        }
-        else {
-            resolve(true);
-            conn._port = undefined;
-        }
-        });
-        if (success) {
-            if (typeof conn.buffer !== 'undefined') conn.buffer.close();
-        }
-        return success;
+    public pause() { this.isPaused = true; this.clearBuffer(); this.drain(function (err) { }); }
+    // RKS: Resume is executed in a closure. This is because we want the current async process to complete
+    // before we resume. This way the messages are cleared right before we restart.
+    public resume() { if (this.isPaused) setTimeout(() => { this.clearBuffer(); this.isPaused = false; }, 0); }
+    protected resetConnTimer(...args) {
+        //console.log(`resetting connection timer`);
+        if (this.connTimer !== null) clearTimeout(this.connTimer);
+        if (!this._cfg.mockPort && this._cfg.inactivityRetry > 0 && !this._closing) this.connTimer = setTimeout(async () => {
+            try {
+                if (this._cfg.netConnect)
+                    logger.warn(`Inactivity timeout for ${this.portId} serial port ${this._cfg.netHost}:${this._cfg.netPort}/${this._cfg.rs485Port} after ${this._cfg.inactivityRetry} seconds`);
+                else
+                    logger.warn(`Inactivity timeout for ${this.portId} serial port ${this._cfg.rs485Port} after ${this._cfg.inactivityRetry} seconds`);
+                //await this.closeAsync();
+                this.reconnects++;
+                await this.openAsync();
             }
-
-        }
-        finally { this._closing = false; }
+            catch (err) { logger.error(`Error resetting RS485 port on inactivity: ${err.message}`); };
+        }, this._cfg.inactivityRetry * 1000);
     }
+    // Data management functions
     public drain(cb: Function) {
-        if (typeof …
-
+        if (typeof this._port === 'undefined') {
+            logger.debug(`Serial Port ${this.portId}: Cannot perform drain function on port that is not open.`);
+            cb();
+        }
+        if (typeof (this._port.drain) === 'function')
+            this._port.drain(cb);
         else // Call the method immediately as the port doesn't wait to send.
             cb();
     }
     public write(bytes: Buffer, cb: Function) {
-        if ( …
+        if (this._cfg.netConnect) {
             // SOCAT drops the connection and destroys the stream. Could be weeks or as little as a day.
-            if (typeof …
-
-
+            if (typeof this._port === 'undefined' || this._port.destroyed !== false) {
+                this.openAsync().then(() => {
+                    this._port.write(bytes, 'binary', cb);
                 });
             }
             else
-
+                this._port.write(bytes, 'binary', cb);
         }
         else
-
+            this._port.write(bytes, cb);
     }
-    …
-            netPort: 9801,
-            inactivityRetry: 10
-        }));
-    });
-    }
-    public reloadConfig(cfg) {
-        let c = extend({
-            rs485Port: "/dev/ttyUSB0",
-            portSettings: { baudRate: 9600, dataBits: 8, parity: 'none', stopBits: 1, flowControl: false, autoOpen: false, lock: false },
-            mockPort: false,
-            netConnect: false,
-            netHost: "raspberrypi",
-            netPort: 9801,
-            inactivityRetry: 10
-        }, cfg);
-        if (JSON.stringify(c) !== JSON.stringify(this._cfg)) {
-            this.closeAsync();
-            this._cfg = c;
-            if (this._cfg.enabled) this.openAsync();
+    private pushIn(pkt) { this._inBuffer.push.apply(this._inBuffer, pkt.toJSON().data); if(sys.isReady) setImmediate(() => { this.processPackets(); }); }
+    private pushOut(msg) { this._outBuffer.push(msg); setImmediate(() => { this.processPackets(); }); }
+    private clearBuffer() { this._inBuffer.length = 0; this.clearOutboundBuffer(); }
+    private closeBuffer() { clearTimeout(this.procTimer); this.clearBuffer(); this._msg = undefined; }
+    private clearOutboundBuffer() {
+        let processing = this._processing;
+        clearTimeout(this.procTimer);
+        this.procTimer = null;
+        this._processing = true;
+        this.isRTS = false;
+        let msg: Outbound = typeof this._waitingPacket !== 'undefined' ? this._waitingPacket : this._outBuffer.shift();
+        this._waitingPacket = null;
+        while (typeof msg !== 'undefined' && msg) {
+            // Fail the message.
+            msg.failed = true;
+            if (typeof msg.onAbort === 'function') msg.onAbort();
+            else logger.warn(`Message cleared from outbound buffer: ${msg.toShortPacket()} `);
+            let err = new OutboundMessageError(msg, `Message cleared from outbound buffer: ${msg.toShortPacket()} `);
+            if (typeof msg.onComplete === 'function') msg.onComplete(err, undefined);
+            if (msg.requiresResponse) {
+                // Wait for this current process to complete then bombard all the processes with the callback.
+                if (msg.response instanceof Response && typeof (msg.response.callback) === 'function') setImmediate(msg.response.callback, msg);
+            }
+            this.counter.sndAborted++;
+            msg = this._outBuffer.shift();
         }
+        this._processing = processing;
+        this.isRTS = true;
     }
-    public queueSendMessage(msg: Outbound) { conn.emitter.emit('messagewrite', msg); }
-    public pause() { conn.isPaused = true; conn.buffer.clear(); conn.drain(function (err) { }); }
-    // RKS: Resume is executed in a closure. This is because we want the current async process to complete
-    // before we resume. This way the messages are cleared right before we restart.
-    public resume() { if (this.isPaused) setTimeout(function () { conn.buffer.clear(); conn.isPaused = false; }, 0); }
-    // RKS: This appears to not be used.
-    //public queueReceiveMessage(pkt: Inbound) {
-    //    logger.info(`Receiving ${ pkt.action } `);
-    //    conn.buffer.pushIn(pkt);
-    //}
-}
-export class SendRecieveBuffer {
-    constructor() {
-        this._inBuffer = [];
-        this._outBuffer = [];
-        this.procTimer = null;//setInterval(this.processPackets, 175);
-    }
-    public counter: Counter = new Counter();
-    private procTimer: NodeJS.Timeout;
-    private _processing: boolean = false;
-    private _inBytes: number[] = [];
-    private _inBuffer: number[] = [];
-    private _outBuffer: Outbound[] = [];
-    private _waitingPacket: Outbound;
-    private _msg: Inbound;
-    public pushIn(pkt) {
-        let self = this;
-        conn.buffer._inBuffer.push.apply(conn.buffer._inBuffer, pkt.toJSON().data); setTimeout(() => { self.processPackets(); }, 0);
-    }
-    public pushOut(msg) { conn.buffer._outBuffer.push(msg); setTimeout(() => { this.processPackets(); }, 0); }
-    public clear() { conn.buffer._inBuffer.length = 0; conn.buffer._outBuffer.length = 0; }
-    public close() { clearTimeout(conn.buffer.procTimer); conn.buffer.clear(); this._msg = undefined; }
-    public clearOutbound() { conn.buffer._outBuffer.length = 0; conn.buffer._waitingPacket = undefined; }
-    /********************************************************************
-     * RKS: 06-06-20
-     * This used to process every 175ms. While the processing was light
-     * when there was nothing to process this should have always been
-     * event based so the processing timer has been reworked.
-     *
-     * Now this method gets called only during the following conditions.
-     * 1. A packetread event comes from the serial port and has data
-     * 2. A message is placed onto the outbound queue
-     * 3. The outbound queue has messages that are waiting to send. In
-     * this instance this method is called every 200ms until the queue
-     * is empty. If one of the above conditions are met then this method
-     * will be triggered earlier.
-     *
-     ****************************************************************** */
     private processPackets() {
-        if ( …
-        if ( …
-            clearTimeout( …
-
+        if (this._processing) return;
+        if (this.procTimer) {
+            clearTimeout(this.procTimer);
+            this.procTimer = null;
         }
-
-
-
-
+        this._processing = true;
+        this.processInboundPackets();
+        this.processOutboundPackets();
+        this._processing = false;
     }
     private processWaitPacket(): boolean {
-        if (typeof …
-            let timeout = …
+        if (typeof this._waitingPacket !== 'undefined' && this._waitingPacket) {
+            let timeout = this._waitingPacket.timeout || 1000;
             let dt = new Date();
-            if ( …
-                logger.silly(`Retrying outbound message after ${(dt.getTime() - …
-
-
+            if (this._waitingPacket.timestamp.getTime() + timeout < dt.getTime()) {
+                logger.silly(`Retrying outbound message after ${(dt.getTime() - this._waitingPacket.timestamp.getTime()) / 1000} secs with ${this._waitingPacket.remainingTries} attempt(s) left. - ${this._waitingPacket.toShortPacket()} `);
+                this.counter.sndRetries++;
+                this.writeMessage(this._waitingPacket);
             }
             return true;
         }
         return false;
     }
-    protected …
+    protected processOutboundPackets() {
         let msg: Outbound;
-        if (! …
-        if ( …
-        if ( …
-            msg = …
+        if (!this.processWaitPacket() && this._outBuffer.length > 0) {
+            if (this.isOpen) {
+                if (this.isRTS) {
+                    msg = this._outBuffer.shift();
                     if (typeof msg === 'undefined' || !msg) return;
                     // If the serial port is busy we don't want to process any outbound. However, this used to
                     // not process the outbound even when the incoming bytes didn't mean anything. Now we only delay
                     // the outbound when we actually have a message signatures to process.
-
+                    this.writeMessage(msg);
                 }
             }
             else {
                 // port is closed, reject message
-                msg = …
+                msg = this._outBuffer.shift();
                 msg.failed = true;
-                logger.warn(`Comms port is not open.Message aborted: ${msg.toShortPacket()} `);
+                logger.warn(`Comms port ${msg.portId} is not open. Message aborted: ${msg.toShortPacket()} `);
                 // This is a hard fail. We don't have any more tries left and the message didn't
                 // make it onto the wire.
-
+                if (typeof msg.onAbort === 'function') msg.onAbort();
+                else logger.warn(`Message aborted after ${msg.tries} attempt(s): ${msg.toShortPacket()} `);
+                let error = new OutboundMessageError(msg, `Comms port ${msg.portId} is not open. Message aborted: ${msg.toShortPacket()} `);
                 if (typeof msg.onComplete === 'function') msg.onComplete(error, undefined);
-
+                this._waitingPacket = null;
+                this.counter.sndAborted++;
+                this.counter.updatefailureRate();
+                this.emitPortStats();
             }
         }
         // RG: added the last `|| typeof msg !== 'undef'` because virtual chem controller only sends a single packet
-        // but this condition would be eval'd before the callback of …
+        // but this condition would be eval'd before the callback of port.write was calls and the outbound packet
         // would be sitting idle for eternity.
-        if ( …
+        if (this._outBuffer.length > 0 || typeof this._waitingPacket !== 'undefined' || this._waitingPacket || typeof msg !== 'undefined') {
            // Come back later as we still have items to send.
            let self = this;
-
+           this.procTimer = setTimeout(() => self.processPackets(), 100);
        }
    }
-    /*
-     * Writing messages on the queue is tricky to harden. The async nature of the serial port in node doesn't appropriately drain the port after each message
-     * so even though the callback is called for the .write method it doesn't guarantee that it has been written. Not such an issue when we are dealing with full-duplex
-     * but in this half-duplex environment we don't have an RTS. This is further complicated by the fact that no event is raised when the port finally gets around to
-     * dumping it's buffer on the wire. The only time we are notified is when there is a failure. Even then it does not point to a particular message since the
-     * port is unaware of our protocol.
-     *
-     * To that end we need to create a semaphore so that we don't place two messages back to back while we are waiting on the callback to return.
-     */
-
    private writeMessage(msg: Outbound) {
        // Make sure we are not re-entrant while the the port.write is going on.
        // This ends in goofiness as it can send more than one message at a time while it
        // waits for the command buffer to be flushed. NOTE: There is no success message and the callback to
        // write only verifies that the buffer got ahold of it.
-        if (! …
-
+        if (!this.isRTS || this.mockPort) return;
+        this.isRTS = false;
        var bytes = msg.toPacket();
-        if ( …
+        if (this.isOpen) {
            if (msg.remainingTries <= 0) {
                // It will almost never fall into here. The rare case where
                // we have an RTS semaphore and a waiting response might make it go here.
                msg.failed = true;
-
+                this._waitingPacket = null;
                if (typeof msg.onAbort === 'function') msg.onAbort();
                else logger.warn(`Message aborted after ${msg.tries} attempt(s): ${msg.toShortPacket()} `);
                let err = new OutboundMessageError(msg, `Message aborted after ${msg.tries} attempt(s): ${msg.toShortPacket()} `);
@@ -666,20 +651,20 @@ export class SendRecieveBuffer {
                    setTimeout(msg.response.callback, 100, msg);
                }
            }
-
-
+            this.counter.sndAborted++;
+            this.isRTS = true;
            return;
        }
-
+        this.counter.bytesSent += bytes.length;
        msg.timestamp = new Date();
        logger.packet(msg);
-
+        this.write(Buffer.from(bytes), (err) => {
            msg.tries++;
-
+            this.isRTS = true;
            if (err) {
                logger.error('Error writing packet %s', err);
                // We had an error so we need to set the waiting packet if there are retries
-                if (msg.remainingTries > 0) …
+                if (msg.remainingTries > 0) this._waitingPacket = msg;
                else {
                    msg.failed = true;
                    logger.warn(`Message aborted after ${msg.tries} attempt(s): ${bytes}: ${err} `);
@@ -687,43 +672,44 @@ export class SendRecieveBuffer {
                    // make it onto the wire.
                    let error = new OutboundMessageError(msg, `Message aborted after ${msg.tries} attempt(s): ${err} `);
                    if (typeof msg.onComplete === 'function') msg.onComplete(error, undefined);
-
-
+                    this._waitingPacket = null;
+                    this.counter.sndAborted++;
                }
            }
            else {
                logger.verbose(`Wrote packet[${bytes}].Retries remaining: ${msg.remainingTries} `);
                // We have all the success we are going to get so if the call succeeded then
                // don't set the waiting packet when we aren't actually waiting for a response.
-                conn.buffer.counter.sndSuccess++;
                if (!msg.requiresResponse) {
                    // As far as we know the message made it to OCP.
-
+                    this._waitingPacket = null;
+                    this.counter.sndSuccess++;
                    if (typeof msg.onComplete === 'function') msg.onComplete(err, undefined);

                }
                else if (msg.remainingTries >= 0) {
-
+                    this._waitingPacket = msg;
                }
            }
-
-
+            this.counter.updatefailureRate();
+            this.emitPortStats();
        });
    }
    private clearResponses(msgIn: Inbound) {
-        if ( …
+        if (this._outBuffer.length === 0 && typeof (this._waitingPacket) !== 'object' && this._waitingPacket) return;
        var callback;
-        let msgOut = …
-        if (typeof ( …
+        let msgOut = this._waitingPacket;
+        if (typeof (this._waitingPacket) !== 'undefined' && this._waitingPacket) {
            var resp = msgOut.response;
            if (msgOut.requiresResponse) {
                if (resp instanceof Response && resp.isResponse(msgIn, msgOut)) {
-
+                    this._waitingPacket = null;
                    if (typeof msgOut.onComplete === 'function') msgOut.onComplete(undefined, msgIn);
                    callback = resp.callback;
                    resp.message = msgIn;
-
+                    this.counter.sndSuccess++;
+                    if (resp.ack) this.pushOut(resp.ack);
                }
            }
        }
@@ -731,9 +717,9 @@ export class SendRecieveBuffer {
         // RG - when would there be additional packets besides the first in the outbuffer that needs to be removed from a single incoming packet?
         // RKS: This occurs when two of the same message signature is thrown onto the queue. Most often when there is a queue full of configuration requests. The
         // triggers that cause the outbound message may come at the same time that another controller makes a call.
-        var i =
+        var i = this._outBuffer.length - 1;
         while (i >= 0) {
-            let out =
+            let out = this._outBuffer[i--];
             if (typeof out === 'undefined') continue;
             let resp = out.response;
             // RG - added check for msgOut because the *Touch chlor packet 153 adds an status packet 217
@@ -742,7 +728,7 @@ export class SendRecieveBuffer {
                 if (resp instanceof Response && resp.isResponse(msgIn, out) && (typeof out.scope === 'undefined' || out.scope === msgOut.scope)) {
                     resp.message = msgIn;
                     if (typeof (resp.callback) === 'function' && resp.callback) callback = resp.callback;
-
+                    this._outBuffer.splice(i, 1);
                 }
             }
         }
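The RKS note above explains why a single incoming reply may need to clear several queued outbound messages: identical configuration requests can land on the queue together, so the loop walks the buffer from the tail and splices every match. Iterating backwards is what keeps splice() from shifting indices that have not been visited yet; a small self-contained sketch of that technique (Pending, clearMatching and matches are hypothetical names, not the package's types):

    // Illustrative only: removing every queued entry that matches an incoming reply.
    // Scanning from the tail means splice() never shifts indices we have yet to visit.
    interface Pending { signature: string; }

    function clearMatching(queue: Pending[], matches: (p: Pending) => boolean): number {
        let removed = 0;
        for (let i = queue.length - 1; i >= 0; i--) {
            if (matches(queue[i])) {
                queue.splice(i, 1);   // safe: only already-visited indices shift
                removed++;
            }
        }
        return removed;
    }

    // e.g. two identical config requests were queued; one response clears both.
    const queue: Pending[] = [{ signature: 'cfg:30' }, { signature: 'cfg:31' }, { signature: 'cfg:30' }];
    clearMatching(queue, p => p.signature === 'cfg:30');   // queue now holds only 'cfg:31'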
@@ -752,95 +738,72 @@ export class SendRecieveBuffer {
         // that we also need. This occurs when more than one panel on the bus requests a reconfig at the same time.
         if (typeof (callback) === 'function') { setTimeout(callback, 100, msgOut); }
     }
+    public get stats() {
+        let status = this.isOpen ? 'open' : this._cfg.enabled ? 'closed' : 'disabled';
+        return extend(true, { portId: this.portId, status: status, reconnects: this.reconnects }, this.counter)
+    }
+    private emitPortStats() {
+        webApp.emitToChannel('rs485PortStats', 'rs485Stats', this.stats);
+    }
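The new stats getter merges the port identity (portId, open/closed/disabled status and reconnect count) with the running Counter fields, and emitPortStats() pushes that object to the rs485PortStats channel as an rs485Stats event from the write callback and from processCompletedMessage. Assuming a dashboard client connects over socket.io and the server relays that channel to it, a listener could look like the sketch below (the URL and log format are placeholders, not part of the package):

    // Illustrative only: consuming the rs485Stats events emitted by emitPortStats().
    // Assumes a socket.io client and that the server relays the rs485PortStats channel
    // to this connection; the URL and logging are placeholders.
    import { io } from 'socket.io-client';

    const socket = io('http://localhost:4200');

    socket.on('rs485Stats', (stats: any) => {
        // stats is the object built by the stats getter: portId, status, reconnects,
        // plus the Counter fields (bytesSent, bytesReceived, sndSuccess, recFailed, ...).
        console.log(`port ${stats.portId} [${stats.status}] ` +
            `rx ${stats.bytesReceived}B / tx ${stats.bytesSent}B, ` +
            `failure ${Number(stats.recFailureRate ?? 0).toFixed(2)}%`);
    });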
     private processCompletedMessage(msg: Inbound, ndx): number {
         msg.timestamp = new Date();
+        msg.portId = this.portId;
         msg.id = Message.nextMessageId;
-
+        this.counter.recCollisions += msg.collisions;
+        this.counter.recRewinds += msg.rewinds;
         logger.packet(msg);
-
+        this.emitPortStats();
         if (msg.isValid) {
-
-
+            this.counter.recSuccess++;
+            this.counter.updatefailureRate();
             msg.process();
-
+            this.clearResponses(msg);
         }
         else {
-
-
-            console.log('RS485 Stats:' +
+            this.counter.recFailed++;
+            this.counter.updatefailureRate();
+            console.log('RS485 Stats:' + this.counter.toLog());
             ndx = this.rewindFailedMessage(msg, ndx);
         }
         return ndx;
     }
     private rewindFailedMessage(msg: Inbound, ndx: number): number {
+        this.counter.recRewinds++;
         // Lets see if we can do a rewind to capture another message from the
         // crap on the bus. This will get us to the innermost message. While the outer message may have failed the inner message should
         // be able to buck up and make it happen.
-
+        this._inBytes = this._inBytes.slice(ndx); // Start by removing all of the bytes related to the original message.
         // Add all of the elements of the message back in reverse.
-
-
-
+        this._inBytes.unshift(...msg.term);
+        this._inBytes.unshift(...msg.payload);
+        this._inBytes.unshift(...msg.header.slice(1)); // Trim off the first byte from the header. This means it won't find 16,2 or start with a 165. The
         // algorithm looks for the header bytes to determine the protocol so the rewind shouldn't include the 16 in 16,2 otherwise it will just keep rewinding.
-
-        ndx = msg.readPacket(
+        this._msg = msg = new Inbound();
+        ndx = msg.readPacket(this._inBytes);
         if (msg.isComplete) { ndx = this.processCompletedMessage(msg, ndx); }
         return ndx;
     }
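The rewind comments above describe how a failed frame is recovered from: the bytes of the bad message are pushed back onto the input buffer minus the first header byte, so the scanner cannot re-lock onto the same start-of-frame but can still find a valid message that began inside it. A toy illustration of that idea with a made-up frame format (the real protocol, headers and checksums in the package differ; rewind() here is hypothetical):

    // Illustrative only: the "rewind" idea behind rewindFailedMessage(), shown with a
    // toy frame format (header 0x10 0x02, payload, terminator 0x10 0x03).
    function rewind(buffer: number[], header: number[], payload: number[], term: number[]): number[] {
        // Put the failed frame's bytes back in front of whatever followed it, but skip
        // header[0] so the scanner cannot lock onto the exact same start-of-frame again
        // and loop forever; any frame that began *inside* the bad one is still findable.
        return [...header.slice(1), ...payload, ...term, ...buffer];
    }

    // A corrupted outer frame that happens to contain a complete inner frame:
    const badHeader = [0x10, 0x02];
    const badPayload = [0x05, 0x10, 0x02, 0x01, 0xff, 0x10, 0x03];  // inner frame starts at index 1
    const badTerm = [0x10, 0x03];
    const remaining: number[] = [];

    const rescan = rewind(remaining, badHeader, badPayload, badTerm);
    // rescan now begins 0x02, 0x05, 0x10, 0x02, ... so the next scan for 0x10 0x02 finds
    // the inner frame instead of re-matching the failed outer one.
    console.log(rescan.map(b => b.toString(16)).join(' '));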
-    protected
-
-
-        if (
+    protected processInboundPackets() {
+        this.counter.bytesReceived += this._inBuffer.length;
+        this._inBytes.push.apply(this._inBytes, this._inBuffer.splice(0, this._inBuffer.length));
+        if (this._inBytes.length >= 1) { // Wait until we have something to process.
             let ndx: number = 0;
-            let msg: Inbound =
+            let msg: Inbound = this._msg;
             do {
                 if (typeof (msg) === 'undefined' || msg === null || msg.isComplete || !msg.isValid) {
-
-                    ndx = msg.readPacket(
+                    this._msg = msg = new Inbound();
+                    ndx = msg.readPacket(this._inBytes);
                 }
-                else ndx = msg.mergeBytes(
+                else ndx = msg.mergeBytes(this._inBytes);
                 if (msg.isComplete) ndx = this.processCompletedMessage(msg, ndx);
                 if (ndx > 0) {
-
+                    this._inBytes = this._inBytes.slice(ndx);
                     ndx = 0;
                 }
                 else break;

-            } while (ndx <
+            } while (ndx < this._inBytes.length);
         }
     }
 }
-export
-    constructor() {
-        this.bytesReceived = 0;
-        this.recSuccess = 0;
-        this.recFailed = 0;
-        this.recCollisions = 0;
-        this.bytesSent = 0;
-        this.sndAborted = 0;
-        this.sndRetries = 0;
-        this.sndSuccess = 0;
-        this.recFailureRate = 0;
-        this.sndFailureRate = 0;
-    }
-    public bytesReceived: number;
-    public bytesSent: number;
-    public recSuccess: number;
-    public recFailed: number;
-    public recCollisions: number;
-    public recFailureRate: number;
-    public sndSuccess: number;
-    public sndAborted: number;
-    public sndRetries: number;
-    public sndFailureRate: number;
-    public updatefailureRate(): void {
-        conn.buffer.counter.recFailureRate = (this.recFailed + this.recSuccess) !== 0 ? (this.recFailed / (this.recFailed + this.recSuccess) * 100) : 0;
-        conn.buffer.counter.sndFailureRate = (this.sndAborted + this.sndSuccess) !== 0 ? (this.sndAborted / (this.sndAborted + this.sndSuccess) * 100) : 0;
-        //conn.buffer.counter.recFailureRate = `${(conn.buffer.counter.recFailed / (conn.buffer.counter.recFailed + conn.buffer.counter.recSuccess) * 100).toFixed(2)}% `;
-        //conn.buffer.counter.sndFailureRate = `${(conn.buffer.counter.sndAborted / (conn.buffer.counter.sndAborted + conn.buffer.counter.sndSuccess) * 100).toFixed(2)}% `;
-    }
-    public toLog(): string {
-        return `{ "bytesReceived": ${this.bytesReceived} "success": ${this.recSuccess}, "failed": ${this.recFailed}, "bytesSent": ${this.bytesSent}, "collisions": ${this.recCollisions}, "failureRate": ${this.recFailureRate.toFixed(2)}% }`;
-    }
-}
-export var conn: Connection = new Connection();
+export var conn: Connection = new Connection();