@harperfast/harper 5.0.6 → 5.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/status.js +2 -2
- package/bin/stop.js +5 -6
- package/components/OptionsWatcher.ts +9 -1
- package/dataLayer/harperBridge/TableSizeObject.ts +35 -0
- package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.ts +24 -0
- package/dist/bin/status.js +2 -2
- package/dist/bin/status.js.map +1 -1
- package/dist/bin/stop.js +5 -5
- package/dist/bin/stop.js.map +1 -1
- package/dist/components/OptionsWatcher.js +8 -1
- package/dist/components/OptionsWatcher.js.map +1 -1
- package/dist/dataLayer/harperBridge/TableSizeObject.d.ts +20 -0
- package/dist/dataLayer/harperBridge/TableSizeObject.js +32 -0
- package/dist/dataLayer/harperBridge/TableSizeObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.d.ts +6 -6
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js +18 -19
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js.map +1 -1
- package/dist/resources/DatabaseTransaction.js +6 -1
- package/dist/resources/DatabaseTransaction.js.map +1 -1
- package/dist/resources/RecordEncoder.js +10 -1
- package/dist/resources/RecordEncoder.js.map +1 -1
- package/dist/resources/Table.js +13 -2
- package/dist/resources/Table.js.map +1 -1
- package/dist/resources/databases.js +2 -1
- package/dist/resources/databases.js.map +1 -1
- package/dist/resources/graphql.d.ts +3 -8
- package/dist/resources/graphql.js +180 -173
- package/dist/resources/graphql.js.map +1 -1
- package/dist/security/jsLoader.js +16 -2
- package/dist/security/jsLoader.js.map +1 -1
- package/dist/security/keys.js +1 -1
- package/dist/security/keys.js.map +1 -1
- package/dist/server/DurableSubscriptionsSession.js +2 -0
- package/dist/server/DurableSubscriptionsSession.js.map +1 -1
- package/dist/server/serverHelpers/serverUtilities.js +2 -2
- package/dist/server/serverHelpers/serverUtilities.js.map +1 -1
- package/dist/utility/environment/systemInformation.d.ts +178 -49
- package/dist/utility/environment/systemInformation.js +359 -219
- package/dist/utility/environment/systemInformation.js.map +1 -1
- package/dist/utility/operation_authorization.js +2 -2
- package/dist/utility/operation_authorization.js.map +1 -1
- package/package.json +2 -2
- package/resources/DatabaseTransaction.ts +8 -3
- package/resources/RecordEncoder.ts +9 -1
- package/resources/Table.ts +13 -2
- package/resources/databases.ts +2 -1
- package/resources/graphql.ts +13 -5
- package/security/jsLoader.ts +14 -2
- package/security/keys.js +1 -1
- package/server/DurableSubscriptionsSession.ts +1 -0
- package/server/serverHelpers/serverUtilities.ts +2 -5
- package/studio/web/assets/{index-qbLPhOzw.js → index-BftP-yQ8.js} +2 -2
- package/studio/web/assets/{index-qbLPhOzw.js.map → index-BftP-yQ8.js.map} +1 -1
- package/studio/web/index.html +1 -1
- package/utility/environment/systemInformation.ts +698 -0
- package/utility/operation_authorization.js +2 -5
- package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js +0 -25
- package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js +0 -34
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.d.ts +0 -21
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js +0 -24
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js.map +0 -1
- package/utility/environment/systemInformation.js +0 -355
package/studio/web/index.html
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
<link rel="icon" type="dynamic-favicon" href="/favicon_purple.png" />
|
|
7
7
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
|
8
8
|
<title>Harper Fabric</title>
|
|
9
|
-
<script type="module" crossorigin src="/assets/index-qbLPhOzw.js"></script>
|
|
9
|
+
<script type="module" crossorigin src="/assets/index-BftP-yQ8.js"></script>
|
|
10
10
|
<link rel="modulepreload" crossorigin href="/assets/rolldown-runtime-FhOqtrmT.js">
|
|
11
11
|
<link rel="modulepreload" crossorigin href="/assets/vendor-datadog-DVU9bdcc.js">
|
|
12
12
|
<link rel="modulepreload" crossorigin href="/assets/vendor-html-VZf1YLCF.js">
|
|
package/utility/environment/systemInformation.ts
ADDED
@@ -0,0 +1,698 @@
|
|
|
1
|
+
import { readFile } from 'node:fs/promises';
|
|
2
|
+
import path from 'node:path';
|
|
3
|
+
import si from 'systeminformation';
|
|
4
|
+
import logger from '../logging/harper_logger.js';
|
|
5
|
+
import * as hdbTerms from '../hdbTerms.ts';
|
|
6
|
+
import { lmdbGetTableSize } from '../../dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.ts';
|
|
7
|
+
import { getThreadInfo } from '../../server/threads/manageThreads.js';
|
|
8
|
+
import env from './environmentManager.js';
|
|
9
|
+
import { getDatabases, type Table } from '../../resources/databases.ts';
|
|
10
|
+
import { TableSizeObject } from '../../dataLayer/harperBridge/TableSizeObject.ts';
|
|
11
|
+
import { RocksDatabase, StatsHistogramData } from '@harperfast/rocksdb-js';
|
|
12
|
+
|
|
13
|
+
// Load configuration synchronously so the env.get() calls below have values.
env.initSync();

//this will hold the system_information which is static to improve performance
// (populated on the first successful getSystemInformation() call, then reused)
let systemInformationCache = undefined;
|
|
17
|
+
|
|
18
|
+
export class SystemInformationRequest {
|
|
19
|
+
operator: string;
|
|
20
|
+
attributes: string[];
|
|
21
|
+
|
|
22
|
+
constructor(attributes) {
|
|
23
|
+
this.operator = hdbTerms.OPERATIONS_ENUM.SYSTEM_INFORMATION;
|
|
24
|
+
this.attributes = attributes;
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
export class SystemInformationResponse {
|
|
29
|
+
system?: SystemInfo;
|
|
30
|
+
time?: TimeData;
|
|
31
|
+
cpu?: CpuInfo;
|
|
32
|
+
memory?: MemoryInfo;
|
|
33
|
+
disk?: DiskInfo;
|
|
34
|
+
network?: NetworkInfo;
|
|
35
|
+
harperdb_processes?: HarperdbProcesses;
|
|
36
|
+
table_size?: TableSizeObject[];
|
|
37
|
+
metrics?: DatabaseMetrics;
|
|
38
|
+
threads?: Record<string, unknown>;
|
|
39
|
+
|
|
40
|
+
constructor(
|
|
41
|
+
system?: SystemInfo,
|
|
42
|
+
time?: TimeData,
|
|
43
|
+
cpu?: CpuInfo,
|
|
44
|
+
memory?: MemoryInfo,
|
|
45
|
+
disk?: DiskInfo,
|
|
46
|
+
network?: NetworkInfo,
|
|
47
|
+
harperdbProcesses?: HarperdbProcesses,
|
|
48
|
+
tableSize?: TableSizeObject[],
|
|
49
|
+
metrics?: DatabaseMetrics,
|
|
50
|
+
threads?: Record<string, unknown>
|
|
51
|
+
) {
|
|
52
|
+
this.system = system;
|
|
53
|
+
this.time = time;
|
|
54
|
+
this.cpu = cpu;
|
|
55
|
+
this.memory = memory;
|
|
56
|
+
this.disk = disk;
|
|
57
|
+
this.network = network;
|
|
58
|
+
this.harperdb_processes = harperdbProcesses;
|
|
59
|
+
this.table_size = tableSize;
|
|
60
|
+
this.metrics = metrics;
|
|
61
|
+
this.threads = threads;
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
type TimeData = si.Systeminformation.TimeData;
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* Returns the current local time, uptime, timezone, and timezone name.
|
|
69
|
+
*/
|
|
70
|
+
export function getTimeInfo(): TimeData {
|
|
71
|
+
return si.time();
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
// Static CPU description (si.cpu()) narrowed to the listed fields, combined
// with point-in-time clock speed and load measurements.
type CpuInfo = Pick<
	si.Systeminformation.CpuData,
	| 'manufacturer'
	| 'brand'
	| 'vendor'
	| 'speed'
	| 'cores'
	| 'physicalCores'
	| 'performanceCores'
	| 'efficiencyCores'
	| 'processors'
	| 'flags'
	| 'virtualization'
> & {
	// Current clock speed (si.cpuCurrentSpeed()).
	cpu_speed: si.Systeminformation.CpuCurrentSpeedData;
	// Aggregate load (si.currentLoad()) narrowed to the listed fields.
	current_load: Pick<
		si.Systeminformation.CurrentLoadData,
		| 'avgLoad'
		| 'currentLoad'
		| 'currentLoadUser'
		| 'currentLoadSystem'
		| 'currentLoadNice'
		| 'currentLoadIdle'
		| 'currentLoadIrq'
	> & {
		// Per-core load breakdown.
		cpus: Pick<
			si.Systeminformation.CurrentLoadCpuData,
			'load' | 'loadUser' | 'loadSystem' | 'loadNice' | 'loadIdle' | 'loadIrq'
		>[];
	};
};
|
|
105
|
+
|
|
106
|
+
/**
|
|
107
|
+
* Detects CPU information such as manufacturer, brand, vendor, speed, cores, physical cores, and
|
|
108
|
+
* processors.
|
|
109
|
+
*/
|
|
110
|
+
export async function getCPUInfo(): Promise<CpuInfo | null> {
|
|
111
|
+
try {
|
|
112
|
+
const [cpu, cpu_speed, loadInfo] = await Promise.all([si.cpu(), si.cpuCurrentSpeed(), si.currentLoad()]);
|
|
113
|
+
|
|
114
|
+
const {
|
|
115
|
+
manufacturer,
|
|
116
|
+
brand,
|
|
117
|
+
vendor,
|
|
118
|
+
speed,
|
|
119
|
+
cores,
|
|
120
|
+
physicalCores,
|
|
121
|
+
performanceCores,
|
|
122
|
+
efficiencyCores,
|
|
123
|
+
processors,
|
|
124
|
+
flags,
|
|
125
|
+
virtualization,
|
|
126
|
+
} = cpu;
|
|
127
|
+
|
|
128
|
+
const {
|
|
129
|
+
avgLoad,
|
|
130
|
+
cpus,
|
|
131
|
+
currentLoad,
|
|
132
|
+
currentLoadUser,
|
|
133
|
+
currentLoadSystem,
|
|
134
|
+
currentLoadNice,
|
|
135
|
+
currentLoadIdle,
|
|
136
|
+
currentLoadIrq,
|
|
137
|
+
} = loadInfo;
|
|
138
|
+
|
|
139
|
+
return {
|
|
140
|
+
manufacturer,
|
|
141
|
+
brand,
|
|
142
|
+
vendor,
|
|
143
|
+
speed,
|
|
144
|
+
cores,
|
|
145
|
+
physicalCores,
|
|
146
|
+
performanceCores,
|
|
147
|
+
efficiencyCores,
|
|
148
|
+
processors,
|
|
149
|
+
flags,
|
|
150
|
+
virtualization,
|
|
151
|
+
cpu_speed,
|
|
152
|
+
current_load: {
|
|
153
|
+
avgLoad,
|
|
154
|
+
cpus: cpus.map(({ load, loadUser, loadSystem, loadNice, loadIdle, loadIrq }) => ({
|
|
155
|
+
load,
|
|
156
|
+
loadUser,
|
|
157
|
+
loadSystem,
|
|
158
|
+
loadNice,
|
|
159
|
+
loadIdle,
|
|
160
|
+
loadIrq,
|
|
161
|
+
})),
|
|
162
|
+
currentLoad,
|
|
163
|
+
currentLoadUser,
|
|
164
|
+
currentLoadSystem,
|
|
165
|
+
currentLoadNice,
|
|
166
|
+
currentLoadIdle,
|
|
167
|
+
currentLoadIrq,
|
|
168
|
+
},
|
|
169
|
+
};
|
|
170
|
+
} catch (e) {
|
|
171
|
+
logger.error(`error in getCPUInfo: ${e}`);
|
|
172
|
+
return null;
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
// System memory/swap counters (si.mem()) merged with this Node.js process's
// own memory usage (process.memoryUsage()).
type MemoryInfo = Pick<
	si.Systeminformation.MemData,
	| 'total'
	| 'free'
	| 'used'
	| 'active'
	| 'available'
	| 'reclaimable'
	| 'swaptotal'
	| 'swapused'
	| 'swapfree'
	| 'writeback'
	| 'dirty'
> &
	NodeJS.MemoryUsage;
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* Detect system and Node.js memory usage.
|
|
194
|
+
*/
|
|
195
|
+
export async function getMemoryInfo(): Promise<MemoryInfo | null> {
|
|
196
|
+
try {
|
|
197
|
+
const { total, free, used, active, available, reclaimable, swaptotal, swapused, swapfree, writeback, dirty } =
|
|
198
|
+
await si.mem();
|
|
199
|
+
return {
|
|
200
|
+
total,
|
|
201
|
+
free,
|
|
202
|
+
used,
|
|
203
|
+
active,
|
|
204
|
+
available,
|
|
205
|
+
reclaimable,
|
|
206
|
+
swaptotal,
|
|
207
|
+
swapused,
|
|
208
|
+
swapfree,
|
|
209
|
+
writeback,
|
|
210
|
+
dirty,
|
|
211
|
+
...process.memoryUsage(),
|
|
212
|
+
};
|
|
213
|
+
} catch (e) {
|
|
214
|
+
logger.error(`error in getMemoryInfo: ${e}`);
|
|
215
|
+
return null;
|
|
216
|
+
}
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
async function getHdbPid(): Promise<number | null> {
|
|
220
|
+
try {
|
|
221
|
+
return Number.parseInt(
|
|
222
|
+
await readFile(path.join(env.get(hdbTerms.CONFIG_PARAMS.ROOTPATH), hdbTerms.HDB_PID_FILE), 'utf8')
|
|
223
|
+
);
|
|
224
|
+
} catch (err) {
|
|
225
|
+
if (err.code === hdbTerms.NODE_ERROR_CODES.ENOENT) {
|
|
226
|
+
logger.warn(
|
|
227
|
+
`Unable to locate 'hdb.pid' file, try stopping and starting Harper. This could be because Harper is not running.`
|
|
228
|
+
);
|
|
229
|
+
} else {
|
|
230
|
+
throw err;
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
// A single process entry from si.processes(), optionally tagged with its parent.
type CoreInfo = si.Systeminformation.ProcessesProcessData & { parent?: string };

// Process info for the Harper core process; `core` stays empty when the PID
// cannot be located in the process list.
type HarperdbProcesses = {
	core: CoreInfo[];
};
|
|
240
|
+
|
|
241
|
+
/**
|
|
242
|
+
* Detects the Harper process PID and returns the process info.
|
|
243
|
+
* @returns {Promise<{core: []}>}
|
|
244
|
+
*/
|
|
245
|
+
export async function getHDBProcessInfo(): Promise<HarperdbProcesses> {
|
|
246
|
+
const harperdbProcesses: HarperdbProcesses = {
|
|
247
|
+
core: [],
|
|
248
|
+
};
|
|
249
|
+
|
|
250
|
+
try {
|
|
251
|
+
const [processes, hdbPid] = await Promise.all([si.processes(), getHdbPid()]);
|
|
252
|
+
|
|
253
|
+
const proc = processes.list.find((p) => p.pid === hdbPid);
|
|
254
|
+
if (proc) {
|
|
255
|
+
harperdbProcesses.core.push(proc);
|
|
256
|
+
}
|
|
257
|
+
} catch (e) {
|
|
258
|
+
logger.error(`error in getHDBProcessInfo: ${e}`);
|
|
259
|
+
}
|
|
260
|
+
return harperdbProcesses;
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
// Disk I/O counters, read/write throughput, and per-filesystem sizes. All
// sections are optional: they stay unset when disk metrics are disabled in
// the config or when collection fails.
type DiskInfo = {
	io?: Pick<si.Systeminformation.DisksIoData, 'rIO' | 'wIO' | 'tIO'>;
	read_write?: Pick<si.Systeminformation.FsStatsData, 'rx' | 'tx' | 'wx'>;
	size?: si.Systeminformation.FsSizeData[];
};
|
|
268
|
+
|
|
269
|
+
/**
|
|
270
|
+
* Retrieves disk related info & stats
|
|
271
|
+
* @returns {Promise<DiskInfo>}
|
|
272
|
+
*/
|
|
273
|
+
export async function getDiskInfo(): Promise<DiskInfo> {
|
|
274
|
+
const disk: DiskInfo = {};
|
|
275
|
+
try {
|
|
276
|
+
if (!env.get(hdbTerms.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_DISK)) return disk;
|
|
277
|
+
|
|
278
|
+
const [disksIO, fsStats, fsSize] = await Promise.all([si.disksIO(), si.fsStats(), si.fsSize()]);
|
|
279
|
+
|
|
280
|
+
const { rIO, wIO, tIO } = disksIO;
|
|
281
|
+
disk.io = { rIO, wIO, tIO };
|
|
282
|
+
|
|
283
|
+
const { rx, tx, wx } = fsStats;
|
|
284
|
+
disk.read_write = { rx, tx, wx };
|
|
285
|
+
|
|
286
|
+
disk.size = fsSize;
|
|
287
|
+
} catch (e) {
|
|
288
|
+
logger.error(`error in getDiskInfo: ${e}`);
|
|
289
|
+
}
|
|
290
|
+
return disk;
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
// Network snapshot: default interface name, a best-effort latency probe,
// per-interface configuration, and per-interface traffic stats.
type NetworkInfo = {
	default_interface: string | null;
	// Empty object when the probe fails or network metrics are disabled.
	latency: si.Systeminformation.InetChecksiteData | Record<never, never>;
	interfaces: Pick<
		si.Systeminformation.NetworkInterfacesData,
		| 'iface'
		| 'ifaceName'
		| 'default'
		| 'ip4'
		| 'ip4subnet'
		| 'ip6'
		| 'ip6subnet'
		| 'mac'
		| 'operstate'
		| 'type'
		| 'duplex'
		| 'speed'
	>[];
	stats: any[];
	// NOTE(review): never populated by getNetworkInfo in this file — confirm
	// whether connection enumeration was intentionally omitted.
	connections: any[];
};
|
|
314
|
+
|
|
315
|
+
/**
|
|
316
|
+
* Detects networking connection information & stats
|
|
317
|
+
* @returns {Promise<{interfaces: [], default_interface: null, stats: [], latency: {}, connections: []}>}
|
|
318
|
+
*/
|
|
319
|
+
export async function getNetworkInfo(): Promise<NetworkInfo> {
|
|
320
|
+
const network: NetworkInfo = {
|
|
321
|
+
default_interface: null,
|
|
322
|
+
latency: {},
|
|
323
|
+
interfaces: [],
|
|
324
|
+
stats: [],
|
|
325
|
+
connections: [],
|
|
326
|
+
};
|
|
327
|
+
try {
|
|
328
|
+
if (!env.get(hdbTerms.CONFIG_PARAMS.OPERATIONSAPI_SYSINFO_NETWORK)) return network;
|
|
329
|
+
|
|
330
|
+
const [defaultInterface, latency, nInterfaces, stats] = await Promise.all([
|
|
331
|
+
si.networkInterfaceDefault(),
|
|
332
|
+
si.inetChecksite('https://google.com').catch(() => ({})),
|
|
333
|
+
si.networkInterfaces(),
|
|
334
|
+
si.networkStats(),
|
|
335
|
+
]);
|
|
336
|
+
|
|
337
|
+
network.default_interface = defaultInterface || null;
|
|
338
|
+
network.latency = latency;
|
|
339
|
+
|
|
340
|
+
for (const nInterface of nInterfaces) {
|
|
341
|
+
const {
|
|
342
|
+
iface,
|
|
343
|
+
ifaceName,
|
|
344
|
+
default: isDefault,
|
|
345
|
+
ip4,
|
|
346
|
+
ip4subnet,
|
|
347
|
+
ip6,
|
|
348
|
+
ip6subnet,
|
|
349
|
+
mac,
|
|
350
|
+
operstate,
|
|
351
|
+
type,
|
|
352
|
+
duplex,
|
|
353
|
+
speed,
|
|
354
|
+
} = nInterface;
|
|
355
|
+
network.interfaces.push({
|
|
356
|
+
iface,
|
|
357
|
+
ifaceName,
|
|
358
|
+
default: isDefault,
|
|
359
|
+
ip4,
|
|
360
|
+
ip4subnet,
|
|
361
|
+
ip6,
|
|
362
|
+
ip6subnet,
|
|
363
|
+
mac,
|
|
364
|
+
operstate,
|
|
365
|
+
type,
|
|
366
|
+
duplex,
|
|
367
|
+
speed,
|
|
368
|
+
});
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
for (const nStat of stats) {
|
|
372
|
+
const { iface, operstate, rx_bytes, rx_dropped, rx_errors, tx_bytes, tx_dropped, tx_errors } = nStat;
|
|
373
|
+
network.stats.push({ iface, operstate, rx_bytes, rx_dropped, rx_errors, tx_bytes, tx_dropped, tx_errors });
|
|
374
|
+
}
|
|
375
|
+
} catch (e) {
|
|
376
|
+
logger.error(`error in getNetworkInfo: ${e}`);
|
|
377
|
+
}
|
|
378
|
+
return network;
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
// OS identity (si.osInfo()) plus Node/npm versions; every field is optional
// because collection is best-effort and may fail.
type SystemInfo = Partial<
	Pick<
		si.Systeminformation.OsData,
		'platform' | 'distro' | 'release' | 'codename' | 'kernel' | 'arch' | 'hostname' | 'fqdn'
	>
> & {
	node_version?: string;
	npm_version?: string;
};
|
|
390
|
+
|
|
391
|
+
/**
|
|
392
|
+
* Detect operating system and Node.js runtime information.
|
|
393
|
+
* @returns {Promise<SystemInfo>}
|
|
394
|
+
*/
|
|
395
|
+
export async function getSystemInformation(): Promise<SystemInfo> {
|
|
396
|
+
if (systemInformationCache !== undefined) {
|
|
397
|
+
return systemInformationCache;
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
let systemInfo: SystemInfo = {};
|
|
401
|
+
try {
|
|
402
|
+
const [osInfo, versions] = await Promise.all([si.osInfo(), si.versions('node, npm')]);
|
|
403
|
+
const { platform, distro, release, codename, kernel, arch, hostname, fqdn } = osInfo;
|
|
404
|
+
const { node, npm } = versions;
|
|
405
|
+
|
|
406
|
+
systemInfo = {
|
|
407
|
+
platform,
|
|
408
|
+
distro,
|
|
409
|
+
release,
|
|
410
|
+
codename,
|
|
411
|
+
kernel,
|
|
412
|
+
arch,
|
|
413
|
+
hostname,
|
|
414
|
+
fqdn,
|
|
415
|
+
node_version: node,
|
|
416
|
+
npm_version: npm,
|
|
417
|
+
};
|
|
418
|
+
systemInformationCache = systemInfo;
|
|
419
|
+
} catch (e) {
|
|
420
|
+
logger.error(`error in getSystemInformation: ${e}`);
|
|
421
|
+
}
|
|
422
|
+
return systemInfo;
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
function rocksdbGetTableSize(table: Table): TableSizeObject {
|
|
426
|
+
const rocksdb: RocksDatabase = table.primaryStore;
|
|
427
|
+
const stats = rocksdb.getStats();
|
|
428
|
+
const transactionLogSize = rocksdb
|
|
429
|
+
.listLogs()
|
|
430
|
+
.reduce((sum, logName) => sum + rocksdb.useLog(logName).getLogFileSize(), 0);
|
|
431
|
+
return new TableSizeObject(
|
|
432
|
+
table.databaseName,
|
|
433
|
+
table.tableName,
|
|
434
|
+
(stats['rocksdb.estimate-live-data-size'] as number) ?? 0,
|
|
435
|
+
(stats['rocksdb.estimate-num-keys'] as number) ?? 0,
|
|
436
|
+
transactionLogSize
|
|
437
|
+
// transactionLogRecordCount - currently not supported by `rocksdb-js`
|
|
438
|
+
);
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
/**
|
|
442
|
+
* Retrieves table size information.
|
|
443
|
+
* @returns {TableSizeObject[]}
|
|
444
|
+
*/
|
|
445
|
+
export function getTableSize(): TableSizeObject[] {
|
|
446
|
+
const results: TableSizeObject[] = [];
|
|
447
|
+
const databases = getDatabases();
|
|
448
|
+
|
|
449
|
+
for (const db of Object.values(databases)) {
|
|
450
|
+
for (const table of Object.values(db)) {
|
|
451
|
+
if (table.primaryStore.rootStore instanceof RocksDatabase) {
|
|
452
|
+
results.push(rocksdbGetTableSize(table));
|
|
453
|
+
} else {
|
|
454
|
+
results.push(lmdbGetTableSize(table));
|
|
455
|
+
}
|
|
456
|
+
}
|
|
457
|
+
}
|
|
458
|
+
return results;
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
// Per-environment LMDB B-tree statistics.
type LMDBEnvStats = {
	entryCount: number;
	overflowPages: number;
	pageSize: number;
	treeBranchPageCount: number;
	treeDepth: number;
	treeLeafPageCount: number;
};

// Full LMDB environment statistics, including the free list and root tree.
type LMDBStats = LMDBEnvStats & {
	free: LMDBEnvStats;
	lastPageNumber: number;
	lastTxnId: number;
	mapSize: number;
	maxReaders: number;
	numReaders: number;
	root: LMDBEnvStats;
};

// Stat names (after toRocksDBCamelCase) that describe the whole RocksDB
// database rather than a single table; getRocksDBStats hoists these onto the
// database-level object instead of the per-table entry.
const rocksDBDatabaseLevelStats = new Set<string>([
	'blockCacheCapacity',
	'blockCacheDataHit',
	'blockCacheDataMiss',
	'blockCacheFilterHit',
	'blockCacheFilterMiss',
	'blockCacheHit',
	'blockCacheIndexHit',
	'blockCacheIndexMiss',
	'blockCacheMiss',
	'blockCachePinnedUsage',
	'blockCacheUsage',
	'bytesRead',
	'bytesWritten',
	'dbFlushMicros',
	'dbGetMicros',
	'dbSeekMicros',
	'dbWriteMicros',
	'noFileErrors',
	'numberKeysRead',
	'numberKeysWritten',
	'numberReseeksIteration',
	'numRunningFlushes',
	'oldestSnapshotTime',
	'stallMicros',
	'txnOverheadMutexOldCommitMap',
	'txnOverheadMutexPrepare',
	'txnOverheadMutexSnapshot',
]);

// Database-level RocksDB counters and latency histograms.
type RocksDBStats = {
	blockCacheCapacity: number;
	blockCacheDataHit: number;
	blockCacheDataMiss: number;
	blockCacheFilterHit: number;
	blockCacheFilterMiss: number;
	blockCacheHit: number;
	blockCacheIndexHit: number;
	blockCacheIndexMiss: number;
	blockCacheMiss: number;
	blockCachePinnedUsage: number;
	blockCacheUsage: number;
	bytesRead: number;
	bytesWritten: number;
	dbFlushMicros: StatsHistogramData;
	dbGetMicros: StatsHistogramData;
	dbSeekMicros: StatsHistogramData;
	dbWriteMicros: StatsHistogramData;
	noFileErrors: number;
	numberKeysRead: number;
	numberKeysWritten: number;
	numberReseeksIteration: number;
	numRunningFlushes: number;
	oldestSnapshotTime: number;
	stallMicros: number;
	txnOverheadMutexOldCommitMap: number;
	txnOverheadMutexPrepare: number;
	txnOverheadMutexSnapshot: number;
};

// Table-level RocksDB counters and latency histograms.
type RocksDBTableStats = {
	blobdbValueSize: StatsHistogramData;
	bloomFilterFullPositive: number;
	bloomFilterFullTruePositive: number;
	bloomFilterUseful: number;
	compactReadBytes: number;
	compactWriteBytes: number;
	compactionCancelled: number;
	compactionPending: number;
	compactionTimesMicros: StatsHistogramData;
	curSizeActiveMemTable: number;
	curSizeAllMemTables: number;
	currentSuperVersionNumber: number;
	dbIterBytesRead: number;
	dbWriteStall: StatsHistogramData;
	estimateLiveDataSize: number;
	estimateNumKeys: number;
	estimatePendingCompactionBytes: number;
	liveBlobFileSize: number;
	liveSstFilesSize: number;
	memTableFlushPending: number;
	memtableHit: number;
	memtableMiss: number;
	numBlobFiles: number;
	numDeletesActiveMemTable: number;
	numEntriesActiveMemTable: number;
	numImmutableMemTable: number;
	numImmutableMemTableFlushed: number;
	numLiveVersions: number;
	numRunningCompactions: number;
	readAmpEstimateUsefulBytes: number;
	readAmpTotalReadBytes: number;
	sizeAllMemTables: number;
	sstReadMicros: StatsHistogramData;
	totalBlobFileSize: number;
	totalSstFilesSize: number;
};

// Per-table stats: RocksDB counters, or the LMDB tree subset for LMDB tables.
type TableStats =
	| RocksDBTableStats
	| Pick<LMDBStats, 'entryCount' | 'overflowPages' | 'treeBranchPageCount' | 'treeDepth' | 'treeLeafPageCount'>;
|
|
581
|
+
|
|
582
|
+
// Strips the "rocksdb." prefix and converts kebab-case to camelCase
|
|
583
|
+
function toRocksDBCamelCase(key: string): string {
|
|
584
|
+
return key.replace(/^rocksdb\./, '').replace(/[-.]([a-z])/g, (_, c: string) => c.toUpperCase());
|
|
585
|
+
}
|
|
586
|
+
|
|
587
|
+
// Combined per-database stats: RocksDB database-level counters plus
// LMDB-only sections (audit tree, reader list) and a per-table stats map.
type DBStats = RocksDBStats & {
	audit?: Pick<LMDBStats, 'treeDepth' | 'treeBranchPageCount' | 'treeLeafPageCount' | 'entryCount' | 'overflowPages'>;
	readers?: { pid: string; thread: string; txnid: string }[];
	tables: Record<string, TableStats>;
};

// Database stats keyed by database name.
type DatabaseMetrics = {
	[dbName: string]: DBStats;
};
|
|
596
|
+
|
|
597
|
+
function getRocksDBStats(table: Table, dbStats: DBStats): void {
|
|
598
|
+
const stats = table.primaryStore.getStats();
|
|
599
|
+
const tableStats = (dbStats.tables[table.tableName] = {} as RocksDBTableStats);
|
|
600
|
+
|
|
601
|
+
for (const [key, value] of Object.entries(stats)) {
|
|
602
|
+
const name = toRocksDBCamelCase(key);
|
|
603
|
+
if (rocksDBDatabaseLevelStats.has(name)) {
|
|
604
|
+
dbStats[name] = value;
|
|
605
|
+
} else {
|
|
606
|
+
tableStats[name] = value;
|
|
607
|
+
}
|
|
608
|
+
}
|
|
609
|
+
}
|
|
610
|
+
|
|
611
|
+
// Collects LMDB statistics for one table into the shared per-database object.
// Database-wide data (env stats, reader list, audit tree) is gathered only
// once per database — dbStats.readers doubles as the "already done" marker.
function getLMDBStats(table: Table, dbStats: DBStats): void {
	if (!dbStats.readers) {
		// Environment-level stats, minus the root tree (per-table data is
		// reported separately below).
		const { root: _root, ...stats } = table.primaryStore.rootStore.getStats();
		Object.assign(dbStats, stats);
		// readerList() returns text whose first segment is skipped (slice(1));
		// remaining segments are parsed as space-separated pid/thread/txnid rows.
		dbStats.readers = table.primaryStore.rootStore
			.readerList()
			.split(/\n\s+/)
			.slice(1)
			.map((line) => {
				const [pid, thread, txnid] = line.trim().split(' ');
				return { pid, thread, txnid };
			});
		if (table.auditStore) {
			// Audit-log tree stats, when this table has an audit store.
			const { treeDepth, treeBranchPageCount, treeLeafPageCount, entryCount, overflowPages } =
				table.auditStore.getStats();
			dbStats.audit = { treeDepth, treeBranchPageCount, treeLeafPageCount, entryCount, overflowPages };
		}
	}

	// Per-table B-tree stats from the table's primary store.
	const { entryCount, overflowPages, treeBranchPageCount, treeDepth, treeLeafPageCount } =
		table.primaryStore.getStats();
	dbStats.tables[table.tableName] = { entryCount, overflowPages, treeBranchPageCount, treeDepth, treeLeafPageCount };
}
|
|
634
|
+
|
|
635
|
+
/**
|
|
636
|
+
* Get RocksDB or LMDB metrics for all databases and tables.
|
|
637
|
+
* @returns {Promise<DatabaseMetrics>}
|
|
638
|
+
*/
|
|
639
|
+
export async function getMetrics(): Promise<DatabaseMetrics> {
|
|
640
|
+
const databaseStats: DatabaseMetrics = {};
|
|
641
|
+
const databases = getDatabases();
|
|
642
|
+
|
|
643
|
+
for (const [dbName, db] of Object.entries(databases)) {
|
|
644
|
+
const dbStats = { tables: {} } as DBStats;
|
|
645
|
+
databaseStats[dbName] = dbStats;
|
|
646
|
+
|
|
647
|
+
for (const [tableName, table] of Object.entries(db)) {
|
|
648
|
+
try {
|
|
649
|
+
if (table.primaryStore.rootStore instanceof RocksDatabase) {
|
|
650
|
+
getRocksDBStats(table, dbStats);
|
|
651
|
+
} else {
|
|
652
|
+
getLMDBStats(table, dbStats);
|
|
653
|
+
}
|
|
654
|
+
} catch (error) {
|
|
655
|
+
// if a database no longer exists, don't want to throw an error
|
|
656
|
+
logger.notify(`Error getting stats for table ${tableName}: ${error}`);
|
|
657
|
+
}
|
|
658
|
+
}
|
|
659
|
+
}
|
|
660
|
+
return databaseStats;
|
|
661
|
+
}
|
|
662
|
+
|
|
663
|
+
// Maps each requestable attribute name to its collector function. The key set
// also serves as the default attribute list when a request specifies none.
const attributeMap: Record<string, () => Promise<any> | any> = {
	system: getSystemInformation,
	time: getTimeInfo,
	cpu: getCPUInfo,
	memory: getMemoryInfo,
	disk: getDiskInfo,
	network: getNetworkInfo,
	harperdb_processes: getHDBProcessInfo,
	table_size: getTableSize,
	metrics: getMetrics,
	threads: getThreadInfo,
};
|
|
675
|
+
|
|
676
|
+
/**
|
|
677
|
+
* Retrieves system information for the requested attributes.
|
|
678
|
+
* @param {SystemInformationRequest} systemInfoReq
|
|
679
|
+
* @returns {Promise<SystemInformationResponse>}
|
|
680
|
+
*/
|
|
681
|
+
export async function systemInformation(systemInfoReq: SystemInformationRequest): Promise<SystemInformationResponse> {
|
|
682
|
+
const attributes =
|
|
683
|
+
Array.isArray(systemInfoReq.attributes) && systemInfoReq.attributes.length > 0
|
|
684
|
+
? systemInfoReq.attributes
|
|
685
|
+
: Object.keys(attributeMap);
|
|
686
|
+
const response = new SystemInformationResponse();
|
|
687
|
+
await Promise.all(
|
|
688
|
+
attributes
|
|
689
|
+
.filter((attr) => attr in attributeMap)
|
|
690
|
+
.map(async (attr) => {
|
|
691
|
+
if (attr === 'database_metrics') {
|
|
692
|
+
attr = 'metrics';
|
|
693
|
+
}
|
|
694
|
+
response[attr] = await attributeMap[attr]();
|
|
695
|
+
})
|
|
696
|
+
);
|
|
697
|
+
return response;
|
|
698
|
+
}
|