@nsshunt/stsappframework 2.19.282 → 2.19.284

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/dist/index.js +5 -0
  2. package/dist/index.js.map +1 -1
  3. package/dist/influxdb/influxDBManager.js +334 -0
  4. package/dist/influxdb/influxDBManager.js.map +1 -0
  5. package/dist/influxdb/influxDBManagerAgent.js +211 -0
  6. package/dist/influxdb/influxDBManagerAgent.js.map +1 -0
  7. package/dist/influxdb/influxDBManagerBase.js +97 -0
  8. package/dist/influxdb/influxDBManagerBase.js.map +1 -0
  9. package/dist/influxdb/influxDBManagerService.js +240 -0
  10. package/dist/influxdb/influxDBManagerService.js.map +1 -0
  11. package/dist/kafka/IMKafkaManager.js +103 -0
  12. package/dist/kafka/IMKafkaManager.js.map +1 -0
  13. package/package.json +2 -1
  14. package/src/commonTypes.ts +15 -0
  15. package/src/index.ts +5 -0
  16. package/src/influxdb/influxDBManager.ts +359 -0
  17. package/src/influxdb/influxDBManagerAgent.ts +227 -0
  18. package/src/influxdb/influxDBManagerBase.ts +119 -0
  19. package/src/influxdb/influxDBManagerService.ts +257 -0
  20. package/src/kafka/IMKafkaManager.ts +119 -0
  21. package/types/commonTypes.d.ts +14 -0
  22. package/types/commonTypes.d.ts.map +1 -1
  23. package/types/index.d.ts +5 -0
  24. package/types/index.d.ts.map +1 -1
  25. package/types/influxdb/influxDBManager.d.ts +18 -0
  26. package/types/influxdb/influxDBManager.d.ts.map +1 -0
  27. package/types/influxdb/influxDBManagerAgent.d.ts +27 -0
  28. package/types/influxdb/influxDBManagerAgent.d.ts.map +1 -0
  29. package/types/influxdb/influxDBManagerBase.d.ts +19 -0
  30. package/types/influxdb/influxDBManagerBase.d.ts.map +1 -0
  31. package/types/influxdb/influxDBManagerService.d.ts +27 -0
  32. package/types/influxdb/influxDBManagerService.d.ts.map +1 -0
  33. package/types/kafka/IMKafkaManager.d.ts +11 -0
  34. package/types/kafka/IMKafkaManager.d.ts.map +1 -0
@@ -0,0 +1,359 @@
1
+ /* eslint @typescript-eslint/no-explicit-any: 0, @typescript-eslint/no-unused-vars: 0 */ // --> OFF
2
+ import { InfluxDB, Point, WriteApi, QueryApi, flux } from '@influxdata/influxdb-client'
3
+ import { Agent } from 'http'
4
+
5
+ import { InstrumentPayload } from '@nsshunt/stsmodels'
6
+
7
+ import { $Options } from '@nsshunt/stsconfig'
8
+ import { JSONObject } from '@nsshunt/stsutils'
9
+ const goptions = $Options() // NOTE(review): not referenced anywhere in this file — $Options() may cache/initialise global config as a side effect; confirm before removing
10
+
11
+ import { InfluxDBManagerBase } from './influxDBManagerBase'
12
+ import { InfluxDBManagerService} from './influxDBManagerService'
13
+ import { InfluxDBManagerAgent } from './influxDBManagerAgent'
14
+ import { IInfluxDBManagerOptions } from './../commonTypes'
15
+
16
+ /*
17
+ Manual docker run command and setup -------------------------------------------------------
18
+
19
+ docker run -d -p 8086:8086 --name influxdb \
20
+ -v $PWD/data:/var/lib/influxdb2 \
21
+ -v $PWD/config:/etc/influxdb2 \
22
+ -e DOCKER_INFLUXDB_INIT_MODE=setup \
23
+ -e DOCKER_INFLUXDB_INIT_USERNAME=my-user \
24
+ -e DOCKER_INFLUXDB_INIT_PASSWORD=my-password \
25
+ -e DOCKER_INFLUXDB_INIT_ORG=my-org \
26
+ -e DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \
27
+ -e DOCKER_INFLUXDB_INIT_RETENTION=1w \
28
+ -e DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-super-secret-auth-token \
29
+ --restart unless-stopped \
30
+ influxdb:latest
31
+
32
+ Then access admin portal via;
33
+ https://192.168.14.92:8086
34
+
35
+ Login using credentials above
36
+ username: my-user
37
+ password: my-password
38
+
39
+ Select Buckets from the menu
40
+ Create buckets as required
41
+ Example: TestBucket01
42
+ Note: We will not use the default bucket as specified in the docker initial setup (my-bucket)
43
+
44
+ Select API Tokens from the menu
45
+ Generate an API token / custom API token for read/write access to the bucket above
46
+ Bucket: TestBucket01 (from above bucket create)
47
+
48
+ Copy the API token and use in the ENV file for access
49
+ INFLUXDB_API_TOKEN=<< generated from InfluxDB web admin tool >> \
50
+ INFLUXDB_URL=http://192.168.14.92:8086 \
51
+ INFLUXDB_ORG=my-org \
52
+ INFLUXDB_BUCKET=TestBucket01 \
53
+
54
+ Docker compose file --------------------------------------------------------------
55
+
56
+ version: '2'
57
+ services:
58
+ influxdb:
59
+ image: influxdb:latest
60
+ ports:
61
+ - "8086:8086"
62
+ environment:
63
+ DOCKER_INFLUXDB_INIT_MODE: setup
64
+ DOCKER_INFLUXDB_INIT_USERNAME: my-user
65
+ DOCKER_INFLUXDB_INIT_PASSWORD: my-password
66
+ DOCKER_INFLUXDB_INIT_ORG: my-org
67
+ DOCKER_INFLUXDB_INIT_BUCKET: my-bucket
68
+ DOCKER_INFLUXDB_INIT_RETENTION: 1w
69
+ DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: my-super-secret-auth-token
70
+ volumes:
71
+ - /var/run/influxdb/data:/var/lib/influxdb2
72
+ - /var/run/influxdb/config:/etc/influxdb2
73
+ restart: unless-stopped
74
+
75
+
76
+ // Example Queries Below ----------------------------------------------------------------------
77
+
78
+ from(bucket: "TestBucket01")
79
+ |> range(start: v.timeRangeStart, stop: v.timeRangeStop)
80
+ |> filter(fn: (r) => r["_measurement"] == "stsServicePointV3")
81
+ |> group(columns: ["serviceId"], mode: "by")
82
+ |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
83
+ |> yield(name: "mean")
84
+
85
+ from(bucket: "TestBucket01")
86
+ |> range(start: -10m)
87
+ |> filter(fn: (r) => r["_measurement"] == "requestCount")
88
+ |> group(columns: ["serviceInstanceId"])
89
+ |> mean()
90
+
91
+ from(bucket: "TestBucket01")
92
+ |> range(start: -10m)
93
+ |> filter(fn: (r) => r["_measurement"] == "cpu")
94
+ |> group(columns: ["serviceInstanceId"])
95
+ |> aggregateWindow(every: 1m, fn: mean, createEmpty: false)
96
+
97
+ from(bucket: "TestBucket01")
98
+ |> range(start: -2m)
99
+ |> filter(fn: (r) => r["_measurement"] == "cpu")
100
+ |> group(columns: ["serviceInstanceId"])
101
+ |> aggregateWindow(every: 1m, fn: mean, createEmpty: false)
102
+
103
+ from(bucket: "TestBucket01")
104
+ |> range(start: -10s)
105
+ |> filter(fn: (r) => r["_measurement"] == "all")
106
+ |> filter(fn: (r) => r["_field"] == "requestCount")
107
+ |> group(columns: ["serviceInstanceId"])
108
+ |> max()
109
+
110
+ from(bucket: "TestBucket01")
111
+ |> range(start: -10s)
112
+ |> filter(fn: (r) => r["_measurement"] == "all")
113
+ |> filter(fn: (r) => r["_field"] == "requestCount" or r["_field"] == "errorCount")
114
+ |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
115
+ |> group(columns: ["serviceInstanceId"])
116
+ |> max()
117
+
118
+ from(bucket: "TestBucket01")
119
+ |> range(start: -5s)
120
+ |> filter(fn: (r) => r["_measurement"] == "all") |> filter(fn: (r) => r["_field"] == "cpu")
121
+ |> last()
122
+ |> group(columns: ["serviceInstanceId", "serviceId", "_field"])
123
+ |> sum()
124
+ |> group(columns: ["_field"])
125
+ |> sum()
126
+
127
+ // Histo ---------------------------------------
128
+
129
+ import "math"
130
+ from(bucket: "TestBucket01")
131
+ |> range(start: -10m)
132
+ |> filter(fn: (r) => r["_measurement"] == "all") |> filter(fn: (r) => r["_field"] == "latency")
133
+ |> group(columns: ["serviceInstanceIsssd", "serviceId", "_field"])
134
+ |> aggregateWindow(every: 1s, fn: mean, createEmpty: false)
135
+ |> histogram(bins: [0.0, 5.0, 10.0, 20.0, 50.0, math.mInf(sign: 1) ])
136
+ |> difference()
137
+
138
+ // Velocity ------------------------------------------------------------------------
139
+
140
+ dostsvelocityex = (r, ds, d) =>
141
+ from(bucket: "TestBucket01")
142
+ |> range(start: r)
143
+ |> filter(fn: (r) => r["_measurement"] == "all")
144
+ |> filter(fn: (r) => r["_field"] == "requestCount")
145
+ |> group(columns: ["serviceInstanceIsssd", "serviceId", "_field", "serviceInstanceProcessId"])
146
+ |> aggregateWindow(every: ds, fn: last, createEmpty: false)
147
+ |> limit(n: 5)
148
+ |> difference()
149
+ |> last()
150
+ |> group(columns: ["serviceInstanceIsssd", "serviceId", "_field"])
151
+ |> sum()
152
+ |> map(fn: (r) => ({r with _value: float(v: r._value) / d}))
153
+
154
+ dostsvelocity = () =>
155
+ dostsvelocityex(r: -30s, ds: 5s, d: 5.0)
156
+
157
+ dostsvelocity()
158
+
159
+ // Velocity - for a specific service instance ----------------------------------
160
+
161
+ dostsvelocitybyserviceinstanceex = (sid, r, ds, d) =>
162
+ from(bucket: "TestBucket01")
163
+ |> range(start: r)
164
+ |> filter(fn: (r) => r["_measurement"] == "all")
165
+ |> filter(fn: (r) => r["_field"] == "requestCount")
166
+ |> filter(fn: (r) => r["serviceId"] == sid)
167
+ |> group(columns: ["serviceInstanceId", "serviceId", "_field", "serviceInstanceProcessId"])
168
+ |> aggregateWindow(every: ds, fn: last, createEmpty: false)
169
+ |> limit(n: 5)
170
+ |> difference()
171
+ |> last()
172
+ |> sum()
173
+ |> map(fn: (r) => ({r with _value: float(v: r._value) / d}))
174
+
175
+ dostsvelocitybyserviceinstance = (sid) =>
176
+ dostsvelocitybyserviceinstanceex(sid: sid, r: -30s, ds: 5s, d: 5.0)
177
+
178
+ dostsvelocitybyserviceinstance(sid: "STSAuth@1.0.0")
179
+
180
+ // Percentile --------------------------------------------------------------
181
+
182
+ from(bucket: "TestBucket01")
183
+ |> range(start: -10m)
184
+ |> filter(fn: (r) => r["_measurement"] == "all") |> filter(fn: (r) => r["_field"] == "latency")
185
+ |> group(columns: ["serviceInstanceIsssd", "serviceId", "_field"])
186
+ |> aggregateWindow(every: 10s, fn: mean, createEmpty: false)
187
+ |> quantile(q: 0.99, method: "exact_mean")
188
+ */
189
+
190
+ // export INFLUXDB_TOKEN=W6GztqLmWEqqpP9hAOW9nucgVfESk32dC0JYzFCaYD2yJtS8Ox_WK_1hwSkPUaheWjwwcavkE7_J2aRzm3-E7w== // WARNING: real-looking credential committed to a published package — rotate this token and remove it from source
191
+
192
+
193
+ const _logPrefix = 'InfluxDBManager:'
194
+
195
+ export class InfluxDBManager
196
+ {
197
+ #shuttingDown: boolean = false;
198
+ #options: IInfluxDBManagerOptions;
199
+
200
+ //#token = process.env.INFLUXDB_TOKEN;
201
+ #token = '';
202
+ #url = '' // 'http://192.168.14.92:8086'
203
+ #org = '' // `my-org`
204
+ #bucket = '' // `TestBucket01`
205
+ #client: InfluxDB;
206
+ #writeClient: WriteApi;
207
+ #queryApi: QueryApi;
208
+ #writeDataPointFlushTimeout: NodeJS.Timeout | null = null;
209
+ #agent: Agent | null = null;
210
+ #influxDBManagerClients: Record<string, InfluxDBManagerBase> = { };
211
+
212
+ constructor(options: IInfluxDBManagerOptions)
213
+ {
214
+ this.#options = options;
215
+
216
+ this.#token = options.token;
217
+ this.#url = options.url;
218
+ this.#org = options.org;
219
+ this.#bucket = options.bucket;
220
+
221
+ if (options.agent) {
222
+ this.#agent = new Agent({
223
+ keepAlive: options.agent.influxDB_keepAlive,
224
+ keepAliveMsecs: 20000, //@@
225
+ maxSockets: options.agent.influxDB_maxSockets,
226
+ maxTotalSockets: options.agent.influxDB_maxTotalSockets,
227
+ maxFreeSockets: options.agent.influxDB_maxFreeSockets,
228
+ timeout: options.agent.influxDB_timeout,
229
+ //.rejectUnauthorized: options.agent.influxDB_rejectUnauthorized, // only for SSL
230
+ })
231
+
232
+ this.#client = new InfluxDB({
233
+ url: this.#url,
234
+ token: this.#token,
235
+ transportOptions: { agent: this.#agent }
236
+ })
237
+ } else {
238
+ this.#agent = null;
239
+
240
+ this.#client = new InfluxDB({
241
+ url: this.#url,
242
+ token: this.#token
243
+ })
244
+ }
245
+
246
+ this.#queryApi = this.#client.getQueryApi(this.#org)
247
+ this.#writeClient = this.#client.getWriteApi(this.#org, this.#bucket, 'ns')
248
+
249
+ this.CreateInfluxDBManagerClient('service');
250
+ this.CreateInfluxDBManagerClient('agent');
251
+
252
+ const StartWriteClient = () => {
253
+ this.#writeDataPointFlushTimeout = setTimeout(() => {
254
+ this.#writeClient.flush()
255
+ StartWriteClient();
256
+ }, 1000).unref();
257
+ }
258
+
259
+ StartWriteClient();
260
+ }
261
+
262
+ CreateInfluxDBManagerClient(modelType: string) {
263
+ switch (modelType) {
264
+ case 'service':
265
+ this.#influxDBManagerClients[modelType] = new InfluxDBManagerService(this.#options, this.#queryApi, this.#writeClient);
266
+ break;
267
+ case 'agent':
268
+ this.#influxDBManagerClients[modelType] = new InfluxDBManagerAgent(this.#options, this.#queryApi, this.#writeClient);
269
+ break;
270
+ default:
271
+ throw new Error(`Model type: [${modelType} not supported.]`);
272
+ }
273
+ }
274
+
275
+ get serviceManager(): InfluxDBManagerService {
276
+ return this.#influxDBManagerClients['service'] as InfluxDBManagerService;
277
+ }
278
+
279
+ get agentManager(): InfluxDBManagerAgent {
280
+ return this.#influxDBManagerClients['agent'] as InfluxDBManagerAgent;
281
+ }
282
+
283
+ Start = async () => {
284
+ // perform any setup here
285
+ }
286
+
287
+ Terminate = async () => {
288
+ if (this.#shuttingDown) {
289
+ console.log(`${_logPrefix}Terminate: Terminate already called. Ignoring.`.yellow);
290
+ } else {
291
+ this.#shuttingDown = true;
292
+ try {
293
+ this.#writeClient.flush();
294
+ this.#shuttingDown = true;
295
+ if (this.#writeDataPointFlushTimeout) {
296
+ clearTimeout(this.#writeDataPointFlushTimeout);
297
+ this.#writeDataPointFlushTimeout = null;
298
+ }
299
+
300
+ if (this.#agent) {
301
+ this.#agent?.destroy();
302
+ this.#agent = null;
303
+ }
304
+
305
+ console.log(`ProducerDisconnect`.yellow);
306
+ } catch (error) {
307
+ console.error(`${_logPrefix}#Terminate: Error: [${error}]`.red);
308
+ }
309
+ }
310
+ }
311
+
312
+ /*
313
+ .floatField('timer', instrumentPayload.instruments[Gauge.TIMER_GAUGE].val)
314
+ .floatField('duration', instrumentPayload.instruments[Gauge.DURATION_GAUGE].val)
315
+ .floatField('latency', instrumentPayload.instruments[Gauge.LATENCY_GAUGE].val)
316
+ */
317
+
318
+ CreateGlobalCountModel = (stscount: any[]) => {
319
+ try {
320
+ const results: any = { };
321
+ stscount.forEach((o: any) => {
322
+ results[o._field] = o._value;
323
+ });
324
+ return results;
325
+ } catch (error) {
326
+ console.error(`${_logPrefix}#CreateGlobalCountModel: Error: [${error}]`.red);
327
+ }
328
+ }
329
+
330
+
331
+ CreateNestedObject(combinedResults: JSONObject, keyList: string[]): JSONObject {
332
+ let workingObject = combinedResults;
333
+ keyList.forEach((key) => {
334
+ if (!workingObject[key]) {
335
+ workingObject[key] = { };
336
+ workingObject = workingObject[key];
337
+ }
338
+ });
339
+ return combinedResults;
340
+ }
341
+
342
+ // Agent context payload
343
+ // {"nid":"somehost@e58f5d75-6ff6-4e04-92a4-f2bcd722fec0-someuseragent|MainProcess|0","id":"somehost@e58f5d75-6ff6-4e04-92a4-f2bcd722fec0-someuseragent|MainProcess|0","hostName":"somehost","agentName":"e58f5d75-6ff6-4e04-92a4-f2bcd722fec0-someuseragent","threadId":"MainProcess","asyncRunnerId":"0"}
344
+ async OutputInfluxDB(instrumentPayload: InstrumentPayload): Promise<boolean> {
345
+ try {
346
+ if (this.#shuttingDown) {
347
+ return false;
348
+ }
349
+ if (instrumentPayload.context.agentName) {
350
+ return this.#influxDBManagerClients['agent'].OutputInfluxDB(instrumentPayload);
351
+ } else {
352
+ return this.#influxDBManagerClients['service'].OutputInfluxDB(instrumentPayload);
353
+ }
354
+ } catch (error) {
355
+ console.error(`${_logPrefix}OutputInfluxDB: Could not write data point: [${error}]`.red);
356
+ return false;
357
+ }
358
+ }
359
+ }
@@ -0,0 +1,227 @@
1
+ /* eslint @typescript-eslint/no-explicit-any: 0, @typescript-eslint/no-unused-vars: 0 */ // --> OFF
2
+ import { Point, WriteApi, QueryApi } from '@influxdata/influxdb-client'
3
+
4
+ import { InstrumentPayload } from '@nsshunt/stsmodels'
5
+ import { Gauge, InstrumentVelocity } from '@nsshunt/stsinstrumentation'
6
+
7
+ import { InfluxDBManagerBase } from './influxDBManagerBase'
8
+
9
+ import { IInfluxDBManagerOptions } from './../commonTypes'
10
+
11
+ const _logPrefix = 'InfluxDBManagerService:'
12
+
13
+ export class InfluxDBManagerAgent extends InfluxDBManagerBase
14
+ {
15
+ constructor(options: IInfluxDBManagerOptions, queryApi: QueryApi, writeClient: WriteApi) {
16
+ super(options, queryApi, writeClient);
17
+ }
18
+
19
+ override get modelType(): string {
20
+ return 'agent'
21
+ }
22
+
23
+ // Queries --------------------------------------------------------------------------------------------------------
24
+
25
+ // Counter metrics
26
+ #GetSTSCountGenericAgent = async (modelType: string, filterClause: string, groupClause: string, showOutput: boolean = false) => {
27
+ try {
28
+ const query = `dostscountex = (q, d) =>
29
+ from(bucket: "${this.options.bucket}")
30
+ |> range(start: d)
31
+ |> filter(fn: (r) => r["_measurement"] == "all" and r["modelType"] == "${modelType}" ${filterClause})
32
+ |> filter(fn: (r) => r["_field"] == q)
33
+ |> last()
34
+ |> ${groupClause}
35
+ |> sum()
36
+
37
+ dostscount = (d) =>
38
+ union(tables: [
39
+ dostscountex(q: "requestCount", d: d),
40
+ dostscountex(q: "errorCount", d: d),
41
+ dostscountex(q: "retryCount", d: d),
42
+ dostscountex(q: "authenticationCount", d: d),
43
+ dostscountex(q: "velocity", d: d),
44
+ dostscountex(q: "coreCount", d: d),
45
+ dostscountex(q: "timer", d: d),
46
+ dostscountex(q: "duration", d: d),
47
+ dostscountex(q: "latency", d: d),
48
+ dostscountex(q: "activeRequestCount", d: d)
49
+ ])
50
+ dostscount(d: -5s)`;
51
+
52
+ if (showOutput) {
53
+ console.log(query);
54
+ }
55
+
56
+ return this.queryApi.collectRows(query)
57
+ } catch (error) {
58
+ console.error(`${_logPrefix}#GetSTSCountGeneric: Error: [${error}]`.red);
59
+ }
60
+ }
61
+
62
+ // Histo metrics
63
+ #GetSTSHistoGenericService = async (modelType: string, filterClause: string, groupClause: string): Promise<any> => {
64
+ try {
65
+ const query = `import "math"
66
+
67
+ dostshistoex = (q, d) =>
68
+ from(bucket: "${this.options.bucket}")
69
+ |> range(start: d)
70
+ |> filter(fn: (r) => r["_measurement"] == "all")
71
+ |> filter(fn: (r) => r["_field"] == q and r["modelType"] == "${modelType}" ${filterClause})
72
+ |> ${groupClause}
73
+ |> histogram(bins: [0.0, 10.0, 20.0, 50.0, 100.0, 1000.0, 50000.0, math.mInf(sign: 1) ])
74
+ |> difference()
75
+
76
+ dostshisto = (d) =>
77
+ union(tables: [
78
+ dostshistoex(q: "latency", d: d),
79
+ dostshistoex(q: "duration", d: d)
80
+ ])
81
+
82
+ dostshisto(d: -10m)`;
83
+ return this.queryApi.collectRows(query)
84
+ } catch (error) {
85
+ console.error(`${_logPrefix}#GetSTSHistoGeneric: Error: [${error}]`.red);
86
+ }
87
+ }
88
+
89
+ // Quantile metrics
90
+ #GetSTSQuantileGenericAgent = async (modelType: string, filterClause: string, groupClause: string) => {
91
+ try {
92
+ const query = `dostsquantileex = (q, d, i, f) =>
93
+ from(bucket: "${this.options.bucket}")
94
+ |> range(start: d)
95
+ |> filter(fn: (r) => r["_measurement"] == "all")
96
+ |> filter(fn: (r) => r["_field"] == f and r["modelType"] == "${modelType}" ${filterClause})
97
+ |> ${groupClause}
98
+ |> aggregateWindow(every: i, fn: max, createEmpty: false)
99
+ |> quantile(q: q, method: "estimate_tdigest", compression: 1000.0)
100
+ |> set(key: "quantile", value: string(v:q))
101
+ |> group(columns: ["LatencyType","quantile"])
102
+
103
+ dostsquantile = (d, i, f) =>
104
+ union(tables: [
105
+ dostsquantileex(q: 0.5, d: d, i: i, f: f),
106
+ dostsquantileex(q: 0.8, d: d, i: i, f: f),
107
+ dostsquantileex(q: 0.9, d: d, i: i, f: f),
108
+ dostsquantileex(q: 0.95, d: d, i: i, f: f),
109
+ dostsquantileex(q: 0.99, d: d, i: i, f: f)
110
+ ])
111
+
112
+ union(tables: [
113
+ dostsquantile(d: -10m, i: 5s, f: "latency"),
114
+ dostsquantile(d: -10m, i: 5s, f: "duration")
115
+ ])`;
116
+
117
+ return this.queryApi.collectRows(query)
118
+ } catch (error) {
119
+ console.error(`${_logPrefix}#GetSTSQuantileGeneric: Error: [${error}]`.red);
120
+ }
121
+ }
122
+
123
+ // Metric queries -------------------------------------------------------------------------------------------------
124
+ // Root level metrics
125
+ async GetInfluxDBResultsRootAgent() {
126
+ try {
127
+ const combinedResults = await this.ProcessInfluxDBResults([
128
+ this.#GetSTSCountGenericAgent('agent', '', 'group(columns: ["_field"])'),
129
+ this.#GetSTSQuantileGenericAgent(`agent`, '', 'group(columns: ["_field"])'),
130
+ this.#GetSTSHistoGenericService('agent', '', 'group(columns: ["_field"])')],
131
+ [ ])
132
+
133
+ return { SubscriptionType: "ID_Agent_Root", data: combinedResults };
134
+ } catch (error) {
135
+ console.error(`${_logPrefix}GetInfluxDBResultsRootAgent: Error: [${error}]`.red);
136
+ }
137
+ }
138
+
139
+ // Service level metrics
140
+ async GetInfluxDBResultsAgent() {
141
+ try {
142
+ const combinedResults = await this.ProcessInfluxDBResults([
143
+ this.#GetSTSCountGenericAgent('agent', '', 'group(columns: ["agentName", "_field"])'),
144
+ this.#GetSTSQuantileGenericAgent(`agent`, '', 'group(columns: ["agentName", "_field"])'),
145
+ this.#GetSTSHistoGenericService('agent', '', 'group(columns: ["agentName", "_field"])')],
146
+ ['agentName'])
147
+
148
+ return { SubscriptionType: "ID_Agent_Agents", data: combinedResults};
149
+ } catch (error) {
150
+ console.error(`${_logPrefix}GetInfluxDBResultsAgent: Error: [${error}]`.red);
151
+ }
152
+ }
153
+
154
+ // Service instance metrics for a particular service type
155
+ async GetInfluxDBResultsAgentThreads(agentName: string) {
156
+ try {
157
+
158
+ const combinedResults = await this.ProcessInfluxDBResults([
159
+ this.#GetSTSCountGenericAgent('agent', `and r["agentName"] == "${agentName}"`, `group(columns: ["agentName", "threadId", "_field"])`),
160
+ this.#GetSTSQuantileGenericAgent(`agent`, `and r["agentName"] == "${agentName}"`, 'group(columns: ["agentName", "threadId", "_field"])'),
161
+ this.#GetSTSHistoGenericService('agent', `and r["agentName"] == "${agentName}"`, 'group(columns: ["agentName", "threadId", "_field"])')],
162
+ ['agentName', 'threadId'])
163
+
164
+ console.log(`${JSON.stringify(combinedResults)}`.magenta);
165
+
166
+ return { SubscriptionType: `ID_Agent_Agent_Threads_${agentName}`, data: combinedResults};
167
+ } catch (error) {
168
+ console.error(`${_logPrefix}GetInfluxDBResultsAgentThreads: Error: [${error}]`.red);
169
+ }
170
+ }
171
+
172
+ // Service instance thread metrics for a particular service instance
173
+ async GetInfluxDBResultsAgentThread(agentName: string, threadId: string) {
174
+ try {
175
+ const combinedResults = await this.ProcessInfluxDBResults([
176
+ this.#GetSTSCountGenericAgent('agent', `and r["agentName"] == "${agentName}" and r["threadId"] == "${threadId}"`, `group(columns: ["agentName", "threadId", "asyncRunnerId", "_field"])`),
177
+ this.#GetSTSQuantileGenericAgent(`agent`, `and r["agentName"] == "${agentName}" and r["threadId"] == "${threadId}"`, 'group(columns: ["agentName", "threadId", "asyncRunnerId", "_field"])'),
178
+ this.#GetSTSHistoGenericService('agent', `and r["agentName"] == "${agentName}" and r["threadId"] == "${threadId}"`, 'group(columns: ["agentName", "threadId", "asyncRunnerId", "_field"])')],
179
+ ['agentName', 'threadId', 'asyncRunnerId'])
180
+
181
+ return { SubscriptionType: `ID_Agent_Agent_Thread_${agentName}_${threadId}`, data: combinedResults};
182
+ } catch (error) {
183
+ console.error(`${_logPrefix}GetInfluxDBResultsAgentThread: Error: [${error}]`.red);
184
+ }
185
+ }
186
+
187
+ // Agent context payload
188
+ // {"nid":"somehost@e58f5d75-6ff6-4e04-92a4-f2bcd722fec0-someuseragent|MainProcess|0","id":"somehost@e58f5d75-6ff6-4e04-92a4-f2bcd722fec0-someuseragent|MainProcess|0","hostName":"somehost"
189
+ // ,"agentName":"e58f5d75-6ff6-4e04-92a4-f2bcd722fec0-someuseragent","threadId":"MainProcess","asyncRunnerId":"0"}
190
+ async OutputInfluxDB(instrumentPayload: InstrumentPayload): Promise<boolean> {
191
+ try {
192
+ const { nid, id, hostName, agentName, threadId, asyncRunnerId } = instrumentPayload.context;
193
+
194
+ let systemcpu = 0.0;
195
+ if (instrumentPayload.instruments[Gauge.CPU_SYSTEM_LOAD_GAUGE]) {
196
+ systemcpu = (instrumentPayload.instruments[Gauge.CPU_SYSTEM_LOAD_GAUGE].val as number);
197
+ }
198
+
199
+ const point = new Point('all')
200
+ // Context settings
201
+ .tag('id', id as string) // Must be unique (uuid)
202
+ .tag('hostName', hostName as string) // Must be unique (uuid)
203
+ .tag('agentName', agentName as string) // Must be unique (uuid)
204
+ .tag('threadId', threadId as string) // Must be unique (uuid)
205
+ .tag('asyncRunnerId', asyncRunnerId as string)
206
+ .tag('modelType', 'agent')
207
+ // Data fields
208
+ .intField('requestCount', instrumentPayload.instruments[Gauge.REQUEST_COUNT_GAUGE].val) // q
209
+ .intField('errorCount', instrumentPayload.instruments[Gauge.ERROR_COUNT_GAUGE].val) // j
210
+ .intField('retryCount', instrumentPayload.instruments[Gauge.RETRY_COUNT_GAUGE].val) // r
211
+ .intField('authenticationCount', instrumentPayload.instruments[Gauge.AUTHENTICATION_COUNT_GAUGE].val) // b
212
+ .floatField('velocity', (instrumentPayload.instruments[Gauge.VELOCITY_GAUGE] as InstrumentVelocity).va) // t
213
+ .intField('coreCount', instrumentPayload.instruments[Gauge.CORE_COUNT_GAUGE].val) // x
214
+ .floatField('timer', instrumentPayload.instruments[Gauge.TIMER_GAUGE].val) // s
215
+ .floatField('duration', instrumentPayload.instruments[Gauge.DURATION_GAUGE].val) // h
216
+ .floatField('latency', instrumentPayload.instruments[Gauge.LATENCY_GAUGE].val) // h
217
+ .intField('activeRequestCount', instrumentPayload.instruments[Gauge.ACTIVE_REQUEST_GAUGE].val) // a
218
+ // logger // m
219
+ // duration histo // i
220
+ this.writeClient.writePoint(point);
221
+ return true;
222
+ } catch (error: any) {
223
+ console.error(`${_logPrefix}OutputInfluxDBAgent: Could not write data point: [${error}]`.red);
224
+ return false;
225
+ }
226
+ }
227
+ }