@blazedpath/commons 0.0.11 → 0.1.1
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as published to their respective public registries.
- package/blz-base/index.js +893 -895
- package/blz-cache/index.js +25 -14
- package/blz-core/index.js +347 -349
- package/blz-cryptography/index.js +47 -49
- package/blz-datetimes/index.js +348 -350
- package/blz-file/index.js +83 -88
- package/blz-hazelcast/index.js +100 -103
- package/blz-iterable/index.js +404 -406
- package/blz-json-schema/index.js +6 -8
- package/blz-jwt/index.js +89 -92
- package/blz-kafka/index.js +106 -108
- package/blz-math/index.js +127 -129
- package/blz-mongodb/index.js +33 -35
- package/blz-rds/index.js +44 -46
- package/blz-rds-mysql/index.js +9 -11
- package/blz-rds-mysqlx/index.js +10 -12
- package/blz-rds-oracle/index.js +99 -104
- package/blz-rds-postgres/index.js +11 -13
- package/blz-redis/index.js +123 -125
- package/blz-regex/index.js +22 -24
- package/blz-strings/index.js +165 -167
- package/blz-uuid/index.js +2 -4
- package/blz-yaml/index.js +16 -19
- package/package.json +1 -1
package/blz-json-schema/index.js
CHANGED

@@ -1,13 +1,11 @@
 const validate = require('jsonschema').validate;
 
 module.exports = {
-
-
-
-
-
-
-    return result
-  }
+  jsonSchemaValidate: function (data, schema) {
+    if (schema === undefined || schema === null)
+      throw new Error('schema undefined')
+    let result = validate(data, schema)
+    result.valid = (result.errors.length === 0)
+    return result
   }
 };
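
The new blz-json-schema surface is a single jsonSchemaValidate helper that wraps jsonschema's validate() and adds a boolean valid flag derived from the errors array. A minimal usage sketch follows; the require path is an assumption based on the package layout above, not something this diff confirms:

    // Hypothetical usage; the require path is assumed from the file layout.
    const { jsonSchemaValidate } = require('@blazedpath/commons/blz-json-schema');

    const schema = { type: 'object', properties: { name: { type: 'string' } }, required: ['name'] };
    const result = jsonSchemaValidate({ name: 'demo' }, schema);
    console.log(result.valid); // true, since result.valid is (result.errors.length === 0)
    jsonSchemaValidate({ name: 'demo' }, null); // throws Error('schema undefined')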
package/blz-jwt/index.js
CHANGED

@@ -1,5 +1,4 @@
 const jwt = require('jsonwebtoken');
-const { Backend } = require('../blz-json-schema');
 
 class JwtSecretKeyNotDefined extends Error {
   constructor() {
@@ -18,107 +17,105 @@ function getJwtSecretKey() {
 }
 
 module.exports = {
-
-  JwtSecretKeyNotDefined,
+  JwtSecretKeyNotDefined,
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
+  jwtSign: function (payload, secret, algorithm) {
+    try {
+      switch (algorithm) {
+        case 'HMAC-SHA256':
+          return jwt.sign(payload, secret, { algorithm: 'HS256' });
+        case 'HMAC-SHA384':
+          return jwt.sign(payload, secret, { algorithm: 'HS384' });
+        case 'HMAC-SHA512':
+          return jwt.sign(payload, secret, { algorithm: 'HS512' });
+        default:
+          throw new Error('Invalid encryption algorithm');
+      }
+    } catch (jwterr) {
+      if (jwterr.name === 'TokenExpiredError') {
+        let err = new Error();
+        err.code = 'JwtExpired';
+        err.data = { expiredAt: jwterr.expiredAt };
+        throw err;
+      } else {
+        throw jwterr;
       }
-  }
+    }
+  },
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
+  jwtVerify: function (token, secret) {
+    try {
+      return jwt.verify(token, secret);
+    } catch (jwterr) {
+      if (jwterr.name === 'TokenExpiredError') {
+        let err = new Error();
+        err.code = 'JwtExpired';
+        err.data = { expiredAt: jwterr.expiredAt };
+        throw err;
+      } else {
+        let err = new Error();
+        err.code = 'JwtError';
+        err.data = {};
+        throw err;
       }
-  }
+    }
+  },
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
+  jwtDecode: function (token) {
+    try {
+      return jwt.decode(token);
+    } catch (jwterr) {
+      if (jwterr.name === 'TokenExpiredError') {
+        let err = new Error();
+        err.code = 'JwtExpired';
+        err.data = { expiredAt: jwterr.expiredAt };
+        throw err;
+      } else {
+        let err = new Error();
+        err.code = 'JwtError';
+        err.data = {};
+        throw err;
       }
-  }
+    }
+  },
 
-
-
-
+  jwtCreateToken: function (id, expiresIn = '10s') {
+    return jwt.sign({ id }, getJwtSecretKey(), { expiresIn });
+  },
 
-
-
-
-
-
-
-
-
-
+  jwtGetId: function (token) {
+    try {
+      const decoded = jwt.verify(token, getJwtSecretKey());
+      return decoded.id;
+    } catch (err) {
+      const error = new Error('Invalid or expired token');
+      error.code = 'JwtInvalid';
+      error.data = err.message;
+      throw error;
+    }
+  },
+
+  jwtEncodeUrl: function (url, expiresIn = '10s') {
+    const [path, query] = url.split('?');
+    const parts = path.split('/').map(part => {
+      if (/^\d+$/.test(part)) {
+        return jwt.sign({ id: parseInt(part) }, getJwtSecretKey(), { expiresIn });
       }
-
+      return part;
+    });
+    return parts.join('/') + (query ? `?${query}` : '');
+  },
 
-
-
-
-
-
-
+  jwtDecodeUrl: function (url) {
+    const [path, query] = url.split('?');
+    const parts = path.split('/').map(part => {
+      try {
+        const decoded = jwt.verify(part, getJwtSecretKey());
+        return decoded.id?.toString() ?? part;
+      } catch {
         return part;
-      }
-
-    }
-
-  jwtDecodeUrl: function (url) {
-    const [path, query] = url.split('?');
-    const parts = path.split('/').map(part => {
-      try {
-        const decoded = jwt.verify(part, getJwtSecretKey());
-        return decoded.id?.toString() ?? part;
-      } catch {
-        return part;
-      }
-    });
-    return parts.join('/') + (query ? `?${query}` : '');
-  }
+      }
+    });
+    return parts.join('/') + (query ? `?${query}` : '');
   }
 };
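
The blz-jwt rewrite drops the unused Backend import and fills in the exported API: jwtSign maps the friendly names 'HMAC-SHA256', 'HMAC-SHA384' and 'HMAC-SHA512' onto jsonwebtoken's HS256/HS384/HS512, while jwtSign, jwtVerify and jwtDecode normalize jsonwebtoken's TokenExpiredError into an error with code 'JwtExpired' and the expiry timestamp in err.data. A rough usage sketch; the require path is an assumption, not confirmed by this diff:

    // Hypothetical usage; the require path is assumed from the file layout.
    const { jwtSign, jwtVerify } = require('@blazedpath/commons/blz-jwt');

    const token = jwtSign({ sub: 'user-1' }, 'shared-secret', 'HMAC-SHA256'); // HS256 under the hood
    try {
      const payload = jwtVerify(token, 'shared-secret');
      console.log(payload.sub); // 'user-1'
    } catch (err) {
      if (err.code === 'JwtExpired') console.log('expired at', err.data.expiredAt);
      else console.log('verification failed with code', err.code); // 'JwtError'
    }

Note that jwtCreateToken, jwtGetId, jwtEncodeUrl and jwtDecodeUrl all go through getJwtSecretKey(), whose body sits outside these hunks; presumably it throws JwtSecretKeyNotDefined when no secret key is configured.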
package/blz-kafka/index.js
CHANGED

@@ -31,11 +31,11 @@ const getConfig = async function (connection) {
   if (connection.brokers)
     kafkaConfig.brokers = connection.brokers.split(',');
   kafkaConfig.connectionTimeout = connection.connectionTimeout || 3000;
-  kafkaConfig.authenticationTimeout
+  kafkaConfig.authenticationTimeout= connection.authenticationTimeout || 1000;
   kafkaConfig.reauthenticationThreshold = connection.reauthenticationThreshold || 10000;
   if (connection.requestTimeout)
     kafkaConfig.requestTimeout = connection.requestTimeout;
-  if (connection.connectionRetries || connection.connectionInitialRetryTime || connection.connectionMaxRetryTime || connection.connectionRetryFactor)
+  if (connection.connectionRetries || connection.connectionInitialRetryTime || connection.connectionMaxRetryTime || connection.connectionRetryFactor){
     kafkaConfig.retry = {};
     if (connection.connectionRetries)
       kafkaConfig.retry.retries = connection.connectionRetries;
@@ -81,16 +81,16 @@ let getOrCreateProducer = async function (connection) {
   if (producer) {
     return producer;
   }
-  let kafkaConfig = await getConfig(connection);
+  let kafkaConfig = await getConfig(connection);
   let kafka = new Kafkajs.Kafka(kafkaConfig);
-  producer = kafka.producer({
+  producer = kafka.producer({createPartitioner: Kafkajs.Partitioners.LegacyPartitioner});
   await producer.connect();
   producers[connection.name] = producer;
   return producer;
 };
 
 let getOrCreateAdmin = async function (connection, config) {
-  let kafkaConfig = await getConfig(connection);
+  let kafkaConfig = await getConfig(connection);
   let kafka = new Kafkajs.Kafka(kafkaConfig);
   admin = kafka.admin();
   await admin.connect();
@@ -134,7 +134,7 @@ async function createKafkaInstances(kafkaConsumers, connections, buildConfigItem
       continue
     }
 
-    let connection = connections[buildKafkaConsumer.systemName];
+    let connection = connections[buildKafkaConsumer.systemName];
     // Check if kafka connection info is undefined
     if (!connection) {
       const kafkaBuildSystemConfig = buildConfigItems[buildKafkaConsumer.systemName]
@@ -153,11 +153,11 @@ async function createKafkaInstances(kafkaConsumers, connections, buildConfigItem
     if (connection.brokers)
       kafkaConfig.brokers = connection.brokers.split(',');
     kafkaConfig.connectionTimeout = connection.connectionTimeout || 3000;
-    kafkaConfig.authenticationTimeout
+    kafkaConfig.authenticationTimeout= connection.authenticationTimeout || 1000;
     kafkaConfig.reauthenticationThreshold = connection.reauthenticationThreshold || 10000;
     if (connection.requestTimeout)
       kafkaConfig.requestTimeout = connection.requestTimeout;
-    if (connection.connectionRetries || connection.connectionInitialRetryTime || connection.connectionMaxRetryTime || connection.connectionRetryFactor)
+    if (connection.connectionRetries || connection.connectionInitialRetryTime || connection.connectionMaxRetryTime || connection.connectionRetryFactor){
      kafkaConfig.retry = {};
      if (connection.connectionRetries)
        kafkaConfig.retry.retries = connection.connectionRetries;
@@ -195,16 +195,16 @@ async function createKafkaInstances(kafkaConsumers, connections, buildConfigItem
      }
    }
    kafkaConfig.logLevel = Kafkajs.logLevel.ERROR;
-    kafkaConfig.logCreator = ()
+    kafkaConfig.logCreator = ()=> {
      return ({ namespace, level, label, log }) => {
        if (level === 1 && log.message && log.message.includes('The group is rebalancing')) {
          // Do nothing or log at a different level for rebalancing errors
          return;
-        } else if (level === 1) {
+        } else if (level === 1 ) {
          runHelper.logError(`[${namespace}] : ${JSON.stringify(log.message)}`, namespace, log);
        } else {
          runHelper.log(`[${namespace}] : ${log.message}`);
-        }
+        }
      }
    };
    kafka = new Kafkajs.Kafka(kafkaConfig);
@@ -219,7 +219,7 @@ async function createKafkaInstances(kafkaConsumers, connections, buildConfigItem
 // in blz-builder it is ... TODO
 // - kafkaConsumers: object with every consumer. Each property of the object is a consumer
 // - connections: object with every connection, by key
-async function startKafkaConsumers(kafkas,
+async function startKafkaConsumers (kafkas,config, kafkaConsumers, connections, runHelper) {
 
   for (let kafkaConsumerName in kafkaConsumers) {
     let buildKafkaConsumer = kafkaConsumers[kafkaConsumerName];
@@ -232,21 +232,21 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
     let kafkaErrorOptions = {};
     kafkaErrorOptions.errorOptions = connection.errorOptions ?? false;
     kafkaErrorOptions.manualCommit = connection.messageManualCommit ?? false;
-    kafkaErrorOptions.retries =
+    kafkaErrorOptions.retries = connection.messageRetryCount ?? 5;
     kafkaErrorOptions.initialRetryTime = connection.initialRetryTime ?? 300;
     kafkaErrorOptions.factor = connection.messageRetryFactor ?? 2;
     kafkaErrorOptions.multiplier = connection.messageRetryMultiplier ?? 2;
     kafkaErrorOptions.traceAll = connection.traceAll ?? false;
-    kafkaErrorOptions.retryTopic = (buildKafkaConsumer.useErrorTopic && buildKafkaConsumer.parameterizedErrorTopic)
+    kafkaErrorOptions.retryTopic = (buildKafkaConsumer.useErrorTopic && buildKafkaConsumer.parameterizedErrorTopic)? config.parameters[ buildKafkaConsumer.parameterizedErrorTopic.configParameterName ] : '';
 
     let topics = [];
-    if (buildKafkaConsumer.topics)
+    if (buildKafkaConsumer.topics){
       for (let i = 0; i < buildKafkaConsumer.topics.length; i++) {
         let topic = buildKafkaConsumer.topics[i];
         topics.push(topic);
       }
     }
-    if (buildKafkaConsumer.parameterizedTopics)
+    if (buildKafkaConsumer.parameterizedTopics){
       for (let i = 0; i < buildKafkaConsumer.parameterizedTopics.length; i++) {
         let parameterizedTopic = buildKafkaConsumer.parameterizedTopics[i];
         const topic = config.parameters[parameterizedTopic.configParameterName];
@@ -263,13 +263,13 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
       continue
     }
     const groupId = buildKafkaConsumer.isParameterizedGroupId
-      ? config.parameters[buildKafkaConsumer.parameterizedGroupId.configParameterName]
+      ? config.parameters[ buildKafkaConsumer.parameterizedGroupId.configParameterName ]
       : buildKafkaConsumer.groupId
-
+
     if (groupId === undefined || groupId === null || groupId.trim() === '') {
       console.error(`Kafka: ${kafkaConsumerName} consumer not had groupId defined`)
       continue
-    }
+    }
     if (topics.length === 0) {
       console.error(`Kafka: ${kafkaConsumerName} consumer not had topics defined`)
       continue
@@ -296,7 +296,7 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
            message: { key: consumeData.message.key, value: consumeData.message.value, headers: consumeData.message.headers }
          };
          await runHelper.runAlgorithm(callContext, { elementType: 'KafkaConsumer', elementName: kafkaConsumerName, path: buildKafkaConsumer.path, aliasesMatches: buildKafkaConsumer.aliasesMatches }, null, buildKafkaConsumer.algorithm, ctx0);
-
+          if (kafkaErrorOptions.manualCommit) {
            await consumer.commitOffsets([{ topic, partition, offset: (Number(message.offset) + 1).toString() }]);
          }
        }
@@ -310,7 +310,7 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
      }
      // If Kafka is an actual server (!= mock). Use this section
      else {
-        try
+        try{
          let consumerOptions = {}
          let consumerOptionsDefault = {
            sessionTimeout: 60000, // 60 seconds
@@ -327,7 +327,7 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
              multiplier: 2 // Backoff multiplier
            }
          };
-          if (connection.consumerOptions)
+          if (connection.consumerOptions){
            consumerOptions = { ...consumerOptionsDefault, ...connection.consumerOptions };
          }
          let consumer = kafka.consumer({ groupId: groupId });
@@ -340,8 +340,8 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
          }
          try {
            await consumer.subscribe({ topic: topic, fromBeginning: buildKafkaConsumer.fromBeginning });
-          } catch
-            runHelper.logError(new Error(`Kafka: error whit topic ${topic}: ${err.message} `,
+          } catch(err) {
+            runHelper.logError(new Error(`Kafka: error whit topic ${topic}: ${err.message} `,err));
          }
        }
        // Handle group join and leave events
@@ -399,18 +399,18 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
            await consumer.commitOffsets([{ topic, partition, offset: (Number(message.offset) + 1).toString() }]);
          }
        } catch (err) {
-          if
+          if(kafkaErrorOptions.traceAll) {
            console.error(err);
          }
-          if
+          if(kafkaErrorOptions.retryTopic && kafkaErrorOptions.retryTopic !== '') {
            // sends message with error to retry topic
-            await module.exports.
+            await module.exports.kafkaSend(connection, kafkaErrorOptions.retryTopic, [message]);
            // marks the message as read on original topic
            await consumer.commitOffsets([{ topic, partition, offset: (Number(message.offset) + 1).toString() }]);
-
+          }
        }
        await runHelper.terminateCallContext(callContext);
-        },
+        },
        retry: {
          retries: kafkaErrorOptions.retries,
          initialRetryTime: kafkaErrorOptions.initialRetryTime,
@@ -429,96 +429,94 @@ async function startKafkaConsumers(kafkas, config, kafkaConsumers, connections,
      } catch (err) {
        runHelper.logError(err);
      }
-    }
+    }
    if (process.env.blz_traceAll === 'true' || process.env.blz_traceKafkaConsumers === 'true') {
      console.log('KAFKA CONSUMER | ' + kafkaConsumerName + ' | Start');
    }
-
+
  }
 }
 // END: functions to initialize kafka consumers
 
 module.exports = {
-
-
-
-
-
-
-
-
-      throw new Error(`Invalid topic ${i + 1} config`);
-    }
-  }
-  let result = false
-  let error = null;
-  let admin = null;
-  try {
-    let kafkaConfig = await getConfig(connection);
-    let kafka = new Kafkajs.Kafka(kafkaConfig);
-    admin = kafka.admin();
-    await admin.connect();
-    const currentTopics = await admin.listTopics();
-    let newsTopics = config.topicsConfig.topics.filter(topic => !currentTopics.includes(topic.topic));
-    if (newsTopics && newsTopics.length > 0) {
-      config.topicsConfig.topics = newsTopics;
-      result = await admin.createTopics(config.topicsConfig);
-    }
-  } catch (e) {
-    error = e;
-  } finally {
-    if (admin) {
-      await admin.disconnect();
-    }
-  }
-  if (error) {
-    throw error;
+  kafkaConfig: async function (connection, config) {
+    if(!config || !config.topicsConfig || !config.topicsConfig.topics || !config.topicsConfig.topics.length || config.topicsConfig.topics.length === 0) {
+      throw new Error('Invalid config');
+    }
+    for (let i = 0; i < config.topicsConfig.topics.length; i++) {
+      let topic = config.topicsConfig.topics[i];
+      if (!topic.topic || topic.topic.trim().length === 0) {
+        throw new Error(`Invalid topic ${i+1} config`);
      }
-
-
-
-
-
-
+    }
+    let result = false
+    let error = null;
+    let admin = null;
+    try {
+      let kafkaConfig = await getConfig(connection);
+      let kafka = new Kafkajs.Kafka(kafkaConfig);
+      admin = kafka.admin();
      await admin.connect();
-      const
-
-
-
-
-
-
-
-
-
+      const currentTopics = await admin.listTopics();
+      let newsTopics = config.topicsConfig.topics.filter(topic => !currentTopics.includes(topic.topic));
+      if(newsTopics && newsTopics.length > 0) {
+        config.topicsConfig.topics = newsTopics;
+        result = await admin.createTopics(config.topicsConfig);
+      }
+    }catch(e) {
+      error = e;
+    } finally {
+      if(admin) {
+        await admin.disconnect();
+      }
+    }
+    if(error) {
+      throw error;
+    }
+    return result;
+  },
+  kafkaGetTopics: async function (connection) {
+    let kafkaConfig = await getConfig(connection);
+    let kafka = new Kafkajs.Kafka(kafkaConfig);
+    const admin = kafka.admin();
+    await admin.connect();
+    const topics = await admin.listTopics();
+    await admin.disconnect();
+    return topics;
+  },
+  kafkaSend: async function (connection, topic, messages) {
+    if (connection.provider) {
+      await connection.provider.produce(connection, topic, messages);
      return null;
-  },
-  kafkaConnections: async function (kafkaConsumers, connections, buildConfigItems, runHelper) {
-    return createKafkaInstances(kafkaConsumers, connections, buildConfigItems, runHelper);
-  },
-  startKafkaConsumers: async function (kafkas, config, kafkaConsumers, connections, runHelper) {
-    startKafkaConsumers(kafkas, config, kafkaConsumers, connections, runHelper);
-  },
-  kafkaConnectionHealthCheck: async function (connection) {
-    if (!connection.brokers) {
-      return { status: "error", message: "Kafka broker list not defined" }
-    }
-    const kafka = new Kafkajs.Kafka({ brokers: [connection.brokers] });
-    try {
-      // Try to get metadata from Kafka to check the connection
-      const admin = kafka.admin();
-      await admin.connect();
-      // Fetch metadata (this will trigger a request to Kafka to see if it's reachable)
-      const metadata = await admin.fetchTopicMetadata({ topics: [] });
-      logger.info({ message: "Kafka is reachable!", metadata: metadata });
-      await admin.disconnect(); // Disconnect after metadata check
-      return { status: "ok", message: "Kafka connected, metadata vissible" };
-    } catch (error) {
-      logger.info({ message: "Kafka is not reachable", error: error });
-      return { status: "error", message: error.message };
-    }
-
    }
+    let producer = await getOrCreateProducer(connection);
+    await producer.send({ topic: topic, messages: messages });
+    return null;
+  },
+  kafkaConnections: async function(kafkaConsumers, connections, buildConfigItems, runHelper){
+    return createKafkaInstances(kafkaConsumers, connections, buildConfigItems, runHelper);
+  },
+  startKafkaConsumers: async function(kafkas,config, kafkaConsumers, connections, runHelper){
+    startKafkaConsumers(kafkas, config, kafkaConsumers, connections, runHelper);
+  },
+  kafkaConnectionHealthCheck: async function (connection) {
+    if (!connection.brokers) {
+      return { status: "error", message: "Kafka broker list not defined" }
+    }
+    const kafka = new Kafkajs.Kafka({ brokers: [connection.brokers] });
+    try {
+      // Try to get metadata from Kafka to check the connection
+      const admin = kafka.admin();
+      await admin.connect();
+      // Fetch metadata (this will trigger a request to Kafka to see if it's reachable)
+      const metadata = await admin.fetchTopicMetadata({ topics: [] });
+      logger.info({message: "Kafka is reachable!", metadata:metadata});
+      await admin.disconnect(); // Disconnect after metadata check
+      return { status: "ok", message: "Kafka connected, metadata vissible" };
+    } catch (error) {
+      logger.info({ message: "Kafka is not reachable", error: error });
+      return { status: "error", message: error.message };
+    }
+
  }
 };
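
Three functional changes stand out in blz-kafka (most of the remaining -/+ pairs with identical text are whitespace-only edits): the producer is now created with Kafkajs.Partitioners.LegacyPartitioner, which in KafkaJS 2.x restores the pre-2.0 default partitioning; consumers can forward failed messages to a parameterized retry topic via kafkaSend before committing the original offset; and the exports gain kafkaGetTopics and kafkaConnectionHealthCheck. A rough usage sketch of the new exports; the require path and the connection shape are assumptions inferred from getConfig above:

    // Hypothetical usage; the require path and connection fields are assumptions.
    const blzKafka = require('@blazedpath/commons/blz-kafka');

    // getConfig above splits connection.brokers on commas, so a single string works.
    const connection = { name: 'main', brokers: 'localhost:9092' };

    async function demo() {
      await blzKafka.kafkaSend(connection, 'demo-topic', [
        { key: 'k1', value: JSON.stringify({ hello: 'world' }) },
      ]);
      console.log(await blzKafka.kafkaGetTopics(connection));
      const health = await blzKafka.kafkaConnectionHealthCheck(connection);
      console.log(health.status); // 'ok' or 'error'
    }

    demo().catch(console.error);

One caveat: the added kafkaConnectionHealthCheck calls logger.info, but no logger appears in any context line of this diff; unless it is defined elsewhere in the module, both the success and failure paths would raise a ReferenceError instead of returning a status object.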