azify-logger 1.0.26 → 1.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,439 @@
1
+ #!/usr/bin/env node
2
+
3
+ const os = require('os')
4
+ const http = require('http')
5
+ const https = require('https')
6
+ const Redis = require('ioredis')
7
+ const axios = require('axios')
8
+ const { DEFAULT_STREAM_KEY, DEFAULT_MAXLEN } = require('../queue/redisQueue')
9
+
10
// ---------------------------------------------------------------------------
// Runtime configuration (every value overridable through environment vars).
// ---------------------------------------------------------------------------

// Reads an environment variable and coerces it to a number, falling back to
// `fallback` when the variable is unset or empty.
const envNumber = (name, fallback) => Number(process.env[name] || fallback)

// Stream the producer writes log entries to; the legacy queue-key variable
// is still honoured for backward compatibility.
const STREAM_KEY = process.env.AZIFY_LOGGER_REDIS_STREAM || process.env.AZIFY_LOGGER_REDIS_QUEUE_KEY || DEFAULT_STREAM_KEY
// Entries that exhausted their delivery attempts are parked here.
const DEAD_LETTER_STREAM_KEY = process.env.AZIFY_LOGGER_REDIS_DLQ || `${STREAM_KEY}:dlq`
const REDIS_URL = process.env.AZIFY_LOGGER_REDIS_URL || 'redis://localhost:6381'
const WORKER_GROUP = process.env.AZIFY_LOGGER_REDIS_GROUP || 'azify-logger-workers'
// Unique per host+pid so several workers can share one consumer group.
const CONSUMER_NAME = process.env.AZIFY_LOGGER_REDIS_CONSUMER || `${os.hostname()}-${process.pid}`
const MAX_BATCH = envNumber('AZIFY_LOGGER_REDIS_BATCH', 500)
const BLOCK_MS = envNumber('AZIFY_LOGGER_REDIS_BLOCK', 5000)
const MAX_DELIVERY_ATTEMPTS = envNumber('AZIFY_LOGGER_MAX_DELIVERY_ATTEMPTS', 10)
// How long (ms) a pending message may sit unacked before it is reclaimed.
const PENDING_IDLE_TIMEOUT = envNumber('AZIFY_LOGGER_PENDING_IDLE_TIMEOUT', 60000)

const TRANSPORT_TIMEOUT = envNumber('AZIFY_LOGGER_HTTP_TIMEOUT', 250)
const MAX_SOCKETS = envNumber('AZIFY_LOGGER_MAX_SOCKETS', 50)
// Never allow fewer than one in-flight delivery.
const WORKER_CONCURRENCY = Math.max(1, envNumber('AZIFY_LOGGER_WORKER_CONCURRENCY', 100))
const NO_GROUP_RETRY_DELAY = envNumber('AZIFY_LOGGER_WORKER_NOGROUP_DELAY', 250)

// Keep-alive agents so repeated deliveries reuse TCP connections.
const httpAgent = new http.Agent({ keepAlive: true, maxSockets: MAX_SOCKETS })
const httpsAgent = new https.Agent({ keepAlive: true, maxSockets: MAX_SOCKETS })
27
+
28
// Shared ioredis client options.
//
// - Auto-pipelining batches commands issued in the same tick.
// - `maxRetriesPerRequest: null` keeps blocking reads (XREADGROUP) from
//   being aborted by the per-command retry limit.
const redisOptions = {
  enableAutoPipelining: true,
  maxRetriesPerRequest: null,
  // Exponential backoff (1s, 1.5s, 2.25s, ...) capped at 10s between retries.
  retryStrategy: (attempt) => Math.min(1000 * 1.5 ** attempt, 10000),
  // Force a reconnect when the server went read-only (failover) or the
  // connection was reset/refused.
  reconnectOnError: (err) => {
    const message = (err && err.message) || ''
    return ['READONLY', 'ECONNRESET', 'ECONNREFUSED'].some((token) => message.includes(token))
  }
}
43
+
44
// ---------------------------------------------------------------------------
// Mutable worker state.
// ---------------------------------------------------------------------------

// Minimum gap (ms) between repeated "Redis unavailable" log lines.
const REDIS_ERROR_LOG_INTERVAL = 300000

let stopRequested = false          // set by shutdown(); breaks the consume loop
let deliveries = 0                 // successful HTTP deliveries in this process
let lastRedisErrorLog = 0          // timestamp of the last local error log line
let consecutiveNoGroupErrors = 0   // NOGROUP errors seen since the last success
let redisErrorCount = 0            // connection errors since the last connect

// Dedupe flags live on `global` so several copies of this module (e.g. the
// file bundled twice) still emit the Redis-outage warning only once.
if (typeof global.__azifyLoggerRedisErrorLogged === 'undefined') {
  global.__azifyLoggerRedisErrorLogged = false
  global.__azifyLoggerRedisErrorLastLog = 0
}
54
+
55
// Single shared Redis connection for this worker.
const redis = new Redis(REDIS_URL, redisOptions)

// Report an outage at most once process-wide and at most once per
// REDIS_ERROR_LOG_INTERVAL; the host application keeps running regardless.
redis.on('error', (err) => {
  const now = Date.now()
  if (global.__azifyLoggerRedisErrorLogged) return
  if (now - global.__azifyLoggerRedisErrorLastLog <= REDIS_ERROR_LOG_INTERVAL) return

  const message = (err && err.message) || ''
  const connectionIssue = (err && err.code === 'ECONNREFUSED') || message.includes('ECONNREFUSED') || message.includes('Redis')
  if (!connectionIssue) return

  global.__azifyLoggerRedisErrorLogged = true
  global.__azifyLoggerRedisErrorLastLog = now
  redisErrorCount++
  process.stderr.write('[azify-logger] ⚠️ Redis indisponível. O sistema de logging está desabilitado. A aplicação continua funcionando normalmente.\n')
  lastRedisErrorLog = now
})

// Once the connection is back, reset every throttle/counter so a future
// outage gets reported again.
redis.on('connect', () => {
  if (redisErrorCount === 0 && !global.__azifyLoggerRedisErrorLogged) return
  redisErrorCount = 0
  lastRedisErrorLog = 0
  consecutiveNoGroupErrors = 0
  global.__azifyLoggerRedisErrorLogged = false
  global.__azifyLoggerRedisErrorLastLog = 0
})
77
+
78
// Fail fast on programmer errors: crash the worker and let the supervisor
// (or the parent process) restart it instead of running in a broken state.
const exitOnFatal = (label) => (detail) => {
  console.error(`[azify-logger][worker] ${label}:`, detail)
  process.exit(1)
}

process.on('uncaughtException', exitOnFatal('uncaughtException'))
process.on('unhandledRejection', exitOnFatal('unhandledRejection'))
87
+
88
/**
 * Creates the consumer group (and, via MKSTREAM, the stream itself) if it
 * does not exist yet. An already-existing group (BUSYGROUP) is not an error;
 * anything else is re-thrown for the caller to handle.
 */
async function ensureGroup() {
  try {
    await redis.xgroup('CREATE', STREAM_KEY, WORKER_GROUP, '0', 'MKSTREAM')
  } catch (err) {
    const message = String(err && err.message)
    if (!message.includes('BUSYGROUP')) {
      throw err
    }
  }
}
97
+
98
// True when the error means the consumer group (or its stream) is missing.
function isNoGroupError(err) {
  const message = err && err.message
  return typeof message === 'string' && message.includes('NOGROUP')
}
101
+
102
// Promise-based delay helper.
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms))
105
+
106
// HTTP header names (lower-case) whose values must never leave the process.
// sanitizeHeaders() lower-cases every incoming key before the lookup.
const SENSITIVE_HEADER_KEYS = new Set([
  'authorization',
  'proxy-authorization',
  'cookie',
  'set-cookie',
  'x-api-key',
  'x-auth-token',
  'x-access-token',
  'x-signature',
  'x-timestamp'
])
117
+
118
// Body field names whose values must be masked. sanitizeBody() lower-cases
// each key before the lookup, so every entry here MUST be lower-case.
//
// Fix: the original set stored camelCase entries ('apiKey', 'accessToken',
// 'refreshToken', 'creditCard', ...) which could never match a lower-cased
// key, so those fields were silently left unmasked. They are now stored in
// their lower-case form alongside the snake_case variants.
const SENSITIVE_BODY_FIELDS = new Set([
  'password',
  'token',
  'secret',
  'apikey',
  'api_key',
  'accesstoken',
  'access_token',
  'refreshtoken',
  'refresh_token',
  'clientsecret',
  'client_secret',
  'creditcard',
  'credit_card',
  'cvv',
  'cvc'
])
135
+
136
/**
 * Returns a copy of `headers` that is safe to log: values of sensitive
 * header names (matched case-insensitively against SENSITIVE_HEADER_KEYS)
 * are replaced with '***'; every other value is stringified (arrays
 * element-wise), while null/undefined values pass through unchanged.
 * Non-object input yields an empty object.
 */
function sanitizeHeaders(headers) {
  if (!headers || typeof headers !== 'object') {
    return {}
  }
  const result = {}
  for (const [name, value] of Object.entries(headers)) {
    if (SENSITIVE_HEADER_KEYS.has(String(name).toLowerCase())) {
      result[name] = '***'
      continue
    }
    if (Array.isArray(value)) {
      result[name] = value.map(String)
    } else {
      result[name] = value != null ? String(value) : value
    }
  }
  return result
}
153
+
154
/**
 * Recursively masks sensitive fields in a request/response body.
 *
 * A key is masked (replaced with '***') when its lower-cased name is in
 * SENSITIVE_BODY_FIELDS or contains 'password'/'secret'. Objects and arrays
 * are copied; primitives pass through unchanged.
 *
 * Fix: circular references previously recursed until the stack overflowed;
 * the RangeError was then swallowed by the catch, returning raw unsanitized
 * data. A WeakSet of objects on the current path now cuts real cycles off
 * with the '[Circular]' marker.
 *
 * @param {*} body        Any value; only objects/arrays are inspected.
 * @param {WeakSet} seen  Internal: objects already on the current path.
 * @returns {*} Sanitized copy, or the input itself for non-objects.
 */
function sanitizeBody(body, seen = new WeakSet()) {
  if (!body || typeof body !== 'object') {
    return body
  }
  if (seen.has(body)) {
    // True cycle on the current path: stop descending.
    return '[Circular]'
  }

  try {
    seen.add(body)
    const sanitized = Array.isArray(body) ? [] : {}

    for (const key in body) {
      if (!body.hasOwnProperty(key)) continue
      const lower = String(key).toLowerCase()

      if (SENSITIVE_BODY_FIELDS.has(lower) || lower.includes('password') || lower.includes('secret')) {
        sanitized[key] = '***'
      } else if (typeof body[key] === 'object' && body[key] !== null) {
        sanitized[key] = sanitizeBody(body[key], seen)
      } else {
        sanitized[key] = body[key]
      }
    }

    // Allow the same (non-circular) object to appear under several keys.
    seen.delete(body)
    return sanitized
  } catch (err) {
    // Best effort: sanitization must never break log delivery.
    return body
  }
}
180
+
181
/**
 * Returns a sanitized copy of a log payload WITHOUT mutating the caller's
 * object.
 *
 * Fix: the original spread only the top level (`{ ...payload }`) and then
 * wrote through `sanitized.meta.request.headers`, mutating the shared `meta`
 * object in place — so e.g. a requeued entry already carried masked data.
 * Each level that gets modified is now copied first.
 */
function sanitizePayload(payload) {
  if (!payload || typeof payload !== 'object') {
    return payload
  }

  const sanitized = { ...payload }

  if (sanitized.meta && typeof sanitized.meta === 'object') {
    const meta = { ...sanitized.meta }

    if (meta.request && typeof meta.request === 'object') {
      const request = { ...meta.request }
      if (request.headers) request.headers = sanitizeHeaders(request.headers)
      if (request.body) request.body = sanitizeBody(request.body)
      meta.request = request
    }

    if (meta.response && typeof meta.response === 'object') {
      const response = { ...meta.response }
      if (response.headers) response.headers = sanitizeHeaders(response.headers)
      if (response.body) response.body = sanitizeBody(response.body)
      meta.response = response
    }

    // Flat variants used by some producers.
    if (meta.headers) meta.headers = sanitizeHeaders(meta.headers)
    if (meta.responseHeaders) meta.responseHeaders = sanitizeHeaders(meta.responseHeaders)
    if (meta.responseBody) meta.responseBody = sanitizeBody(meta.responseBody)

    sanitized.meta = meta
  }

  return sanitized
}
217
+
218
/**
 * Posts one log entry to its configured collector URL. Entries without a
 * `loggerUrl` are silently dropped. The payload is sanitized (best effort)
 * before it leaves the process.
 *
 * NOTE(review): `validateStatus: () => true` means ANY HTTP status — even
 * 5xx — counts as a successful delivery, so the requeue/dead-letter logic in
 * processEntry() only ever fires for network errors and timeouts. Confirm
 * that is intentional.
 */
async function deliver(entry) {
  if (!entry || typeof entry !== 'object') {
    return
  }
  const target = entry.loggerUrl
  if (!target) {
    return
  }

  let body = entry.payload
  if (body && typeof body === 'object') {
    try {
      body = sanitizePayload(entry.payload)
    } catch (err) {
      // Sanitization must never block delivery; fall back to the raw payload.
      body = entry.payload
    }
  }

  await axios.post(target, body, {
    headers: entry.headers || {},
    timeout: TRANSPORT_TIMEOUT,
    httpAgent,
    httpsAgent,
    validateStatus: () => true, // accept every status; see NOTE above
    maxRedirects: 0
  })

  deliveries += 1
}
247
+
248
/**
 * Pushes a failed entry back onto the stream with an incremented attempt
 * counter so it is retried later. MAXLEN '~' keeps the stream trimmed to
 * approximately DEFAULT_MAXLEN entries.
 */
async function requeue(entry, attempts) {
  const retried = { ...entry, attempts: attempts + 1, lastAttemptAt: Date.now() }
  await redis.xadd(STREAM_KEY, 'MAXLEN', '~', DEFAULT_MAXLEN, '*', 'entry', JSON.stringify(retried))
}
256
+
257
/**
 * Parks an entry that exhausted its retries on the dead-letter stream,
 * annotated with the failure reason and a timestamp for later inspection.
 */
async function deadLetter(entry, reason) {
  const record = { ...entry, deadLetterReason: reason, deadLetterAt: Date.now() }
  await redis.xadd(DEAD_LETTER_STREAM_KEY, '*', 'entry', JSON.stringify(record))
}
265
+
266
// Marks a stream entry as processed for this consumer group (XACK).
async function acknowledge(id) {
  await redis.xack(STREAM_KEY, WORKER_GROUP, id)
}
269
+
270
/**
 * Re-claims messages that another consumer read but never acknowledged.
 *
 * Fix: XPENDING with a range+count returns rows shaped
 * [id, consumerName, idleTimeMs, deliveryCount] — the idle time is at
 * index 2. The original compared index 1 (the consumer NAME, a string)
 * against PENDING_IDLE_TIMEOUT, which is always false, so no pending
 * message was ever reclaimed.
 */
async function claimPending() {
  try {
    const pending = await redis.xpending(STREAM_KEY, WORKER_GROUP, '-', '+', MAX_BATCH)
    const stale = pending.filter((row) => Number(row[2]) >= PENDING_IDLE_TIMEOUT)
    for (const row of stale) {
      const id = row[0]
      // JUSTID avoids shipping the payload twice; we re-read it below.
      const claimed = await redis.xclaim(STREAM_KEY, WORKER_GROUP, CONSUMER_NAME, 0, id, 'JUSTID')
      if (!claimed || !claimed.length) continue
      const entries = await redis.xrange(STREAM_KEY, id, id)
      if (entries && entries.length) {
        await processBatch(entries)
      }
    }
  } catch (err) {
    if (isNoGroupError(err)) {
      consecutiveNoGroupErrors += 1
      if (consecutiveNoGroupErrors === 1) {
        console.warn('[azify-logger][worker] grupo não encontrado ao recuperar pendentes, recriando…')
      }
      await ensureGroup()
      if (NO_GROUP_RETRY_DELAY > 0) {
        await sleep(NO_GROUP_RETRY_DELAY)
      }
      return
    }
    console.error('[azify-logger][worker] erro ao reivindicar pendentes:', err && err.message ? err.message : err)
  }
}
299
+
300
/**
 * Handles a single raw stream entry: parse, deliver, then acknowledge.
 *
 * Fixes:
 * - The original ACKed *before* requeueing/dead-lettering, so a Redis
 *   failure between the two silently dropped the message (at-most-once).
 *   The retry/DLQ record is now written first and the ACK happens last
 *   (at-least-once).
 * - The 'entry' field is located by name instead of assuming it is the
 *   first field pair.
 * - A payload that parses to a non-object (e.g. "null") is acked and
 *   dropped instead of throwing on `entry.attempts`.
 *
 * @param {[string, string[]]} raw  [id, [field, value, ...]] as returned by
 *                                  XREADGROUP / XRANGE.
 */
async function processEntry(raw) {
  const id = raw[0]
  const fields = raw[1] || []

  // Locate the 'entry' field among the flat [name, value, ...] pairs.
  let serialized
  for (let i = 0; i + 1 < fields.length; i += 2) {
    if (fields[i] === 'entry') {
      serialized = fields[i + 1]
      break
    }
  }

  let entry
  try {
    entry = JSON.parse(serialized)
  } catch (_) {
    await acknowledge(id) // malformed payload: drop it
    return
  }
  if (!entry || typeof entry !== 'object') {
    await acknowledge(id)
    return
  }

  const attempts = Number(entry.attempts || 0)
  try {
    await deliver(entry)
    await acknowledge(id)
    consecutiveNoGroupErrors = 0
  } catch (error) {
    // Persist the retry/DLQ record BEFORE acking so a crash in between
    // duplicates the message instead of losing it.
    if (attempts + 1 >= MAX_DELIVERY_ATTEMPTS) {
      await deadLetter(entry, error && error.message ? error.message : 'delivery-error')
    } else {
      await requeue(entry, attempts)
    }
    await acknowledge(id)
  }
}
327
+
328
/**
 * Processes a batch of stream entries with bounded parallelism.
 *
 * Fix: the original built ALL chunk promises up front
 * (`chunks.map(chunk => Promise.allSettled(...))`), which started every
 * processEntry() immediately — the WORKER_CONCURRENCY cap had no effect.
 * Chunks are now awaited one after another, so at most WORKER_CONCURRENCY
 * deliveries are in flight at any time. Individual failures are isolated by
 * allSettled and handled inside processEntry().
 */
async function processBatch(entries) {
  if (!entries || !entries.length) {
    return
  }

  for (let start = 0; start < entries.length; start += WORKER_CONCURRENCY) {
    const chunk = entries.slice(start, start + WORKER_CONCURRENCY)
    await Promise.allSettled(chunk.map((entry) => processEntry(entry)))
  }
}
346
+
347
/**
 * Main worker loop: (re)create the consumer group when needed, read new
 * messages with XREADGROUP, process them, and fall back to claiming stale
 * pending messages when the stream is idle. Runs until shutdown() flips
 * stopRequested.
 */
async function consumeLoop() {
  let groupReady = false

  while (!stopRequested) {
    // Phase 1: make sure the consumer group exists.
    if (!groupReady) {
      try {
        await ensureGroup()
        groupReady = true
        consecutiveNoGroupErrors = 0
      } catch (err) {
        const errMsg = err && err.message ? err.message : String(err)
        if (errMsg.includes('BUSYGROUP')) {
          // Somebody else created it first — that is fine.
          groupReady = true
        } else {
          const now = Date.now()
          if (now - lastRedisErrorLog > 5000) {
            console.error('[azify-logger][worker] erro ao garantir grupo:', errMsg)
            lastRedisErrorLog = now
          }
          await sleep(1000)
        }
        continue
      }
    }

    // Phase 2: read and process new messages.
    try {
      const batch = await redis.xreadgroup(
        'GROUP', WORKER_GROUP, CONSUMER_NAME,
        'COUNT', MAX_BATCH,
        'BLOCK', BLOCK_MS,
        'STREAMS', STREAM_KEY, '>'
      )

      if (Array.isArray(batch) && batch.length > 0) {
        // XREADGROUP returns [[streamName, [[id, fields], ...]], ...]
        for (const stream of batch) {
          if (!Array.isArray(stream) || stream.length < 2) continue
          const entries = stream[1]
          if (Array.isArray(entries) && entries.length > 0) {
            await processBatch(entries)
          }
        }
        consecutiveNoGroupErrors = 0
        continue
      }

      // Stream idle (BLOCK timed out): rescue messages stuck with dead consumers.
      await claimPending()
    } catch (err) {
      const errMsg = err && err.message ? err.message : String(err)

      if (isNoGroupError(err) || errMsg.includes('syntax error') || errMsg.includes('NOGROUP')) {
        // Group vanished (FLUSHALL, failover, ...): recreate it on the next pass.
        groupReady = false
        consecutiveNoGroupErrors += 1
        if (consecutiveNoGroupErrors === 1) {
          console.warn('[azify-logger][worker] grupo não encontrado ou erro de sintaxe, recriando…')
        }
        await sleep(NO_GROUP_RETRY_DELAY)
        continue
      }

      const now = Date.now()
      if (now - lastRedisErrorLog > 5000) {
        console.error('[azify-logger][worker] erro ao ler stream:', errMsg)
        lastRedisErrorLog = now
      }
      await sleep(250)
    }
  }
}
417
+
418
/**
 * Graceful shutdown: stop the consume loop, close the Redis connection and
 * exit. Errors from quit() are ignored — we are leaving anyway.
 */
async function shutdown() {
  stopRequested = true
  console.log('[azify-logger][worker] encerrando...')
  try {
    await redis.quit()
  } catch (_) {
    // connection may already be gone
  }
  process.exit(0)
}
424
+
425
// Wire up signal handlers, then start consuming. A worker that cannot even
// create its consumer group exits non-zero so a supervisor can restart it.
process.on('SIGINT', shutdown)
process.on('SIGTERM', shutdown)

ensureGroup()
  .then(() => {
    console.log('[azify-logger][worker] consumindo stream', STREAM_KEY, 'como', CONSUMER_NAME)
    // Notify a forking parent (child_process.fork) that the worker is live.
    if (process.send) {
      process.send({ type: 'azify-logger:ready', pid: process.pid, stream: STREAM_KEY })
    }
    return consumeLoop()
  })
  .catch((err) => {
    console.error('[azify-logger][worker] não foi possível iniciar:', err)
    process.exit(1)
  })