azify-logger 1.0.26 → 1.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,467 @@
+ #!/usr/bin/env node
+
+ const os = require('os')
+ const http = require('http')
+ const https = require('https')
+ const Redis = require('ioredis')
+ const axios = require('axios')
+ const { DEFAULT_STREAM_KEY, DEFAULT_MAXLEN } = require('../queue/redisQueue')
+
+ const STREAM_KEY = process.env.AZIFY_LOGGER_REDIS_STREAM || process.env.AZIFY_LOGGER_REDIS_QUEUE_KEY || DEFAULT_STREAM_KEY
+ const DEAD_LETTER_STREAM_KEY = process.env.AZIFY_LOGGER_REDIS_DLQ || `${STREAM_KEY}:dlq`
+ const REDIS_URL = process.env.AZIFY_LOGGER_REDIS_URL || 'redis://localhost:6381'
+ const WORKER_GROUP = process.env.AZIFY_LOGGER_REDIS_GROUP || 'azify-logger-workers'
+ const CONSUMER_NAME = process.env.AZIFY_LOGGER_REDIS_CONSUMER || `${os.hostname()}-${process.pid}`
+ const MAX_BATCH = Number(process.env.AZIFY_LOGGER_REDIS_BATCH || 100)
+ const BLOCK_MS = Number(process.env.AZIFY_LOGGER_REDIS_BLOCK || 5000)
+ const MAX_DELIVERY_ATTEMPTS = Number(process.env.AZIFY_LOGGER_MAX_DELIVERY_ATTEMPTS || 10)
+ const PENDING_IDLE_TIMEOUT = Number(process.env.AZIFY_LOGGER_PENDING_IDLE_TIMEOUT || 60000)
+
+ const TRANSPORT_TIMEOUT = Number(process.env.AZIFY_LOGGER_HTTP_TIMEOUT || 250)
+ const MAX_SOCKETS = Number(process.env.AZIFY_LOGGER_MAX_SOCKETS || 20)
+ const WORKER_CONCURRENCY = Math.max(1, Number(process.env.AZIFY_LOGGER_WORKER_CONCURRENCY || 25))
+ const NO_GROUP_RETRY_DELAY = Number(process.env.AZIFY_LOGGER_WORKER_NOGROUP_DELAY || 250)
+
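Every tunable above is read from the environment. As a minimal sketch (not from the package docs), a host application could fork this worker with overrides; the module path below is an assumption, adjust it to wherever the package installs the script:

// Sketch: fork the worker with env overrides (the module path is hypothetical)
const { fork } = require('child_process')

const worker = fork(require.resolve('azify-logger/bin/redisWorker'), [], {
  env: {
    ...process.env,
    AZIFY_LOGGER_REDIS_URL: 'redis://localhost:6379',
    AZIFY_LOGGER_REDIS_BATCH: '200',        // up to 200 entries per XREADGROUP call
    AZIFY_LOGGER_WORKER_CONCURRENCY: '50'   // up to 50 parallel deliveries
  }
})

// The worker announces readiness over the IPC channel (see process.send at the bottom of the file)
worker.on('message', (msg) => {
  if (msg && msg.type === 'azify-logger:ready') {
    console.log('logger worker ready on stream', msg.stream)
  }
})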
+ const httpAgent = new http.Agent({ keepAlive: true, maxSockets: MAX_SOCKETS })
+ const httpsAgent = new https.Agent({ keepAlive: true, maxSockets: MAX_SOCKETS })
+
+ const redisOptions = {
+   enableAutoPipelining: true,
+   maxRetriesPerRequest: null,
+   retryStrategy(times) {
+     const delay = Math.min(1000 * Math.pow(1.5, times), 10000)
+     return delay
+   },
+   reconnectOnError(err) {
+     const msg = err && err.message ? err.message : ''
+     if (msg.includes('READONLY') || msg.includes('ECONNRESET') || msg.includes('ECONNREFUSED')) {
+       return true
+     }
+     return false
+   }
+ }
+
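For reference, the retryStrategy above backs off exponentially and caps at 10 s; a quick sketch of the schedule it produces:

// Reconnect delays: min(1000 * 1.5^times, 10000) ms
for (let times = 1; times <= 6; times++) {
  console.log(times, Math.min(1000 * Math.pow(1.5, times), 10000))
}
// 1 → 1500, 2 → 2250, 3 → 3375, 4 → 5062.5, 5 → 7593.75, 6 and beyond → 10000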
+ let stopRequested = false
+ let deliveries = 0
+ let lastRedisErrorLog = 0
+ let consecutiveNoGroupErrors = 0
+ let redisErrorCount = 0
+ const REDIS_ERROR_LOG_INTERVAL = 300000 // 5 minutes between logs (avoid repeated logs)
+
+ // Use a global flag shared with redisQueue.js to guarantee a single log per process
+ if (typeof global.__azifyLoggerRedisErrorLogged === 'undefined') {
+   global.__azifyLoggerRedisErrorLogged = false
+   global.__azifyLoggerRedisErrorLastLog = 0
+ }
+
+ const redis = new Redis(REDIS_URL, redisOptions)
+ redis.on('error', (err) => {
+   // Log only once per whole process (shared with the producer):
+   // if the producer already logged, do not log again
+   const now = Date.now()
+   if (!global.__azifyLoggerRedisErrorLogged && now - global.__azifyLoggerRedisErrorLastLog > REDIS_ERROR_LOG_INTERVAL) {
+     if (err && (err.code === 'ECONNREFUSED' || err.message?.includes('ECONNREFUSED') || err.message?.includes('Redis'))) {
+       global.__azifyLoggerRedisErrorLogged = true
+       global.__azifyLoggerRedisErrorLastLog = now
+       redisErrorCount++
+       // Clear message: the application keeps working, only logging is disabled
+       process.stderr.write('[azify-logger] ⚠️ Redis unavailable. The logging system is disabled. The application keeps running normally.\n')
+       lastRedisErrorLog = now
+     }
+   }
+   // After the first message, stay completely silent
+ })
+ redis.on('connect', () => {
+   // Reset the counters on a successful connection (without logging, to avoid noise)
+   if (redisErrorCount > 0 || global.__azifyLoggerRedisErrorLogged) {
+     redisErrorCount = 0
+     lastRedisErrorLog = 0
+     consecutiveNoGroupErrors = 0
+     global.__azifyLoggerRedisErrorLogged = false
+     global.__azifyLoggerRedisErrorLastLog = 0
+   }
+ })
+
+ process.on('uncaughtException', (err) => {
+   console.error('[azify-logger][worker] uncaughtException:', err)
+   process.exit(1)
+ })
+
+ process.on('unhandledRejection', (reason) => {
+   console.error('[azify-logger][worker] unhandledRejection:', reason)
+   process.exit(1)
+ })
+
+ async function ensureGroup() {
+   try {
+     await redis.xgroup('CREATE', STREAM_KEY, WORKER_GROUP, '0', 'MKSTREAM')
+   } catch (err) {
+     if (!String(err && err.message).includes('BUSYGROUP')) {
+       throw err
+     }
+   }
+ }
+
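XGROUP CREATE with MKSTREAM creates the stream if it is missing, and a repeat call fails with BUSYGROUP, which ensureGroup swallows, so the function is safe to call at any time. A minimal sketch, inside an async context:

// ensureGroup is idempotent
await ensureGroup() // first run: creates the stream and the consumer group
await ensureGroup() // second run: Redis replies BUSYGROUP, which is caught and ignored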
+ function isNoGroupError(err) {
+   return typeof (err && err.message) === 'string' && err.message.includes('NOGROUP')
+ }
+
+ function sleep(ms) {
+   return new Promise(resolve => setTimeout(resolve, ms))
+ }
+
+ // Sensitive headers that must be masked
+ const SENSITIVE_HEADER_KEYS = new Set([
+   'authorization',
+   'cookie',
+   'set-cookie',
+   'x-api-key',
+   'x-auth-token',
+   'x-access-token',
+   'proxy-authorization',
+   'x-signature',
+   'x-timestamp'
+ ])
+
+ // Sensitive body fields that must be masked
+ // (entries are lowercase because sanitizeBody lowercases keys before the lookup;
+ // camelCase entries such as 'apiKey' would never match)
+ const SENSITIVE_BODY_FIELDS = new Set([
+   'password',
+   'token',
+   'secret',
+   'apikey',
+   'api_key',
+   'accesstoken',
+   'access_token',
+   'refreshtoken',
+   'refresh_token',
+   'clientsecret',
+   'client_secret',
+   'creditcard',
+   'credit_card',
+   'cvv',
+   'cvc'
+ ])
+
+ // Sanitize headers
+ function sanitizeHeaders(headers) {
+   if (!headers || typeof headers !== 'object') {
+     return {}
+   }
+   const sanitized = {}
+   for (const key in headers) {
+     if (!headers.hasOwnProperty(key)) continue
+     const lower = String(key).toLowerCase()
+     if (SENSITIVE_HEADER_KEYS.has(lower)) {
+       sanitized[key] = '***'
+     } else {
+       const value = headers[key]
+       sanitized[key] = Array.isArray(value) ? value.map(String) : (value != null ? String(value) : value)
+     }
+   }
+   return sanitized
+ }
+
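For illustration, a quick sketch of what sanitizeHeaders produces (the values are made up):

const masked = sanitizeHeaders({
  Authorization: 'Bearer abc123',
  'x-api-key': 'secret-key',
  'content-type': 'application/json',
  'content-length': 42
})
// → { Authorization: '***', 'x-api-key': '***',
//     'content-type': 'application/json', 'content-length': '42' }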
+ // Sanitize the body (no truncation; keep the full payload)
+ function sanitizeBody(body) {
+   if (!body || typeof body !== 'object') {
+     // Not an object: return as-is (do not truncate strings)
+     return body
+   }
+
+   try {
+     // Sanitize sensitive fields recursively (no size limits)
+     const sanitized = Array.isArray(body) ? [] : {}
+
+     for (const key in body) {
+       if (!body.hasOwnProperty(key)) continue
+       const lower = String(key).toLowerCase()
+
+       if (SENSITIVE_BODY_FIELDS.has(lower) || lower.includes('password') || lower.includes('secret')) {
+         // Mask sensitive fields
+         sanitized[key] = '***'
+       } else if (typeof body[key] === 'object' && body[key] !== null) {
+         // Recursively sanitize nested objects (no depth limit)
+         sanitized[key] = sanitizeBody(body[key])
+       } else {
+         // Copy non-sensitive values unchanged
+         sanitized[key] = body[key]
+       }
+     }
+
+     return sanitized
+   } catch (err) {
+     // If sanitization fails, return the original body (do not truncate)
+     return body
+   }
+ }
+
+ // Sanitize the full payload
+ function sanitizePayload(payload) {
+   if (!payload || typeof payload !== 'object') {
+     return payload
+   }
+
+   // Shallow copy: meta is still shared with the input object, so the assignments
+   // below mutate it in place; harmless in this worker because masking is idempotent
+   const sanitized = { ...payload }
+
+   // Sanitize meta, if present
+   if (sanitized.meta && typeof sanitized.meta === 'object') {
+     // Request headers
+     if (sanitized.meta.request && sanitized.meta.request.headers) {
+       sanitized.meta.request.headers = sanitizeHeaders(sanitized.meta.request.headers)
+     }
+
+     // Response headers
+     if (sanitized.meta.response && sanitized.meta.response.headers) {
+       sanitized.meta.response.headers = sanitizeHeaders(sanitized.meta.response.headers)
+     }
+
+     // Request body
+     if (sanitized.meta.request && sanitized.meta.request.body) {
+       sanitized.meta.request.body = sanitizeBody(sanitized.meta.request.body)
+     }
+
+     // Response body
+     if (sanitized.meta.response && sanitized.meta.response.body) {
+       sanitized.meta.response.body = sanitizeBody(sanitized.meta.response.body)
+     }
+
+     // HTTP client headers (interceptors)
+     if (sanitized.meta.headers) {
+       sanitized.meta.headers = sanitizeHeaders(sanitized.meta.headers)
+     }
+     if (sanitized.meta.responseHeaders) {
+       sanitized.meta.responseHeaders = sanitizeHeaders(sanitized.meta.responseHeaders)
+     }
+     if (sanitized.meta.responseBody) {
+       sanitized.meta.responseBody = sanitizeBody(sanitized.meta.responseBody)
+     }
+   }
+
+   return sanitized
+ }
+
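A worked sketch of the end-to-end masking (field values are made up):

const clean = sanitizePayload({
  level: 'info',
  meta: {
    request: {
      headers: { authorization: 'Bearer abc', accept: 'application/json' },
      body: { user: 'ana', password: 'hunter2', card: { cvv: '123' } }
    }
  }
})
// → meta.request.headers: { authorization: '***', accept: 'application/json' }
// → meta.request.body:    { user: 'ana', password: '***', card: { cvv: '***' } }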
+ async function deliver(entry) {
+   if (!entry || typeof entry !== 'object') {
+     return
+   }
+   const target = entry.loggerUrl
+   if (!target) {
+     return
+   }
+
+   // Sanitize the payload before sending
+   const sanitizedPayload = entry.payload ? sanitizePayload(entry.payload) : entry.payload
+
+   await axios.post(target, sanitizedPayload, {
+     headers: entry.headers || {},
+     timeout: TRANSPORT_TIMEOUT,
+     httpAgent,
+     httpsAgent,
+     validateStatus: () => true
+   })
+
+   deliveries += 1
+   // Per-delivery log removed to reduce noise
+ }
+
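Each queued entry carries its own destination, and deliver skips entries without one. Note that validateStatus: () => true makes any HTTP status count as a success; only network errors and timeouts throw and trigger a retry. A sketch of the entry shape this worker consumes (field names taken from the code, values illustrative):

const entry = {
  loggerUrl: 'https://logs.example.com/ingest',    // destination; entry is skipped if absent
  headers: { 'content-type': 'application/json' },
  payload: { level: 'info', message: 'user signed in', meta: {} },
  attempts: 0   // incremented by requeue() after each failed delivery
}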
+ async function requeue(entry, attempts) {
+   const next = {
+     ...entry,
+     attempts: attempts + 1,
+     lastAttemptAt: Date.now()
+   }
+   await redis.xadd(STREAM_KEY, 'MAXLEN', '~', DEFAULT_MAXLEN, '*', 'entry', JSON.stringify(next))
+ }
+
+ async function deadLetter(entry, reason) {
+   const payload = {
+     ...entry,
+     deadLetterReason: reason,
+     deadLetterAt: Date.now()
+   }
+   await redis.xadd(DEAD_LETTER_STREAM_KEY, '*', 'entry', JSON.stringify(payload))
+ }
+
+ async function acknowledge(id) {
+   await redis.xack(STREAM_KEY, WORKER_GROUP, id)
+ }
+
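Entries that exhaust MAX_DELIVERY_ATTEMPTS land on the dead-letter stream with a reason and timestamp attached. A minimal sketch for inspecting them, inside an async context against the same Redis instance:

// Read the 10 oldest dead-lettered entries
const dlq = await redis.xrange(DEAD_LETTER_STREAM_KEY, '-', '+', 'COUNT', 10)
for (const [id, fields] of dlq) {
  const entry = JSON.parse(fields[1]) // fields is ['entry', '<json>']
  console.log(id, entry.deadLetterReason, new Date(entry.deadLetterAt))
}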
+ async function claimPending() {
+   try {
+     const pending = await redis.xpending(STREAM_KEY, WORKER_GROUP, '-', '+', MAX_BATCH)
+     // Each pending entry is [id, consumer, idleMs, deliveryCount]; the idle time is index 2
+     const stale = pending.filter((item) => item[2] >= PENDING_IDLE_TIMEOUT)
+     for (const item of stale) {
+       const id = item[0]
+       const claim = await redis.xclaim(STREAM_KEY, WORKER_GROUP, CONSUMER_NAME, 0, id, 'JUSTID')
+       if (claim && claim.length) {
+         const entries = await redis.xrange(STREAM_KEY, id, id)
+         if (entries && entries.length) {
+           await processBatch(entries)
+         }
+       }
+     }
+   } catch (err) {
+     if (isNoGroupError(err)) {
+       consecutiveNoGroupErrors += 1
+       if (consecutiveNoGroupErrors === 1) {
+         console.warn('[azify-logger][worker] group missing while recovering pending entries, recreating…')
+       }
+       await ensureGroup()
+       if (NO_GROUP_RETRY_DELAY > 0) {
+         await sleep(NO_GROUP_RETRY_DELAY)
+       }
+       return
+     }
+     console.error('[azify-logger][worker] error claiming pending entries:', err && err.message ? err.message : err)
+   }
+ }
+
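The extended form of XPENDING used above returns one array per pending message, which is what the idle-time filter operates on (IDs and names below are illustrative):

// Reply shape of XPENDING <key> <group> - + <count>:
// [
//   ['1700000000000-0', 'host-1234', 72000, 3],  // [id, consumer, idle ms, delivery count]
//   ['1700000000001-0', 'host-1234', 1500, 1]
// ]
// With PENDING_IDLE_TIMEOUT = 60000, only the first entry counts as stale and gets reclaimed.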
+ async function processEntry(raw) {
+   const id = raw[0]
+   const fields = raw[1]
+   const serialized = fields[1]
+   let entry
+   try {
+     entry = JSON.parse(serialized)
+   } catch (_) {
+     await acknowledge(id)
+     return
+   }
+
+   const attempts = Number(entry.attempts || 0)
+   try {
+     await deliver(entry)
+     await acknowledge(id)
+     consecutiveNoGroupErrors = 0
+   } catch (error) {
+     // Silence delivery errors, do not pollute the application logs:
+     // either requeue silently or send to the DLQ after the max attempts
+     if (attempts + 1 >= MAX_DELIVERY_ATTEMPTS) {
+       await acknowledge(id)
+       await deadLetter(entry, error && error.message ? error.message : 'delivery-error')
+     } else {
+       await acknowledge(id)
+       await requeue(entry, attempts)
+     }
+   }
+ }
+
+ async function processBatch(entries) {
+   if (!entries || !entries.length) {
+     return
+   }
+
+   const executing = []
+   for (const entry of entries) {
+     executing.push(processEntry(entry))
+     if (executing.length >= WORKER_CONCURRENCY) {
+       await Promise.allSettled(executing.splice(0, executing.length))
+     }
+   }
+
+   if (executing.length) {
+     await Promise.allSettled(executing)
+   }
+ }
+
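processBatch caps in-flight deliveries by awaiting fixed waves of WORKER_CONCURRENCY promises rather than keeping a sliding window topped up, so one slow delivery holds back the start of the next wave. With the defaults a full read drains like this:

// MAX_BATCH = 100, WORKER_CONCURRENCY = 25:
//   wave 1: entries  1-25 start, then are awaited together
//   wave 2: entries 26-50 start only after every promise in wave 1 settles
//   ...four waves per full batch; each wave lasts as long as its slowest delivery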
+ async function consumeLoop() {
+   let groupEnsured = false
+
+   while (!stopRequested) {
+     // Make sure the group exists before trying to read
+     if (!groupEnsured) {
+       try {
+         await ensureGroup()
+         groupEnsured = true
+         consecutiveNoGroupErrors = 0
+       } catch (err) {
+         const errMsg = err && err.message ? err.message : String(err)
+         if (!errMsg.includes('BUSYGROUP')) {
+           const now = Date.now()
+           if (now - lastRedisErrorLog > 5000) {
+             console.error('[azify-logger][worker] error ensuring group:', errMsg)
+             lastRedisErrorLog = now
+           }
+           await sleep(1000)
+         } else {
+           groupEnsured = true
+         }
+         continue
+       }
+     }
+
+     let messages = null
+     try {
+       // Use xreadgroup directly from ioredis
+       // Syntax: xreadgroup('GROUP', group, consumer, 'COUNT', count, 'BLOCK', block, 'STREAMS', key, id)
+       messages = await redis.xreadgroup(
+         'GROUP', WORKER_GROUP, CONSUMER_NAME,
+         'COUNT', MAX_BATCH,
+         'BLOCK', BLOCK_MS,
+         'STREAMS', STREAM_KEY, '>'
+       )
+
+       // Process any returned messages
+       if (messages && Array.isArray(messages) && messages.length > 0) {
+         // XREADGROUP returns [[streamName, [entries]]]
+         for (const streamData of messages) {
+           if (Array.isArray(streamData) && streamData.length >= 2) {
+             const entries = streamData[1]
+             if (Array.isArray(entries) && entries.length > 0) {
+               await processBatch(entries)
+             }
+           }
+         }
+         consecutiveNoGroupErrors = 0
+         continue
+       }
+
+       // No new messages: check for pending ones
+       await claimPending()
+     } catch (err) {
+       const errMsg = err && err.message ? err.message : String(err)
+
+       // On group or syntax errors, recreate the group
+       if (isNoGroupError(err) || errMsg.includes('syntax error') || errMsg.includes('NOGROUP')) {
+         groupEnsured = false
+         consecutiveNoGroupErrors += 1
+         if (consecutiveNoGroupErrors === 1) {
+           console.warn('[azify-logger][worker] group missing or syntax error, recreating…')
+         }
+         await sleep(NO_GROUP_RETRY_DELAY)
+         continue
+       }
+
+       // Other errors: log at most every 5 seconds to avoid spam
+       const now = Date.now()
+       if (now - lastRedisErrorLog > 5000) {
+         console.error('[azify-logger][worker] error reading stream:', errMsg)
+         lastRedisErrorLog = now
+       }
+       await sleep(250)
+     }
+   }
+ }
+
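For reference, the reply the loop unpacks carries one element per stream read; a sketch with illustrative IDs:

// Reply shape of XREADGROUP ... STREAMS <STREAM_KEY> '>':
// [
//   ['<STREAM_KEY>',
//     [
//       ['1700000000000-0', ['entry', '{"loggerUrl":"...","payload":{}}']],
//       ['1700000000001-0', ['entry', '{}']]
//     ]
//   ]
// ]
// processBatch receives the inner array; each element is [id, ['entry', json]],
// which matches what processEntry destructures.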
+ async function shutdown() {
+   stopRequested = true
+   console.log('[azify-logger][worker] shutting down...')
+   await redis.quit().catch(() => {})
+   process.exit(0)
+ }
+
+ process.on('SIGINT', shutdown)
+ process.on('SIGTERM', shutdown)
+
+ ensureGroup()
+   .then(() => {
+     console.log('[azify-logger][worker] consuming stream', STREAM_KEY, 'as', CONSUMER_NAME)
+     if (process.send) {
+       process.send({ type: 'azify-logger:ready', pid: process.pid, stream: STREAM_KEY })
+     }
+     return consumeLoop()
+   })
+   .catch((err) => {
+     console.error('[azify-logger][worker] failed to start:', err)
+     process.exit(1)
+   })