azify-logger 1.0.26 → 1.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,357 @@
1
+ const axios = require('axios')
2
+ const http = require('http')
3
+ const https = require('https')
4
+ const path = require('path')
5
+ const { randomUUID } = require('crypto')
6
+
7
+ const { createRedisProducer, DEFAULT_STREAM_KEY, DEFAULT_REDIS_URL } = require('../queue/redisQueue')
8
+ const { createFileSpool } = require('../queue/fileQueue')
9
+ const { ensureWorker } = require('../queue/workerManager')
10
+
11
// Inert transport returned whenever a real transport cannot be built
// (no logger URL, Redis not configured, or Redis initialization failed).
// Implements the transport interface: enqueue() drops the entry,
// flush() resolves immediately.
const noopTransport = {
  enqueue () {},
  async flush () {}
}
15
+
16
// Built-in defaults for every numeric tuning knob; each one can be
// overridden per call or via the environment variables in `envKeys`.
const defaultNumbers = {
  timeout: 250,          // HTTP request timeout (ms)
  maxQueueSize: 2000,    // max buffered entries before oldest are dropped
  batchSize: 25,         // entries sent per flush cycle
  flushInterval: 100,    // delay between scheduled flushes (ms)
  maxSockets: 10,        // keep-alive agent socket cap
  failureThreshold: 5,   // consecutive failures before the circuit opens
  failureCooldown: 30000 // how long the circuit stays open (ms)
}
25
+
26
// Environment variable names mirroring each key of `defaultNumbers`;
// consulted by resolveOptions() when no explicit override is passed.
const envKeys = {
  timeout: 'AZIFY_LOGGER_HTTP_TIMEOUT',
  maxQueueSize: 'AZIFY_LOGGER_MAX_QUEUE',
  batchSize: 'AZIFY_LOGGER_BATCH_SIZE',
  flushInterval: 'AZIFY_LOGGER_FLUSH_INTERVAL',
  maxSockets: 'AZIFY_LOGGER_MAX_SOCKETS',
  failureThreshold: 'AZIFY_LOGGER_FAILURE_THRESHOLD',
  failureCooldown: 'AZIFY_LOGGER_FAILURE_COOLDOWN'
}
35
+
36
+ const exitSignals = ['SIGINT', 'SIGTERM', 'SIGQUIT']
37
+
38
/**
 * Coerce `value` to a finite number clamped into [min, max].
 *
 * Returns `fallback` when the value is null/undefined, an empty or
 * whitespace-only string, or does not parse to a finite number.
 * Fix: previously `Number('')` coerced to 0, so an environment variable
 * that was set but empty silently replaced the default with 0 (and
 * `Number(null)` did the same); such values now fall back instead.
 *
 * @param {*} value raw override or environment value
 * @param {number} fallback default used when `value` is unusable
 * @param {{min?: number, max?: number}} [bounds] clamping range
 * @returns {number}
 */
function toNumber (value, fallback, { min = 0, max } = {}) {
  if (value == null || (typeof value === 'string' && value.trim() === '')) {
    return fallback
  }
  const numeric = Number(value)
  if (!Number.isFinite(numeric)) {
    return fallback
  }
  if (numeric < min) {
    return min
  }
  if (typeof max === 'number' && numeric > max) {
    return max
  }
  return numeric
}
51
+
52
/**
 * Merge the numeric transport options, taking each key from (in priority
 * order): explicit overrides, the environment variable named in `envKeys`,
 * then the default in `defaultNumbers`.
 *
 * `batchSize` is floored at 1 (an empty batch would never drain); every
 * other knob is floored at 0. Fix: the original nested ternary gave
 * `failureThreshold` a dedicated `{ min: 0 }` branch identical to the
 * default branch — the redundant case is collapsed here, with no change
 * in behavior.
 *
 * Also guarantees maxQueueSize >= batchSize so a full batch always fits.
 *
 * @param {object} [overrides] per-call option overrides
 * @returns {object} resolved numeric options
 */
function resolveOptions (overrides = {}) {
  const resolved = {}

  for (const [key, fallback] of Object.entries(defaultNumbers)) {
    const envValue = process.env[envKeys[key]]
    resolved[key] = toNumber(overrides[key] ?? envValue, fallback, {
      min: key === 'batchSize' ? 1 : 0
    })
  }

  if (resolved.maxQueueSize < resolved.batchSize) {
    resolved.maxQueueSize = resolved.batchSize
  }

  return resolved
}
74
+
75
/**
 * Derive the Redis connection settings for the stream transport.
 *
 * Precedence for every field: flat override, nested `overrides.redis`
 * object, environment variable, library default. Returns null when no
 * URL can be resolved at all (Redis transport disabled).
 */
function resolveRedisConfig (overrides = {}) {
  const url =
    overrides.redisUrl ||
    (overrides.redis && overrides.redis.url) ||
    process.env.AZIFY_LOGGER_REDIS_URL ||
    DEFAULT_REDIS_URL

  if (!url) {
    return null
  }

  const streamKey =
    overrides.redisStream ||
    (overrides.redis && overrides.redis.stream) ||
    process.env.AZIFY_LOGGER_REDIS_STREAM ||
    process.env.AZIFY_LOGGER_REDIS_QUEUE_KEY ||
    DEFAULT_STREAM_KEY

  // NaN and 0 both collapse to undefined so the producer applies its
  // own default maximum stream length.
  const rawMaxLen = Number(
    overrides.redisMaxStreamLength ??
      (overrides.redis && overrides.redis.maxStreamLength) ??
      process.env.AZIFY_LOGGER_REDIS_MAX_STREAM_LENGTH
  )
  const maxLen = rawMaxLen || undefined

  const spoolDir =
    overrides.redisSpoolDir ||
    (overrides.redis && overrides.redis.spoolDir) ||
    process.env.AZIFY_LOGGER_REDIS_SPOOL_DIR

  return { url, streamKey, maxLen, spoolDir }
}
97
+
98
/**
 * Build the transport the pino stream uses to ship log entries.
 *
 * Returns the shared no-op transport when no logger URL is configured,
 * when Redis is not configured, or when the Redis transport fails to
 * initialize — the logger must never take the host application down.
 */
function createHttpLoggerTransport (loggerUrl, overrides) {
  if (!loggerUrl) {
    return noopTransport
  }

  const options = resolveOptions(overrides)
  const redisConfig = resolveRedisConfig(overrides)

  // Without Redis there is no async delivery path; stay silent rather
  // than fall back to a synchronous transport that could block the app.
  if (!redisConfig) {
    return noopTransport
  }

  try {
    return createRedisStreamTransport(loggerUrl, options, redisConfig)
  } catch (_) {
    // Initialization failed (e.g. Redis unreachable) — swallow and
    // no-op so startup is never blocked, and don't pollute the logs.
    return noopTransport
  }
}
121
+
122
/**
 * Transport that pushes log entries onto a Redis stream, with an on-disk
 * spool as fallback storage and a background worker (spawned via
 * ensureWorker) draining the stream. Every failure path is deliberately
 * silent: the logger must never crash or block the host application.
 */
function createRedisStreamTransport (loggerUrl, options, redisConfig) {
  let producer = null
  try {
    producer = createRedisProducer(redisConfig)
  } catch (_) {
    // Producer construction failed — degrade to a no-op, silently.
    return noopTransport
  }
  if (!producer) {
    return noopTransport
  }

  // Optional on-disk spool: entries land here when Redis rejects them
  // and are re-pushed to the producer on each spool flush.
  let spool = null
  try {
    spool = createFileSpool({
      directory: redisConfig.spoolDir || path.join(process.cwd(), '.azify-logger-spool'),
      flushInterval: options.flushInterval,
      batchSize: options.batchSize,
      async pushFn (entries) {
        if (!producer) {
          return
        }
        for (const entry of entries) {
          try {
            await producer.enqueue(entry).catch(() => {
              // Re-enqueue failed — drop silently.
            })
          } catch (_) {
            // Re-enqueue threw synchronously — drop silently.
          }
        }
      }
    })
  } catch (_) {
    // Spool unavailable — continue with Redis only, silently.
  }

  if (spool) {
    // Drain whatever a previous run left behind; errors are ignored.
    spool.flush().catch(() => {})
  }

  try {
    ensureWorker(redisConfig, { autoRestart: true, stdio: 'inherit' })
  } catch (_) {
    // Worker could not be started — entries still accumulate in Redis.
  }

  // Fire-and-forget hand-off to Redis; on any failure the entry is
  // appended to the spool instead. No setImmediate wrapper here: the
  // caller already defers, and producer.enqueue() defers internally,
  // so adding one would just deepen the nesting.
  const pushEntry = (entry) => {
    const spillToSpool = () => {
      if (spool) {
        spool.append(entry).catch(() => {})
      }
    }
    try {
      if (!producer) {
        spillToSpool()
        return
      }
      producer.enqueue(entry).catch(spillToSpool)
    } catch (_) {
      spillToSpool()
    }
  }

  return {
    /** Wrap the payload in a stream entry and hand it off without awaiting. */
    enqueue (payload, headers = {}) {
      try {
        pushEntry({
          id: randomUUID(),
          loggerUrl,
          headers,
          payload,
          createdAt: Date.now(),
          attempts: 0
        })
      } catch (_) {
        // Entry construction failed — never let the logger throw.
      }
    },
    /** Best-effort drain of the on-disk spool. */
    async flush () {
      try {
        if (spool) {
          await spool.flush().catch(() => {})
        }
      } catch (_) {
        // Flush failed — ignore.
      }
    }
  }
}
236
+
237
/**
 * Inline (in-process) HTTP transport: batches entries in memory and POSTs
 * them one by one to `loggerUrl` with axios, with a simple circuit breaker
 * that pauses delivery after `failureThreshold` consecutive failures.
 *
 * NOTE(review): not currently called by createHttpLoggerTransport (which
 * is Redis-or-noop); kept as the inline fallback implementation.
 *
 * @param {string} loggerUrl POST endpoint for log payloads
 * @param {object} options resolved numeric options (see resolveOptions)
 * @returns {{enqueue: Function, flush: Function}} transport interface
 */
function buildInlineTransport (loggerUrl, options) {
  const { httpAgent, httpsAgent } = createHttpAgents(options)

  const queue = []
  let flushing = false
  let flushTimer = null
  let consecutiveFailures = 0
  let circuitOpenUntil = 0

  // Arm a single unref'd timer so the queue drains in the background
  // without keeping the process alive.
  const scheduleFlush = (delay = options.flushInterval) => {
    if (flushTimer || flushing) {
      return
    }

    flushTimer = setTimeout(() => {
      flushTimer = null
      void flushQueue()
    }, delay)

    if (typeof flushTimer.unref === 'function') {
      flushTimer.unref()
    }
  }

  const flushQueue = async (force = false) => {
    if (flushing) {
      return
    }

    // Circuit breaker: while open, defer instead of hammering a failing
    // endpoint (unless this is a forced shutdown flush).
    const now = Date.now()
    if (!force && now < circuitOpenUntil) {
      scheduleFlush(circuitOpenUntil - now)
      return
    }

    const batch = queue.splice(0, options.batchSize)
    if (!batch.length) {
      return
    }

    flushing = true
    try {
      for (let i = 0; i < batch.length; i += 1) {
        const { payload, headers } = batch[i]
        try {
          await axios.post(loggerUrl, payload, {
            headers,
            timeout: options.timeout,
            httpAgent,
            httpsAgent,
            validateStatus: () => true // any HTTP response counts as delivered
          })
          consecutiveFailures = 0
        } catch (error) {
          consecutiveFailures += 1

          if (options.failureThreshold > 0 && consecutiveFailures >= options.failureThreshold) {
            circuitOpenUntil = Date.now() + options.failureCooldown
          }

          // Reinsert the unsent remainder (including the failed entry)
          // at the front so ordering is preserved on retry.
          const remaining = batch.slice(i)
          if (remaining.length) {
            queue.unshift(...remaining)
          }

          throw error
        }
      }
    } catch (_) {
      // remaining items already reinserted in queue
    } finally {
      flushing = false
      if (queue.length) {
        scheduleFlush()
      }
    }
  }

  const enqueue = (payload, headers = {}) => {
    // Bounded queue: drop the oldest entry rather than grow without limit.
    if (queue.length >= options.maxQueueSize) {
      queue.shift()
    }

    queue.push({ payload, headers })

    if (queue.length >= options.batchSize) {
      void flushQueue()
    } else {
      scheduleFlush()
    }
  }

  const gracefulFlush = () => flushQueue(true)
  process.on('beforeExit', gracefulFlush)
  // 'exit' runs only synchronous code, so this flush is best-effort at most.
  process.on('exit', gracefulFlush)
  exitSignals.forEach((signal) => {
    // FIX: the original `process.on(signal, gracefulFlush)` installed a
    // persistent handler, which suppresses the default action — the
    // process could no longer be terminated by SIGINT/SIGTERM/SIGQUIT.
    // Use `once` and re-raise so we flush (best-effort) and then let the
    // signal's default termination proceed.
    process.once(signal, () => {
      void flushQueue(true)
      process.kill(process.pid, signal)
    })
  })

  return {
    enqueue,
    flush: flushQueue
  }
}
341
+
342
/**
 * Build a pair of keep-alive HTTP/HTTPS agents so batched log posts can
 * reuse connections. `maxSockets` is floored at 1 to keep the agents valid
 * even with a zero/negative option.
 */
function createHttpAgents (options) {
  const socketLimit = Math.max(1, options.maxSockets)
  const agentConfig = { keepAlive: true, maxSockets: socketLimit }
  return {
    httpAgent: new http.Agent(agentConfig),
    httpsAgent: new https.Agent(agentConfig)
  }
}
353
+
354
// Public API: only the factory is exported; the inline transport and the
// helpers above are internal to this module.
module.exports = {
  createHttpLoggerTransport
}
357
+
package/streams/pino.d.ts CHANGED
@@ -1,9 +1,47 @@
1
1
  import { Transform } from 'stream';
2
2
 
3
+ export interface HttpTransportOptions {
4
+ timeout?: number;
5
+ maxQueueSize?: number;
6
+ batchSize?: number;
7
+ flushInterval?: number;
8
+ maxSockets?: number;
9
+ failureThreshold?: number;
10
+ failureCooldown?: number;
11
+ redisUrl?: string;
12
+ redisQueueKey?: string;
13
+ redisMaxQueueLen?: number;
14
+ redisWorker?: boolean;
15
+ }
16
+
3
17
export interface PinoStreamOptions {
  loggerUrl?: string;
  serviceName?: string;
  environment?: string;
  /** Optional low level transport tuning overrides */
  transportOptions?: HttpTransportOptions;
  /** Shorthand for transportOptions.timeout */
  timeout?: number;
  /** Shorthand for transportOptions.maxQueueSize */
  maxQueueSize?: number;
  /** Shorthand for transportOptions.batchSize */
  batchSize?: number;
  /** Shorthand for transportOptions.flushInterval */
  flushInterval?: number;
  /** Shorthand for transportOptions.maxSockets */
  maxSockets?: number;
  /** Shorthand for transportOptions.failureThreshold */
  failureThreshold?: number;
  /** Shorthand for transportOptions.failureCooldown */
  failureCooldown?: number;
  /** Shorthand for enabling Redis queue */
  redisUrl?: string;
  /**
   * Shorthand for queue key used when Redis is enabled.
   * NOTE(review): the JS transport reads `redisStream` (or the
   * AZIFY_LOGGER_REDIS_QUEUE_KEY env var), not this property name —
   * verify this shorthand is actually honored.
   */
  redisQueueKey?: string;
  /**
   * Shorthand for Redis queue max length.
   * NOTE(review): the JS transport reads `redisMaxStreamLength`, not
   * this property name — verify before relying on it.
   */
  redisMaxQueueLen?: number;
  /** Disable/enable embedded Redis worker */
  redisWorker?: boolean;
}
8
46
 
9
47
  /**
package/streams/pino.js CHANGED
@@ -12,17 +12,25 @@ try {
12
12
  trace = { getSpan: () => null }
13
13
  W3CTraceContextPropagator = class {}
14
14
  }
15
- const axios = require('axios')
15
+ const { createHttpLoggerTransport } = require('./httpQueue')
16
+ const { shouldSample } = require('../sampling')
16
17
 
17
18
  function createPinoStream(options = {}) {
18
- const loggerUrl = options.loggerUrl || process.env.AZIFY_LOGGER_URL || 'http://localhost:3001'
19
- const serviceName = options.serviceName || process.env.APP_NAME || 'app'
20
- const environment = options.environment || process.env.NODE_ENV || 'development'
19
+ const loggerUrl = options.loggerUrl || process.env.AZIFY_LOGGER_URL
20
+ const serviceName = options.serviceName || process.env.APP_NAME
21
+ const environment = options.environment || process.env.NODE_ENV
22
+
23
+ const transport = createHttpLoggerTransport(loggerUrl, extractTransportOptions(options))
21
24
 
22
25
  return {
23
- write(chunk) {
26
+ write (chunk) {
24
27
  let record
25
- try { record = typeof chunk === 'string' ? JSON.parse(chunk) : chunk } catch (_) { return }
28
+ try {
29
+ record = typeof chunk === 'string' ? JSON.parse(chunk) : chunk
30
+ } catch (_) {
31
+ return
32
+ }
33
+
26
34
  const levelMap = { 60: 'fatal', 50: 'error', 40: 'warn', 30: 'info', 20: 'debug', 10: 'trace' }
27
35
  const level = levelMap[record.level] || 'info'
28
36
 
@@ -65,10 +73,39 @@ function createPinoStream(options = {}) {
65
73
  ...(spanId && { spanId })
66
74
  }
67
75
 
76
+ if (!shouldSample(level, 'logger')) {
77
+ return
78
+ }
79
+
68
80
  const payload = { level, message: record.msg || record.message || 'log', meta }
69
- axios.post(`${loggerUrl}`, payload, { headers, timeout: 10000 }).catch(() => {})
81
+ transport.enqueue(payload, headers)
70
82
  }
71
83
  }
72
84
  }
73
85
 
74
86
  module.exports = createPinoStream
87
+
88
+ function extractTransportOptions (options) {
89
+ const {
90
+ transportOptions = {},
91
+ timeout,
92
+ maxQueueSize,
93
+ batchSize,
94
+ flushInterval,
95
+ maxSockets,
96
+ failureThreshold,
97
+ failureCooldown
98
+ } = options
99
+
100
+ const overrides = { ...transportOptions }
101
+
102
+ if (timeout != null) overrides.timeout = timeout
103
+ if (maxQueueSize != null) overrides.maxQueueSize = maxQueueSize
104
+ if (batchSize != null) overrides.batchSize = batchSize
105
+ if (flushInterval != null) overrides.flushInterval = flushInterval
106
+ if (maxSockets != null) overrides.maxSockets = maxSockets
107
+ if (failureThreshold != null) overrides.failureThreshold = failureThreshold
108
+ if (failureCooldown != null) overrides.failureCooldown = failureCooldown
109
+
110
+ return overrides
111
+ }