azify-logger 1.0.26 → 1.0.29

package/streams/httpQueue.js ADDED
@@ -0,0 +1,342 @@
+ const axios = require('axios')
+ const http = require('http')
+ const https = require('https')
+ const path = require('path')
+
+ function fastUUID() {
+   const timestamp = Date.now().toString(36)
+   const randomPart = Math.random().toString(36).substring(2, 15)
+   const randomPart2 = Math.random().toString(36).substring(2, 15)
+   return `${timestamp}-${randomPart}-${randomPart2}`.substring(0, 36)
+ }
+
+ const { createRedisProducer, DEFAULT_STREAM_KEY, DEFAULT_REDIS_URL } = require('../queue/redisQueue')
+ const { createFileSpool } = require('../queue/fileQueue')
+ const { ensureWorker } = require('../queue/workerManager')
+
+ const noopTransport = {
+   enqueue () {},
+   async flush () {}
+ }
+
+ const defaultNumbers = {
+   timeout: 250,
+   maxQueueSize: 2000,
+   batchSize: 25,
+   flushInterval: 100,
+   maxSockets: 10,
+   failureThreshold: 5,
+   failureCooldown: 30000
+ }
+
+ const envKeys = {
+   timeout: 'AZIFY_LOGGER_HTTP_TIMEOUT',
+   maxQueueSize: 'AZIFY_LOGGER_MAX_QUEUE',
+   batchSize: 'AZIFY_LOGGER_BATCH_SIZE',
+   flushInterval: 'AZIFY_LOGGER_FLUSH_INTERVAL',
+   maxSockets: 'AZIFY_LOGGER_MAX_SOCKETS',
+   failureThreshold: 'AZIFY_LOGGER_FAILURE_THRESHOLD',
+   failureCooldown: 'AZIFY_LOGGER_FAILURE_COOLDOWN'
+ }
+
+ const exitSignals = ['SIGINT', 'SIGTERM', 'SIGQUIT']
+
+ function toNumber (value, fallback, { min = 0, max } = {}) {
+   const numeric = Number(value)
+   if (!Number.isFinite(numeric)) {
+     return fallback
+   }
+   if (numeric < min) {
+     return min
+   }
+   if (typeof max === 'number' && numeric > max) {
+     return max
+   }
+   return numeric
+ }
+
+ function resolveOptions (overrides = {}) {
+   const resolved = {}
+
+   for (const [key, fallback] of Object.entries(defaultNumbers)) {
+     const envValue = process.env[envKeys[key]]
+     resolved[key] = toNumber(
+       overrides[key] ?? envValue,
+       fallback,
+       key === 'failureThreshold'
+         ? { min: 0 }
+         : key === 'batchSize'
+           ? { min: 1 }
+           : { min: 0 }
+     )
+   }
+
+   if (resolved.maxQueueSize < resolved.batchSize) {
+     resolved.maxQueueSize = resolved.batchSize
+   }
+
+   return resolved
+ }
+
+ function resolveRedisConfig (overrides = {}) {
+   const redisUrl =
+     overrides.redisUrl ||
+     (overrides.redis && overrides.redis.url) ||
+     process.env.AZIFY_LOGGER_REDIS_URL ||
+     DEFAULT_REDIS_URL
+
+   if (!redisUrl) {
+     return null
+   }
+
+   const streamKey = overrides.redisStream || (overrides.redis && overrides.redis.stream) || process.env.AZIFY_LOGGER_REDIS_STREAM || process.env.AZIFY_LOGGER_REDIS_QUEUE_KEY || DEFAULT_STREAM_KEY
+   const maxLen = Number(overrides.redisMaxStreamLength ?? (overrides.redis && overrides.redis.maxStreamLength) ?? process.env.AZIFY_LOGGER_REDIS_MAX_STREAM_LENGTH) || undefined
+   const spoolDir = overrides.redisSpoolDir || (overrides.redis && overrides.redis.spoolDir) || process.env.AZIFY_LOGGER_REDIS_SPOOL_DIR
+
+   return {
+     url: redisUrl,
+     streamKey,
+     maxLen,
+     spoolDir
+   }
+ }
+
+ function createHttpLoggerTransport (loggerUrl, overrides) {
+   if (!loggerUrl) {
+     return noopTransport
+   }
+
+   const options = resolveOptions(overrides)
+   const redisConfig = resolveRedisConfig(overrides)
+
+   if (redisConfig) {
+     try {
+       return createRedisStreamTransport(loggerUrl, options, redisConfig)
+     } catch (err) {
+       const errorMsg = `[azify-logger] ❌ ERROR: Redis is required but failed to initialize.\n` +
+         `   Message: ${err.message}\n` +
+         `   Check that:\n` +
+         `   1. ioredis is installed: npm install ioredis\n` +
+         `   2. Redis is configured correctly (AZIFY_LOGGER_REDIS_URL)\n` +
+         `   3. Redis is running and reachable\n`
+       throw new Error(errorMsg)
+     }
+   }
+
+   const defaultRedisConfig = {
+     url: DEFAULT_REDIS_URL,
+     streamKey: DEFAULT_STREAM_KEY
+   }
+
+   try {
+     return createRedisStreamTransport(loggerUrl, options, defaultRedisConfig)
+   } catch (err) {
+     const errorMsg = `[azify-logger] ❌ ERROR: Redis is required but is not available.\n` +
+       `   Message: ${err.message}\n` +
+       `   Check that:\n` +
+       `   1. ioredis is installed: npm install ioredis\n` +
+       `   2. Redis is running at ${DEFAULT_REDIS_URL}\n` +
+       `   3. Set AZIFY_LOGGER_REDIS_URL if Redis is at a different address\n`
+     throw new Error(errorMsg)
+   }
+ }
+
+ function createRedisStreamTransport (loggerUrl, options, redisConfig) {
+   let producer = null
+   try {
+     producer = createRedisProducer(redisConfig)
+   } catch (err) {
+     throw new Error(`Failed to create Redis producer: ${err.message}`)
+   }
+
+   if (!producer) {
+     throw new Error('Failed to create Redis producer: producer is null')
+   }
+
+   let spool = null
+   if (createFileSpool) {
+     try {
+       spool = createFileSpool({
+         directory: redisConfig.spoolDir || path.join(process.cwd(), '.azify-logger-spool'),
+         flushInterval: options.flushInterval,
+         batchSize: options.batchSize,
+         async pushFn (entries) {
+           if (producer) {
+             for (const entry of entries) {
+               try {
+                 await producer.enqueue(entry).catch(() => {
+                 })
+               } catch (err) {
+               }
+             }
+           }
+         }
+       })
+     } catch (err) {
+     }
+   }
+
+   if (spool) {
+     spool.flush().catch(() => {})
+   }
+
+   if (ensureWorker) {
+     try {
+       ensureWorker(redisConfig, { autoRestart: true, stdio: 'inherit' })
+     } catch (err) {
+       process.stderr.write(`[azify-logger] ⚠️ Warning: failed to start worker: ${err.message}\n`)
+     }
+   }
+
+   function pushEntry(entry) {
+     if (!producer) {
+       if (spool) {
+         spool.append(entry).catch(() => {})
+       }
+       return
+     }
+
+     producer.enqueue(entry)
+   }
+
+   return {
+     enqueue (payload, headers = {}) {
+       const entry = {
+         id: fastUUID(),
+         loggerUrl,
+         headers: headers || {},
+         payload
+       }
+       pushEntry(entry)
+     },
+     async flush () {
+       try {
+         if (spool) {
+           await spool.flush().catch(() => {
+           })
+         }
+       } catch (err) {
+       }
+     }
+   }
+ }
+
+ function buildInlineTransport (loggerUrl, options) {
+   const { httpAgent, httpsAgent } = createHttpAgents(options)
+
+   const queue = []
+   let flushing = false
+   let flushTimer = null
+   let consecutiveFailures = 0
+   let circuitOpenUntil = 0
+
+   const scheduleFlush = (delay = options.flushInterval) => {
+     if (flushTimer || flushing) {
+       return
+     }
+
+     flushTimer = setTimeout(() => {
+       flushTimer = null
+       void flushQueue()
+     }, delay)
+
+     if (typeof flushTimer.unref === 'function') {
+       flushTimer.unref()
+     }
+   }
+
+   const flushQueue = async (force = false) => {
+     if (flushing) {
+       return
+     }
+
+     const now = Date.now()
+     if (!force && now < circuitOpenUntil) {
+       scheduleFlush(circuitOpenUntil - now)
+       return
+     }
+
+     const batch = queue.splice(0, options.batchSize)
+     if (!batch.length) {
+       return
+     }
+
+     flushing = true
+     try {
+       for (let i = 0; i < batch.length; i += 1) {
+         const { payload, headers } = batch[i]
+         try {
+           await axios.post(loggerUrl, payload, {
+             headers,
+             timeout: options.timeout,
+             httpAgent,
+             httpsAgent,
+             validateStatus: () => true
+           })
+           consecutiveFailures = 0
+         } catch (error) {
+           consecutiveFailures += 1
+
+           if (options.failureThreshold > 0 && consecutiveFailures >= options.failureThreshold) {
+             circuitOpenUntil = Date.now() + options.failureCooldown
+           }
+
+           const remaining = batch.slice(i)
+           if (remaining.length) {
+             queue.unshift(...remaining)
+           }
+
+           throw error
+         }
+       }
+     } catch (_) {
+     } finally {
+       flushing = false
+       if (queue.length) {
+         scheduleFlush()
+       }
+     }
+   }
+
+   const enqueue = (payload, headers = {}) => {
+     if (queue.length >= options.maxQueueSize) {
+       queue.shift()
+     }
+
+     queue.push({ payload, headers })
+
+     if (queue.length >= options.batchSize) {
+       void flushQueue()
+     } else {
+       scheduleFlush()
+     }
+   }
+
+   const gracefulFlush = () => flushQueue(true)
+   process.on('beforeExit', gracefulFlush)
+   process.on('exit', gracefulFlush)
+   exitSignals.forEach((signal) => {
+     process.on(signal, gracefulFlush)
+   })
+
+   return {
+     enqueue,
+     flush: flushQueue
+   }
+ }
+
+ function createHttpAgents (options) {
+   const httpAgent = new http.Agent({
+     keepAlive: true,
+     maxSockets: Math.max(1, options.maxSockets)
+   })
+   const httpsAgent = new https.Agent({
+     keepAlive: true,
+     maxSockets: Math.max(1, options.maxSockets)
+   })
+   return { httpAgent, httpsAgent }
+ }
+
+ module.exports = {
+   createHttpLoggerTransport
+ }
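
For reference, the new module's entry point is createHttpLoggerTransport(loggerUrl, overrides): it returns a no-op transport when no URL is given and otherwise requires a reachable Redis. A minimal standalone usage sketch — the require path is inferred from the './httpQueue' import in pino.js below, and the endpoint URL and option values are illustrative assumptions, not documented API:

// Sketch only: module path, endpoint URL, and option values are assumptions.
const { createHttpLoggerTransport } = require('azify-logger/streams/httpQueue')

const transport = createHttpLoggerTransport('http://localhost:3001', {
  batchSize: 50,      // overrides the default of 25 (or AZIFY_LOGGER_BATCH_SIZE)
  flushInterval: 200, // ms between background flushes
  redisUrl: 'redis://localhost:6379'
})

// Entries are queued (Redis stream, with a file spool fallback), not sent inline.
transport.enqueue({ level: 'info', message: 'hello' }, { 'x-request-id': 'abc123' })

// flush() returns a promise that drains the file spool; call it before shutdown.
transport.flush().catch(() => {})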
package/streams/pino.d.ts CHANGED
@@ -1,9 +1,47 @@
  import { Transform } from 'stream';
 
+ export interface HttpTransportOptions {
+   timeout?: number;
+   maxQueueSize?: number;
+   batchSize?: number;
+   flushInterval?: number;
+   maxSockets?: number;
+   failureThreshold?: number;
+   failureCooldown?: number;
+   redisUrl?: string;
+   redisQueueKey?: string;
+   redisMaxQueueLen?: number;
+   redisWorker?: boolean;
+ }
+
  export interface PinoStreamOptions {
    loggerUrl?: string;
    serviceName?: string;
    environment?: string;
+   /** Optional low-level transport tuning overrides */
+   transportOptions?: HttpTransportOptions;
+   /** Shorthand for transportOptions.timeout */
+   timeout?: number;
+   /** Shorthand for transportOptions.maxQueueSize */
+   maxQueueSize?: number;
+   /** Shorthand for transportOptions.batchSize */
+   batchSize?: number;
+   /** Shorthand for transportOptions.flushInterval */
+   flushInterval?: number;
+   /** Shorthand for transportOptions.maxSockets */
+   maxSockets?: number;
+   /** Shorthand for transportOptions.failureThreshold */
+   failureThreshold?: number;
+   /** Shorthand for transportOptions.failureCooldown */
+   failureCooldown?: number;
+   /** Shorthand for enabling the Redis queue */
+   redisUrl?: string;
+   /** Shorthand for the queue key used when Redis is enabled */
+   redisQueueKey?: string;
+   /** Shorthand for the Redis queue's maximum length */
+   redisMaxQueueLen?: number;
+   /** Enable or disable the embedded Redis worker */
+   redisWorker?: boolean;
  }
 
  /**
package/streams/pino.js CHANGED
@@ -12,17 +12,25 @@ try {
    trace = { getSpan: () => null }
    W3CTraceContextPropagator = class {}
  }
- const axios = require('axios')
+ const { createHttpLoggerTransport } = require('./httpQueue')
+ const { shouldSample } = require('../sampling')
 
  function createPinoStream(options = {}) {
-   const loggerUrl = options.loggerUrl || process.env.AZIFY_LOGGER_URL || 'http://localhost:3001'
-   const serviceName = options.serviceName || process.env.APP_NAME || 'app'
-   const environment = options.environment || process.env.NODE_ENV || 'development'
+   const loggerUrl = options.loggerUrl || process.env.AZIFY_LOGGER_URL
+   const serviceName = options.serviceName || process.env.APP_NAME
+   const environment = options.environment || process.env.NODE_ENV
+
+   const transport = createHttpLoggerTransport(loggerUrl, extractTransportOptions(options))
 
    return {
-     write(chunk) {
+     write (chunk) {
        let record
-       try { record = typeof chunk === 'string' ? JSON.parse(chunk) : chunk } catch (_) { return }
+       try {
+         record = typeof chunk === 'string' ? JSON.parse(chunk) : chunk
+       } catch (_) {
+         return
+       }
+
        const levelMap = { 60: 'fatal', 50: 'error', 40: 'warn', 30: 'info', 20: 'debug', 10: 'trace' }
        const level = levelMap[record.level] || 'info'
 
@@ -65,10 +73,39 @@ function createPinoStream(options = {}) {
          ...(spanId && { spanId })
        }
 
+       if (!shouldSample(level, 'logger')) {
+         return
+       }
+
        const payload = { level, message: record.msg || record.message || 'log', meta }
-       axios.post(`${loggerUrl}`, payload, { headers, timeout: 10000 }).catch(() => {})
+       transport.enqueue(payload, headers)
      }
    }
  }
 
  module.exports = createPinoStream
+
+ function extractTransportOptions (options) {
+   const {
+     transportOptions = {},
+     timeout,
+     maxQueueSize,
+     batchSize,
+     flushInterval,
+     maxSockets,
+     failureThreshold,
+     failureCooldown
+   } = options
+
+   const overrides = { ...transportOptions }
+
+   if (timeout != null) overrides.timeout = timeout
+   if (maxQueueSize != null) overrides.maxQueueSize = maxQueueSize
+   if (batchSize != null) overrides.batchSize = batchSize
+   if (flushInterval != null) overrides.flushInterval = flushInterval
+   if (maxSockets != null) overrides.maxSockets = maxSockets
+   if (failureThreshold != null) overrides.failureThreshold = failureThreshold
+   if (failureCooldown != null) overrides.failureCooldown = failureCooldown
+
+   return overrides
+ }
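
Net effect of the pino.js changes: the hard-coded loggerUrl/serviceName/environment fallbacks are gone (no URL now yields a no-op transport), records can be dropped by shouldSample before delivery, and logs flow through the batched queue transport instead of one fire-and-forget axios.post per record. A minimal wiring sketch, with the module path and endpoint again assumed rather than documented:

// Sketch only: require paths and the endpoint URL are assumptions.
const pino = require('pino')
const createPinoStream = require('azify-logger/streams/pino')

const logger = pino(
  { level: 'info' },
  createPinoStream({
    loggerUrl: 'http://localhost:3001', // or set AZIFY_LOGGER_URL; there is no built-in default anymore
    serviceName: 'checkout-api',
    environment: 'production'
  })
)

logger.info({ orderId: 123 }, 'order created') // enqueued and batched, not posted inline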