azify-logger 1.0.26 → 1.0.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -1
- package/index.js +38 -16
- package/middleware-express.js +218 -367
- package/middleware-restify.js +135 -306
- package/package.json +31 -29
- package/queue/fileQueue.js +100 -0
- package/queue/redisQueue.js +181 -0
- package/queue/workerManager.js +111 -0
- package/register-otel.js +63 -13
- package/register.js +364 -99
- package/sampling.js +79 -0
- package/scripts/redis-worker.js +467 -0
- package/server.js +168 -70
- package/streams/bunyan.d.ts +26 -0
- package/streams/bunyan.js +39 -8
- package/streams/httpQueue.js +357 -0
- package/streams/pino.d.ts +38 -0
- package/streams/pino.js +44 -7
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
const fs = require('fs')
|
|
2
|
+
const path = require('path')
|
|
3
|
+
|
|
4
|
+
/**
 * Create an on-disk NDJSON spool used as a fallback buffer when the primary
 * transport (e.g. Redis) is unavailable.
 *
 * `append()` writes one JSON line per entry to a per-process spool file and
 * schedules a periodic flush; `flush()` drains every `*.ndjson` file in the
 * spool directory through `pushFn` in batches.
 *
 * @param {object} [options]
 * @param {string}   [options.directory]     Spool directory (default: `<cwd>/.azify-logger-spool`).
 * @param {number}   [options.flushInterval] Delay in ms before a scheduled flush (default 5000).
 * @param {number}   [options.batchSize]     Max entries handed to `pushFn` per call (default 100).
 * @param {Function} options.pushFn          async (entries[]) => void — delivers a batch downstream.
 * @returns {{ append: Function, flush: Function }|null} null when no pushFn is provided.
 */
function createFileSpool(options = {}) {
  const directory = options.directory || path.join(process.cwd(), '.azify-logger-spool')
  const flushInterval = options.flushInterval || 5000
  const batchSize = options.batchSize || 100
  const pushFn = typeof options.pushFn === 'function' ? options.pushFn : null

  // Without a delivery function the spool is useless — signal that to the caller.
  if (!pushFn) {
    return null
  }

  fs.mkdirSync(directory, { recursive: true })

  let scheduled = false
  let flushing = false

  // Debounced flush: at most one pending timer; unref'd so it never keeps
  // the process alive.
  const scheduleFlush = () => {
    if (scheduled) {
      return
    }
    scheduled = true
    const timer = setTimeout(async () => {
      scheduled = false
      await flush().catch(() => {})
    }, flushInterval)
    if (typeof timer.unref === 'function') {
      timer.unref()
    }
  }

  // Append one entry as a single NDJSON line to this process's spool file.
  async function append(entry) {
    const filePath = path.join(directory, `spool-${process.pid}.ndjson`)
    await fs.promises.appendFile(filePath, JSON.stringify(entry) + '\n')
    scheduleFlush()
  }

  // Drain all spool files through pushFn. Each file is atomically renamed to
  // `*.draining` first so concurrent appends go to a fresh file.
  async function flush() {
    if (flushing) {
      return
    }
    flushing = true
    try {
      const files = (await fs.promises.readdir(directory)).filter((file) => file.endsWith('.ndjson'))
      for (const file of files) {
        const fullPath = path.join(directory, file)
        const drainingPath = `${fullPath}.draining`
        try {
          await fs.promises.rename(fullPath, drainingPath)
        } catch (err) {
          if (err.code === 'ENOENT') {
            // Another flusher (or process) already claimed this file.
            continue
          }
          throw err
        }

        const content = await fs.promises.readFile(drainingPath, 'utf8').catch(() => '')
        if (!content) {
          await fs.promises.unlink(drainingPath).catch(() => {})
          continue
        }

        const lines = content.split('\n').filter(Boolean)
        while (lines.length) {
          const slice = lines.splice(0, batchSize)
          const entries = slice.map((line) => {
            try {
              return JSON.parse(line)
            } catch (_) {
              // Corrupt line — drop it rather than poison the batch.
              return null
            }
          }).filter(Boolean)
          if (entries.length > 0) {
            try {
              await pushFn(entries)
            } catch (_) {
              // BUGFIX: on delivery failure, preserve the failed slice AND all
              // remaining undelivered lines back in the live spool file, then
              // remove the `.draining` file before rethrowing. The previous
              // version only re-appended the failed slice and left the
              // draining file behind — its remaining lines were never re-read
              // (the readdir filter only matches `*.ndjson`), losing data and
              // leaking orphan files.
              const leftover = slice.concat(lines).join('\n') + '\n'
              await fs.promises.appendFile(fullPath, leftover)
              await fs.promises.unlink(drainingPath).catch(() => {})
              throw new Error('Failed to flush file spool entries back to Redis')
            }
          }
        }

        await fs.promises.unlink(drainingPath).catch(() => {})
      }
    } finally {
      flushing = false
    }
  }

  return {
    append,
    flush
  }
}
|
|
95
|
+
|
|
96
|
+
// Public API: createFileSpool returns { append, flush } (or null when no
// pushFn is supplied) for spooling log entries to disk while Redis is down.
module.exports = {
  createFileSpool
}
|
|
99
|
+
|
|
100
|
+
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
const Redis = require('ioredis')
|
|
2
|
+
|
|
3
|
+
// Connection defaults for the log producer; environment variables take
// precedence over the hard-coded fallbacks.
const DEFAULT_REDIS_URL = process.env.AZIFY_LOGGER_REDIS_URL || 'redis://localhost:6381'
const DEFAULT_STREAM_KEY =
  process.env.AZIFY_LOGGER_REDIS_STREAM ||
  process.env.AZIFY_LOGGER_REDIS_QUEUE_KEY ||
  'azify-logger:stream'
const DEFAULT_MAXLEN = Number(process.env.AZIFY_LOGGER_REDIS_MAX_STREAM_LENGTH) || 100000

// Shared ioredis client options: exponential backoff capped at 10s, and
// reconnect on the transient error classes expected from failovers/restarts.
const defaultRedisOptions = {
  enableAutoPipelining: true,
  maxRetriesPerRequest: null,
  retryStrategy: (attempt) => Math.min(1000 * Math.pow(1.5, attempt), 10000),
  reconnectOnError: (err) => {
    const message = (err && err.message) || ''
    const transientTokens = ['READONLY', 'ECONNRESET', 'ECONNREFUSED']
    return transientTokens.some((token) => message.includes(token))
  }
}
|
|
22
|
+
|
|
23
|
+
/**
 * Create a batching producer that writes log entries to a Redis Stream.
 *
 * Entries handed to `enqueue()` are buffered in memory and delivered with
 * XADD (stream capped via `MAXLEN ~`) through a single pipeline, either when
 * the buffer reaches BATCH_SIZE or after BATCH_TIMEOUT ms. All Redis errors
 * are swallowed so logging can never take the host application down.
 *
 * @param {object} [config]
 * @param {string} [config.url]          Redis connection URL (env/default fallback).
 * @param {string} [config.streamKey]    Target stream key (default DEFAULT_STREAM_KEY).
 * @param {number} [config.maxLen]       Approximate stream cap (default DEFAULT_MAXLEN).
 * @param {object} [config.redisOptions] Extra ioredis options merged over the defaults.
 * @returns {{ enqueue: Function, client: object, streamKey: string, close: Function, flush: Function }}
 * @throws {Error} when no Redis URL can be resolved.
 */
function createRedisProducer(config = {}) {
  const url = config.url || process.env.AZIFY_LOGGER_REDIS_URL || DEFAULT_REDIS_URL
  if (!url) {
    throw new Error('Redis URL is required to initialize the azify-logger Redis producer.')
  }

  const streamKey = config.streamKey || DEFAULT_STREAM_KEY
  const maxLen = Number.isFinite(config.maxLen) ? config.maxLen : DEFAULT_MAXLEN
  const redisOptions = { ...defaultRedisOptions, ...(config.redisOptions || {}) }

  const client = new Redis(url, redisOptions)
  // NOTE(review): removed the dead `lastConnectionErrorLog` / `lastEnqueueErrorLog`
  // locals — they were written but never read anywhere in this factory.
  let connectionErrorCount = 0
  const ERROR_LOG_INTERVAL = 300000 // 5 minutes between logs (avoid repeated logs)

  // Global flag shared by every instance so the whole process emits at most
  // one "Redis unavailable" message (producer and worker included).
  if (typeof global.__azifyLoggerRedisErrorLogged === 'undefined') {
    global.__azifyLoggerRedisErrorLogged = false
    global.__azifyLoggerRedisErrorLastLog = 0
  }

  client.on('error', (err) => {
    // Log only once per process, and only for connection-style failures.
    const now = Date.now()
    if (!global.__azifyLoggerRedisErrorLogged && now - global.__azifyLoggerRedisErrorLastLog > ERROR_LOG_INTERVAL) {
      if (err && (err.code === 'ECONNREFUSED' || err.message?.includes('ECONNREFUSED') || err.message?.includes('Redis'))) {
        global.__azifyLoggerRedisErrorLogged = true
        global.__azifyLoggerRedisErrorLastLog = now
        connectionErrorCount++
        // process.stderr.write bypasses any console interception; the message
        // makes clear the application itself keeps running.
        process.stderr.write('[azify-logger] ⚠️ Redis indisponível. O sistema de logging está desabilitado. A aplicação continua funcionando normalmente.\n')
      }
    }
    // After the first message, stay completely silent.
  })
  client.on('end', () => {
    // Intentionally silent.
  })
  client.on('connect', () => {
    // Reset the error state on a successful (re)connection, without logging.
    if (connectionErrorCount > 0 || global.__azifyLoggerRedisErrorLogged) {
      connectionErrorCount = 0
      global.__azifyLoggerRedisErrorLogged = false
      global.__azifyLoggerRedisErrorLastLog = 0
    }
  })

  // BATCHING: accumulate entries and send them together to reduce overhead.
  const batch = []
  let batchTimer = null
  const BATCH_SIZE = 100 // Balanced batch size (200 was too large and added latency)
  const BATCH_TIMEOUT = 150 // Balanced flush timeout (ms)
  let flushing = false

  // Drain up to BATCH_SIZE buffered entries through one Redis pipeline.
  function flushBatch() {
    if (flushing || batch.length === 0) return
    flushing = true

    // Take everything available up to BATCH_SIZE to maximize pipeline efficiency.
    const entriesToFlush = batch.splice(0, BATCH_SIZE)
    if (entriesToFlush.length === 0) {
      flushing = false
      return
    }

    setImmediate(() => {
      try {
        // A single pipeline is far cheaper than one XADD per entry.
        const pipeline = client.pipeline()

        for (const entry of entriesToFlush) {
          try {
            const payload = JSON.stringify(entry)
            pipeline.xadd(streamKey, 'MAXLEN', '~', maxLen, '*', 'entry', payload)
          } catch (err) {
            // Serialization failure — drop the entry silently.
          }
        }

        pipeline.exec().catch(() => {
          // Silent failure — never block the application.
        })
      } catch (err) {
        // Silent failure — never block the application.
      } finally {
        flushing = false
        // More entries may have arrived while flushing; keep draining.
        if (batch.length > 0) {
          scheduleFlush()
        }
      }
    })
  }

  // Schedule a flush: immediately when the buffer is full, otherwise after a
  // short (unref'd) timeout.
  function scheduleFlush() {
    if (batchTimer || flushing) return

    if (batch.length >= BATCH_SIZE) {
      flushBatch()
    } else {
      batchTimer = setTimeout(() => {
        batchTimer = null
        flushBatch()
      }, BATCH_TIMEOUT)

      if (typeof batchTimer.unref === 'function') {
        batchTimer.unref()
      }
    }
  }

  // Buffer one entry; actual delivery happens asynchronously in flushBatch().
  function enqueue(entry) {
    try {
      batch.push(entry)
      scheduleFlush()
    } catch (err) {
      // Silent failure — never block the application.
    }
  }

  // Drain any buffered entries, then close the connection gracefully.
  async function close() {
    if (batchTimer) {
      clearTimeout(batchTimer)
      batchTimer = null
    }
    while (batch.length > 0) {
      flushBatch()
      // Give the asynchronous flush a moment to make progress.
      await new Promise(resolve => setTimeout(resolve, 10))
    }
    await client.quit().catch(() => {})
  }

  return {
    enqueue,
    client,
    streamKey,
    close,
    flush: flushBatch
  }
}
|
|
175
|
+
|
|
176
|
+
// Public API plus the resolved defaults (the defaults are also read by the
// worker side so both agree on URL/stream/cap when env vars are absent).
module.exports = {
  createRedisProducer,
  DEFAULT_REDIS_URL,
  DEFAULT_STREAM_KEY,
  DEFAULT_MAXLEN
}
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
const { fork } = require('child_process')
|
|
2
|
+
const path = require('path')
|
|
3
|
+
|
|
4
|
+
let workerProcess = null
|
|
5
|
+
let restarting = false
|
|
6
|
+
|
|
7
|
+
/**
 * Build the environment for the forked Redis worker: the parent's environment,
 * overlaid with `extraEnv`, with the Redis settings mapped onto the
 * AZIFY_LOGGER_* variables the worker script reads.
 *
 * @param {object} [redisConfig] - { url, streamKey, maxLen }
 * @param {object} [extraEnv]    - extra variables merged over process.env
 * @returns {object} environment map for child_process.fork
 */
function buildEnv(redisConfig = {}, extraEnv = {}) {
  const { url, streamKey, maxLen } = redisConfig
  const env = { ...process.env, ...extraEnv }

  if (url) {
    env.AZIFY_LOGGER_REDIS_URL = String(url)
  }
  if (streamKey) {
    // The stream key is published under both variable names the worker accepts.
    env.AZIFY_LOGGER_REDIS_STREAM = String(streamKey)
    env.AZIFY_LOGGER_REDIS_QUEUE_KEY = String(streamKey)
  }
  if (maxLen != null) {
    env.AZIFY_LOGGER_REDIS_MAX_STREAM_LENGTH = String(maxLen)
  }

  return env
}
|
|
17
|
+
|
|
18
|
+
/**
 * Fork scripts/redis-worker.js as a singleton child process.
 *
 * Returns the existing child when one is already alive; otherwise forks a new
 * one, wires logging/restart handling, and returns it (null when the fork
 * itself fails).
 *
 * @param {object} redisConfig - Redis settings passed to the worker via env.
 * @param {object} [options]   - { env, stdio, autoRestart, restartDelay }
 * @returns {import('child_process').ChildProcess|null}
 */
function spawnWorker(redisConfig, options = {}) {
  if (workerProcess && !workerProcess.killed) {
    return workerProcess
  }

  const scriptPath = path.resolve(__dirname, '..', 'scripts', 'redis-worker.js')
  const env = buildEnv(redisConfig, options.env)
  const stdio = options.stdio || 'inherit'
  const autoRestart = options.autoRestart !== false
  const restartDelay = options.restartDelay ?? 1000

  try {
    workerProcess = fork(scriptPath, [], { env, stdio })
  } catch (error) {
    console.error('[azify-logger] Falha ao iniciar worker Redis:', error)
    workerProcess = null
    return null
  }

  // BUGFIX: install the process-level handlers exactly once per process. The
  // previous version registered fresh 'exit'/'SIGINT'/'SIGTERM' once-handlers
  // on every (re)spawn, so a flapping worker with autoRestart leaked listeners
  // (MaxListenersExceededWarning) and stacked duplicate exit handlers. The
  // flag lives on the function itself so no new module-level state is needed;
  // cleanup always targets the current `workerProcess`.
  if (!spawnWorker._signalHandlersInstalled) {
    spawnWorker._signalHandlersInstalled = true
    const cleanup = () => {
      if (workerProcess && !workerProcess.killed) {
        workerProcess.kill()
        workerProcess = null
      }
    }
    process.once('exit', cleanup)
    process.once('SIGINT', () => {
      cleanup()
      process.exit(0)
    })
    process.once('SIGTERM', () => {
      cleanup()
      process.exit(0)
    })
  }

  if (stdio !== 'inherit') {
    // With an IPC-capable stdio the worker announces readiness via message.
    workerProcess.on('message', (msg) => {
      if (msg && msg.type === 'azify-logger:ready') {
        console.log('[azify-logger] Worker Redis ativo')
      }
    })
  } else {
    console.log('[azify-logger] Worker Redis iniciado (PID', workerProcess.pid, ')')
  }

  workerProcess.on('exit', (code, signal) => {
    const reason = signal || code
    console.warn('[azify-logger] Worker Redis finalizado:', reason)
    workerProcess = null
    // Respawn after a delay unless restarts are disabled or one is pending.
    if (autoRestart && !restarting && process.env.AZIFY_LOGGER_EMBEDDED_WORKER !== '0') {
      restarting = true
      setTimeout(() => {
        restarting = false
        spawnWorker(redisConfig, options)
      }, restartDelay)
    }
  })

  workerProcess.on('error', (error) => {
    console.error('[azify-logger] Erro no worker Redis:', error)
  })

  return workerProcess
}
|
|
83
|
+
|
|
84
|
+
/**
 * Start the embedded worker unless it has been explicitly disabled with
 * AZIFY_LOGGER_EMBEDDED_WORKER=0.
 *
 * @returns {import('child_process').ChildProcess|null}
 */
function ensureWorker(redisConfig, options = {}) {
  const embeddedWorkerDisabled = process.env.AZIFY_LOGGER_EMBEDDED_WORKER === '0'
  return embeddedWorkerDisabled ? null : spawnWorker(redisConfig, options)
}
|
|
90
|
+
|
|
91
|
+
/**
 * Public entry point: start the logger worker, extracting the Redis settings
 * from the options bag (the bag itself is forwarded as fork options).
 *
 * @returns {import('child_process').ChildProcess|null}
 */
function startLoggerWorker(options = {}) {
  const redisConfig = options.redisConfig || {}
  return spawnWorker(redisConfig, options)
}
|
|
94
|
+
|
|
95
|
+
/**
 * Kill the embedded worker if one is currently running; no-op otherwise.
 */
function stopLoggerWorker() {
  const child = workerProcess
  if (!child || child.killed) {
    return
  }
  child.kill()
  workerProcess = null
}
|
|
101
|
+
|
|
102
|
+
/**
 * Accessor for the current worker child process (null when none is running).
 */
function getWorkerProcess() {
  const current = workerProcess
  return current
}
|
|
105
|
+
|
|
106
|
+
// Public API for managing the embedded Redis worker child process.
module.exports = {
  ensureWorker,
  startLoggerWorker,
  stopLoggerWorker,
  getWorkerProcess
}
|
package/register-otel.js
CHANGED
|
@@ -1,6 +1,8 @@
|
|
|
1
1
|
try {
|
|
2
2
|
const { NodeSDK } = require('@opentelemetry/sdk-node')
|
|
3
|
-
const {
|
|
3
|
+
const { HttpInstrumentation } = require('@opentelemetry/instrumentation-http')
|
|
4
|
+
const { ExpressInstrumentation } = require('@opentelemetry/instrumentation-express')
|
|
5
|
+
const { RestifyInstrumentation } = require('@opentelemetry/instrumentation-restify')
|
|
4
6
|
|
|
5
7
|
const serviceName = process.env.OTEL_SERVICE_NAME || process.env.APP_NAME || 'app'
|
|
6
8
|
const serviceVersion = process.env.OTEL_SERVICE_VERSION || '1.0.0'
|
|
@@ -8,22 +10,70 @@ try {
|
|
|
8
10
|
process.env.OTEL_SERVICE_NAME = serviceName
|
|
9
11
|
process.env.OTEL_SERVICE_VERSION = serviceVersion
|
|
10
12
|
|
|
13
|
+
let collectorHost = null
|
|
14
|
+
let collectorPath = null
|
|
15
|
+
try {
|
|
16
|
+
const target = new URL(process.env.AZIFY_LOGGER_URL || 'http://localhost:3001/log')
|
|
17
|
+
collectorHost = target.host
|
|
18
|
+
collectorPath = target.pathname || '/'
|
|
19
|
+
} catch (_) {}
|
|
20
|
+
|
|
21
|
+
const isLoggerRequest = (host, path) => {
|
|
22
|
+
if (!collectorHost || typeof host !== 'string') return false
|
|
23
|
+
if (host !== collectorHost) return false
|
|
24
|
+
return typeof path === 'string' && path.startsWith(collectorPath)
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
const httpInstrumentation = new HttpInstrumentation({
|
|
28
|
+
enabled: true,
|
|
29
|
+
requireParentforOutgoingSpans: false,
|
|
30
|
+
requireParentforIncomingSpans: false,
|
|
31
|
+
ignoreIncomingRequestHook (req) {
|
|
32
|
+
if (!collectorHost) return false
|
|
33
|
+
const host = req.headers?.host
|
|
34
|
+
const url = req.url
|
|
35
|
+
return typeof host === 'string' && isLoggerRequest(host, url)
|
|
36
|
+
},
|
|
37
|
+
ignoreOutgoingRequestHook (res) {
|
|
38
|
+
try {
|
|
39
|
+
if (!collectorHost) return false
|
|
40
|
+
const host = res?.host
|
|
41
|
+
const path = res?.path
|
|
42
|
+
return isLoggerRequest(host, path)
|
|
43
|
+
} catch (_) {
|
|
44
|
+
return false
|
|
45
|
+
}
|
|
46
|
+
},
|
|
47
|
+
requestHook (span, { options }) {
|
|
48
|
+
if (!options?.headers) return
|
|
49
|
+
const requestId = options.headers['x-request-id'] || options.headers['X-Request-ID']
|
|
50
|
+
if (requestId) {
|
|
51
|
+
span.setAttribute('azify.request_id', requestId)
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
})
|
|
55
|
+
|
|
56
|
+
const expressInstrumentation = new ExpressInstrumentation({
|
|
57
|
+
enabled: true,
|
|
58
|
+
ignoreLayersType: ['router', 'middleware']
|
|
59
|
+
})
|
|
60
|
+
|
|
61
|
+
const restifyInstrumentation = new RestifyInstrumentation({
|
|
62
|
+
enabled: true
|
|
63
|
+
})
|
|
64
|
+
|
|
11
65
|
const sdk = new NodeSDK({
|
|
12
|
-
serviceName
|
|
13
|
-
serviceVersion
|
|
14
|
-
instrumentations: [
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
},
|
|
20
|
-
'@opentelemetry/instrumentation-express': { enabled: true },
|
|
21
|
-
'@opentelemetry/instrumentation-restify': { enabled: true }
|
|
22
|
-
})]
|
|
66
|
+
serviceName,
|
|
67
|
+
serviceVersion,
|
|
68
|
+
instrumentations: [
|
|
69
|
+
httpInstrumentation,
|
|
70
|
+
expressInstrumentation,
|
|
71
|
+
restifyInstrumentation
|
|
72
|
+
]
|
|
23
73
|
})
|
|
24
74
|
|
|
25
75
|
sdk.start()
|
|
26
|
-
|
|
76
|
+
|
|
27
77
|
process.once('SIGTERM', () => {
|
|
28
78
|
sdk.shutdown()
|
|
29
79
|
.finally(() => process.exit(0))
|