dd-trace 5.78.0 → 5.80.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/LICENSE-3rdparty.csv +1 -0
  2. package/index.d.ts +11 -4
  3. package/initialize.mjs +10 -10
  4. package/package.json +6 -3
  5. package/packages/datadog-core/src/storage.js +4 -4
  6. package/packages/datadog-esbuild/src/utils.js +5 -1
  7. package/packages/datadog-instrumentations/src/aws-sdk.js +9 -2
  8. package/packages/datadog-instrumentations/src/azure-service-bus.js +43 -36
  9. package/packages/datadog-instrumentations/src/helpers/hook.js +1 -0
  10. package/packages/datadog-instrumentations/src/helpers/instrument.js +2 -1
  11. package/packages/datadog-instrumentations/src/jest.js +1 -1
  12. package/packages/datadog-instrumentations/src/playwright.js +20 -0
  13. package/packages/datadog-plugin-aws-sdk/src/services/bedrockruntime/utils.js +3 -2
  14. package/packages/datadog-plugin-azure-service-bus/src/producer.js +14 -5
  15. package/packages/datadog-plugin-jest/src/index.js +1 -6
  16. package/packages/datadog-plugin-jest/src/util.js +46 -15
  17. package/packages/datadog-plugin-kafkajs/src/consumer.js +2 -1
  18. package/packages/datadog-plugin-kafkajs/src/producer.js +3 -1
  19. package/packages/datadog-plugin-openai/src/stream-helpers.js +1 -1
  20. package/packages/datadog-shimmer/src/shimmer.js +2 -2
  21. package/packages/dd-trace/src/aiguard/sdk.js +12 -5
  22. package/packages/dd-trace/src/appsec/telemetry/index.js +1 -31
  23. package/packages/dd-trace/src/baggage.js +11 -0
  24. package/packages/dd-trace/src/ci-visibility/exporters/git/git_metadata.js +1 -1
  25. package/packages/dd-trace/src/config_defaults.js +1 -0
  26. package/packages/dd-trace/src/debugger/devtools_client/session.js +11 -1
  27. package/packages/dd-trace/src/encode/0.4.js +3 -3
  28. package/packages/dd-trace/src/encode/coverage-ci-visibility.js +2 -2
  29. package/packages/dd-trace/src/exporters/agent/writer.js +6 -13
  30. package/packages/dd-trace/src/lambda/runtime/ritm.js +1 -2
  31. package/packages/dd-trace/src/llmobs/index.js +5 -5
  32. package/packages/dd-trace/src/llmobs/noop.js +6 -0
  33. package/packages/dd-trace/src/llmobs/plugins/ai/index.js +1 -0
  34. package/packages/dd-trace/src/llmobs/plugins/openai.js +41 -35
  35. package/packages/dd-trace/src/llmobs/sdk.js +5 -1
  36. package/packages/dd-trace/src/llmobs/span_processor.js +5 -5
  37. package/packages/dd-trace/src/llmobs/tagger.js +31 -17
  38. package/packages/dd-trace/src/msgpack/chunk.js +2 -2
  39. package/packages/dd-trace/src/msgpack/encoder.js +2 -3
  40. package/packages/dd-trace/src/msgpack/index.js +2 -2
  41. package/packages/dd-trace/src/openfeature/flagging_provider.js +5 -3
  42. package/packages/dd-trace/src/opentelemetry/logs/index.js +1 -1
  43. package/packages/dd-trace/src/opentelemetry/logs/logger.js +11 -6
  44. package/packages/dd-trace/src/opentelemetry/logs/otlp_http_log_exporter.js +1 -1
  45. package/packages/dd-trace/src/opentelemetry/logs/otlp_transformer.js +1 -9
  46. package/packages/dd-trace/src/opentelemetry/otlp/protobuf_loader.js +1 -1
  47. package/packages/dd-trace/src/plugins/database.js +1 -0
  48. package/packages/dd-trace/src/plugins/plugin.js +7 -9
  49. package/packages/dd-trace/src/profiling/exporter_cli.js +7 -6
  50. package/packages/dd-trace/src/remote_config/index.js +11 -1
  51. package/packages/dd-trace/src/require-package-json.js +1 -1
  52. package/packages/dd-trace/src/service-naming/index.js +31 -4
  53. package/packages/dd-trace/src/span_processor.js +9 -9
  54. /package/packages/dd-trace/src/{format.js → span_format.js} +0 -0
@@ -16,47 +16,17 @@ const {
16
16
  incrementWafConfigErrors,
17
17
  incrementWafRequests
18
18
  } = require('./waf')
19
- const telemetryMetrics = require('../../telemetry/metrics')
20
19
 
21
20
  const metricsStoreMap = new WeakMap()
22
21
 
23
- const appsecMetrics = telemetryMetrics.manager.namespace('appsec')
24
-
25
22
  let enabled = false
26
- let interval
27
- const SUPPORTED_ORIGINS = new Set(['env_var', 'code', 'remote_config', 'unknown'])
28
23
 
29
24
  function enable (config) {
30
- const telemetryConfig = config.telemetry
31
- enabled = telemetryConfig?.enabled && telemetryConfig.metrics
32
-
33
- if (enabled) {
34
- let origin = 'remote_config'
35
-
36
- if (config.appsec.enabled) {
37
- origin = config.getOrigin('appsec.enabled')
38
-
39
- if (!SUPPORTED_ORIGINS.has(origin)) {
40
- origin = 'unknown'
41
- }
42
- }
43
-
44
- const gauge = appsecMetrics.gauge('enabled', { origin })
45
- gauge.track()
46
-
47
- interval = setInterval(() => {
48
- gauge.track()
49
- }, telemetryConfig.heartbeatInterval)
50
- interval.unref?.()
51
- }
25
+ enabled = config.telemetry?.enabled && config.telemetry?.metrics
52
26
  }
53
27
 
54
28
  function disable () {
55
29
  enabled = false
56
- if (interval) {
57
- clearInterval(interval)
58
- interval = undefined
59
- }
60
30
  }
61
31
 
62
32
  function newStore () {
@@ -3,11 +3,18 @@
3
3
  const { storage } = require('../../datadog-core')
4
4
  const baggageStorage = storage('baggage')
5
5
 
6
+ /**
7
+ * @param {string} key
8
+ * @param {string} value
9
+ */
6
10
  function setBaggageItem (key, value) {
7
11
  storage('baggage').enterWith({ ...baggageStorage.getStore(), [key]: value })
8
12
  return storage('baggage').getStore()
9
13
  }
10
14
 
15
+ /**
16
+ * @param {string} key
17
+ */
11
18
  function getBaggageItem (key) {
12
19
  return storage('baggage').getStore()?.[key]
13
20
  }
@@ -16,6 +23,10 @@ function getAllBaggageItems () {
16
23
  return storage('baggage').getStore() ?? {}
17
24
  }
18
25
 
26
+ /**
27
+ * @param {string} keyToRemove
28
+ * @returns {Record<string, unknown>}
29
+ */
19
30
  function removeBaggageItem (keyToRemove) {
20
31
  const { [keyToRemove]: _, ...newBaggage } = storage('baggage').getStore()
21
32
  storage('baggage').enterWith(newBaggage)
@@ -245,7 +245,7 @@ function generateAndUploadPackFiles ({
245
245
 
246
246
  /**
247
247
  * This function uploads git metadata to CI Visibility's backend.
248
- */
248
+ */
249
249
  function sendGitMetadata (url, { isEvpProxy, evpProxyPrefix }, configRepositoryUrl, callback) {
250
250
  if (!isGitAvailable()) {
251
251
  return callback(new Error('Git is not available'))
@@ -77,6 +77,7 @@ module.exports = {
77
77
  'experimental.enableGetRumData': false,
78
78
  'experimental.exporter': undefined,
79
79
  'experimental.flaggingProvider.enabled': false,
80
+ 'experimental.flaggingProvider.initializationTimeoutMs': 30_000,
80
81
  flushInterval: 2000,
81
82
  flushMinSpans: 1000,
82
83
  gitMetadataEnabled: true,
@@ -2,6 +2,16 @@
2
2
 
3
3
  const inspector = require('./inspector_promises_polyfill')
4
4
 
5
- const session = module.exports = new inspector.Session()
5
+ /**
6
+ * @typedef {import('node:events').EventEmitter & {
7
+ * connect: () => void,
8
+ * connectToMainThread: () => void
9
+ * disconnect: () => void,
10
+ * post: (method: string, params?: object) => Promise<any>,
11
+ * }} CDPSession
12
+ */
13
+ const session = /** @type {CDPSession} */ (new inspector.Session())
6
14
 
7
15
  session.connectToMainThread()
16
+
17
+ module.exports = session
@@ -1,7 +1,7 @@
1
1
  'use strict'
2
2
 
3
3
  const { truncateSpan, normalizeSpan } = require('./tags-processors')
4
- const { Chunk, MsgpackEncoder } = require('../msgpack')
4
+ const { MsgpackChunk, MsgpackEncoder } = require('../msgpack')
5
5
  const log = require('../log')
6
6
  const { isTrue } = require('../util')
7
7
  const { memoize } = require('../log/utils')
@@ -27,8 +27,8 @@ class AgentEncoder {
27
27
  constructor (writer, limit = SOFT_LIMIT) {
28
28
  this._msgpack = new MsgpackEncoder()
29
29
  this._limit = limit
30
- this._traceBytes = new Chunk()
31
- this._stringBytes = new Chunk()
30
+ this._traceBytes = new MsgpackChunk()
31
+ this._stringBytes = new MsgpackChunk()
32
32
  this._writer = writer
33
33
  this._reset()
34
34
  this._debugEncoding = isTrue(getEnvironmentVariable('DD_TRACE_ENCODING_DEBUG'))
@@ -1,6 +1,6 @@
1
1
  'use strict'
2
2
  const { AgentEncoder } = require('./0.4')
3
- const { Chunk } = require('../msgpack')
3
+ const { MsgpackChunk } = require('../msgpack')
4
4
 
5
5
  const {
6
6
  distributionMetric,
@@ -15,7 +15,7 @@ const COVERAGE_KEYS_LENGTH = 2
15
15
  class CoverageCIVisibilityEncoder extends AgentEncoder {
16
16
  constructor () {
17
17
  super(...arguments)
18
- this._coverageBytes = new Chunk()
18
+ this._coverageBytes = new MsgpackChunk()
19
19
  this.form = new FormData()
20
20
  this._coveragesCount = 0
21
21
  this.reset()
@@ -9,7 +9,7 @@ const BaseWriter = require('../common/writer')
9
9
 
10
10
  const METRIC_PREFIX = 'datadog.tracer.node.exporter.agent'
11
11
 
12
- class Writer extends BaseWriter {
12
+ class AgentWriter extends BaseWriter {
13
13
  constructor ({ prioritySampler, lookup, protocolVersion, headers, config = {} }) {
14
14
  super(...arguments)
15
15
  const AgentEncoder = getEncoder(protocolVersion)
@@ -62,12 +62,6 @@ class Writer extends BaseWriter {
62
62
  }
63
63
  }
64
64
 
65
- function setHeader (headers, key, value) {
66
- if (value) {
67
- headers[key] = value
68
- }
69
- }
70
-
71
65
  function getEncoder (protocolVersion) {
72
66
  return protocolVersion === '0.5'
73
67
  ? require('../../encode/0.5').AgentEncoder
@@ -82,16 +76,15 @@ function makeRequest (version, data, count, url, headers, lookup, needsStartupLo
82
76
  ...headers,
83
77
  'Content-Type': 'application/msgpack',
84
78
  'Datadog-Meta-Tracer-Version': tracerVersion,
85
- 'X-Datadog-Trace-Count': String(count)
79
+ 'X-Datadog-Trace-Count': String(count),
80
+ 'Datadog-Meta-Lang': 'nodejs',
81
+ 'Datadog-Meta-Lang-Version': process.version,
82
+ 'Datadog-Meta-Lang-Interpreter': process.jsEngine || 'v8'
86
83
  },
87
84
  lookup,
88
85
  url
89
86
  }
90
87
 
91
- setHeader(options.headers, 'Datadog-Meta-Lang', 'nodejs')
92
- setHeader(options.headers, 'Datadog-Meta-Lang-Version', process.version)
93
- setHeader(options.headers, 'Datadog-Meta-Lang-Interpreter', process.jsEngine || 'v8')
94
-
95
88
  log.debug('Request to the agent: %j', options)
96
89
 
97
90
  request(data, options, (err, res, status) => {
@@ -105,4 +98,4 @@ function makeRequest (version, data, count, url, headers, lookup, needsStartupLo
105
98
  })
106
99
  }
107
100
 
108
- module.exports = Writer
101
+ module.exports = AgentWriter
@@ -53,8 +53,7 @@ function _extractModuleNameAndHandlerPath (handler) {
53
53
  const FUNCTION_EXPR = /^([^.]*)\.(.*)$/
54
54
  const match = handler.match(FUNCTION_EXPR)
55
55
  if (!match || match.length !== 3) {
56
- // Malformed Handler Name
57
- return // TODO: throw error
56
+ throw new Error(`Malformed handler name: ${handler}`)
58
57
  }
59
58
  return [match[1], match[2]] // [module, handler-path]
60
59
  }
@@ -12,7 +12,7 @@ const telemetry = require('./telemetry')
12
12
  const LLMObsSpanProcessor = require('./span_processor')
13
13
 
14
14
  const { channel } = require('dc-polyfill')
15
- const spanProcessCh = channel('dd-trace:span:process')
15
+ const spanFinishCh = channel('dd-trace:span:finish')
16
16
  const evalMetricAppendCh = channel('llmobs:eval-metric:append')
17
17
  const flushCh = channel('llmobs:writers:flush')
18
18
  const injectCh = channel('dd-trace:span:inject')
@@ -62,7 +62,7 @@ function enable (config) {
62
62
  // span processing
63
63
  spanProcessor = new LLMObsSpanProcessor(config)
64
64
  spanProcessor.setWriter(spanWriter)
65
- spanProcessCh.subscribe(handleSpanProcess)
65
+ spanFinishCh.subscribe(handleSpanProcess)
66
66
 
67
67
  // distributed tracing for llmobs
68
68
  injectCh.subscribe(handleLLMObsParentIdInjection)
@@ -86,7 +86,7 @@ function enable (config) {
86
86
  function disable () {
87
87
  if (evalMetricAppendCh.hasSubscribers) evalMetricAppendCh.unsubscribe(handleEvalMetricAppend)
88
88
  if (flushCh.hasSubscribers) flushCh.unsubscribe(handleFlush)
89
- if (spanProcessCh.hasSubscribers) spanProcessCh.unsubscribe(handleSpanProcess)
89
+ if (spanFinishCh.hasSubscribers) spanFinishCh.unsubscribe(handleSpanProcess)
90
90
  if (injectCh.hasSubscribers) injectCh.unsubscribe(handleLLMObsParentIdInjection)
91
91
  if (registerUserSpanProcessorCh.hasSubscribers) registerUserSpanProcessorCh.unsubscribe(handleRegisterProcessor)
92
92
 
@@ -133,8 +133,8 @@ function handleRegisterProcessor (userSpanProcessor) {
133
133
  spanProcessor.setUserSpanProcessor(userSpanProcessor)
134
134
  }
135
135
 
136
- function handleSpanProcess (data) {
137
- spanProcessor.process(data)
136
+ function handleSpanProcess (span) {
137
+ spanProcessor.process(span)
138
138
  }
139
139
 
140
140
  function handleEvalMetricAppend (payload) {
@@ -75,6 +75,12 @@ class NoopLLMObs {
75
75
  submitEvaluation (llmobsSpanContext, options) {}
76
76
 
77
77
  flush () {}
78
+
79
+ registerProcessor (processor) {}
80
+
81
+ deregisterProcessor () {}
82
+
83
+ annotationContext (options, fn) { return fn() }
78
84
  }
79
85
 
80
86
  module.exports = NoopLLMObs
@@ -79,6 +79,7 @@ class VercelAILLMObsPlugin extends BaseLLMObsPlugin {
79
79
  *
80
80
  * We use the tool description as the next best identifier for a tool.
81
81
  *
82
+ * @param {string} toolName
82
83
  * @param {string} toolDescription
83
84
  * @returns {string | undefined}
84
85
  */
@@ -67,6 +67,12 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
67
67
  if (!error) {
68
68
  const metrics = this._extractMetrics(response)
69
69
  this._tagger.tagMetrics(span, metrics)
70
+
71
+ const responseModel = response.model
72
+ if (responseModel) {
73
+ // override the model name with the response model (more accurate)
74
+ this._tagger.tagModelName(span, responseModel)
75
+ }
70
76
  }
71
77
  }
72
78
 
@@ -85,11 +91,11 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
85
91
 
86
92
  if (tokenUsage) {
87
93
  // Responses API uses input_tokens, Chat/Completions use prompt_tokens
88
- const inputTokens = tokenUsage.input_tokens ?? tokenUsage.prompt_tokens
94
+ const inputTokens = tokenUsage.input_tokens ?? tokenUsage.prompt_tokens ?? 0
89
95
  if (inputTokens !== undefined) metrics.inputTokens = inputTokens
90
96
 
91
97
  // Responses API uses output_tokens, Chat/Completions use completion_tokens
92
- const outputTokens = tokenUsage.output_tokens ?? tokenUsage.completion_tokens
98
+ const outputTokens = tokenUsage.output_tokens ?? tokenUsage.completion_tokens ?? 0
93
99
  if (outputTokens !== undefined) metrics.outputTokens = outputTokens
94
100
 
95
101
  const totalTokens = tokenUsage.total_tokens || (inputTokens + outputTokens)
@@ -105,7 +111,7 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
105
111
  } else if (tokenUsage.prompt_tokens_details) {
106
112
  // Chat/Completions API - only include if > 0
107
113
  const cacheReadTokens = tokenUsage.prompt_tokens_details.cached_tokens
108
- if (cacheReadTokens) {
114
+ if (cacheReadTokens != null) {
109
115
  metrics.cacheReadTokens = cacheReadTokens
110
116
  }
111
117
  }
@@ -159,6 +165,16 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
159
165
  _tagChatCompletion (span, inputs, response, error) {
160
166
  const { messages, model, ...parameters } = inputs
161
167
 
168
+ const metadata = Object.entries(parameters).reduce((obj, [key, value]) => {
169
+ if (!['tools', 'functions'].includes(key)) {
170
+ obj[key] = value
171
+ }
172
+
173
+ return obj
174
+ }, {})
175
+
176
+ this._tagger.tagMetadata(span, metadata)
177
+
162
178
  if (error) {
163
179
  this._tagger.tagLLMIO(span, messages, [{ content: '' }])
164
180
  return
@@ -200,16 +216,6 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
200
216
  }
201
217
 
202
218
  this._tagger.tagLLMIO(span, messages, outputMessages)
203
-
204
- const metadata = Object.entries(parameters).reduce((obj, [key, value]) => {
205
- if (!['tools', 'functions'].includes(key)) {
206
- obj[key] = value
207
- }
208
-
209
- return obj
210
- }, {})
211
-
212
- this._tagger.tagMetadata(span, metadata)
213
219
  }
214
220
 
215
221
  #tagResponse (span, inputs, response, error) {
@@ -269,6 +275,15 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
269
275
  inputMessages.push({ role: 'user', content: input })
270
276
  }
271
277
 
278
+ const inputMetadata = Object.entries(parameters).reduce((obj, [key, value]) => {
279
+ if (allowedParamKeys.has(key)) {
280
+ obj[key] = value
281
+ }
282
+ return obj
283
+ }, {})
284
+
285
+ this._tagger.tagMetadata(span, inputMetadata)
286
+
272
287
  if (error) {
273
288
  this._tagger.tagLLMIO(span, inputMessages, [{ content: '' }])
274
289
  return
@@ -287,17 +302,13 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
287
302
  for (const item of response.output) {
288
303
  // Handle reasoning type (reasoning responses)
289
304
  if (item.type === 'reasoning') {
290
- // Extract reasoning text from summary
291
- let reasoningText = ''
292
- if (Array.isArray(item.summary) && item.summary.length > 0) {
293
- const summaryItem = item.summary[0]
294
- if (summaryItem.type === 'summary_text' && summaryItem.text) {
295
- reasoningText = summaryItem.text
296
- }
297
- }
298
305
  outputMessages.push({
299
306
  role: 'reasoning',
300
- content: reasoningText
307
+ content: JSON.stringify({
308
+ summary: item.summary ?? [],
309
+ encrypted_content: item.encrypted_content ?? null,
310
+ id: item.id ?? ''
311
+ })
301
312
  })
302
313
  } else if (item.type === 'function_call') {
303
314
  // Handle function_call type (responses API tool calls)
@@ -369,24 +380,19 @@ class OpenAiLLMObsPlugin extends LLMObsPlugin {
369
380
 
370
381
  this._tagger.tagLLMIO(span, inputMessages, outputMessages)
371
382
 
372
- const metadata = Object.entries(parameters).reduce((obj, [key, value]) => {
373
- if (allowedParamKeys.has(key)) {
374
- obj[key] = value
375
- }
376
- return obj
377
- }, {})
383
+ const outputMetadata = {}
378
384
 
379
385
  // Add fields from response object (convert numbers to floats)
380
- if (response.temperature !== undefined) metadata.temperature = Number(response.temperature)
381
- if (response.top_p !== undefined) metadata.top_p = Number(response.top_p)
382
- if (response.tool_choice !== undefined) metadata.tool_choice = response.tool_choice
383
- if (response.truncation !== undefined) metadata.truncation = response.truncation
384
- if (response.text !== undefined) metadata.text = response.text
386
+ if (response.temperature !== undefined) outputMetadata.temperature = Number(response.temperature)
387
+ if (response.top_p !== undefined) outputMetadata.top_p = Number(response.top_p)
388
+ if (response.tool_choice !== undefined) outputMetadata.tool_choice = response.tool_choice
389
+ if (response.truncation !== undefined) outputMetadata.truncation = response.truncation
390
+ if (response.text !== undefined) outputMetadata.text = response.text
385
391
  if (response.usage?.output_tokens_details?.reasoning_tokens !== undefined) {
386
- metadata.reasoning_tokens = response.usage.output_tokens_details.reasoning_tokens
392
+ outputMetadata.reasoning_tokens = response.usage.output_tokens_details.reasoning_tokens
387
393
  }
388
394
 
389
- this._tagger.tagMetadata(span, metadata)
395
+ this._tagger.tagMetadata(span, outputMetadata) // update the metadata with the output metadata
390
396
  }
391
397
  }
392
398
 
@@ -369,7 +369,7 @@ class LLMObs extends NoopLLMObs {
369
369
  err = 'invalid_metric_label'
370
370
  throw new Error('label must be the specified name of the evaluation metric')
371
371
  }
372
- if (!metricType || !['categorical', 'score'].includes(metricType)) {
372
+ if (!metricType || !['categorical', 'score', 'boolean'].includes(metricType)) {
373
373
  err = 'invalid_metric_type'
374
374
  throw new Error('metricType must be one of "categorical" or "score"')
375
375
  }
@@ -381,6 +381,10 @@ class LLMObs extends NoopLLMObs {
381
381
  err = 'invalid_metric_value'
382
382
  throw new Error('value must be a number for a score metric.')
383
383
  }
384
+ if (metricType === 'boolean' && typeof value !== 'boolean') {
385
+ err = 'invalid_metric_value'
386
+ throw new Error('value must be a boolean for a boolean metric')
387
+ }
384
388
 
385
389
  const evaluationTags = {
386
390
  'ddtrace.version': tracerVersion,
@@ -72,7 +72,7 @@ class LLMObsSpanProcessor {
72
72
  }
73
73
 
74
74
  // TODO: instead of relying on the tagger's weakmap registry, can we use some namespaced storage correlation?
75
- process ({ span }) {
75
+ process (span) {
76
76
  if (!this.#config.llmobs.enabled) return
77
77
  // if the span is not in our private tagger map, it is not an llmobs span
78
78
  if (!LLMObsTagger.tagMap.has(span)) return
@@ -155,7 +155,7 @@ class LLMObsSpanProcessor {
155
155
  llmObsSpan._tags = tags
156
156
 
157
157
  const processedSpan = this.#runProcessor(llmObsSpan)
158
- if (processedSpan === null) return null
158
+ if (processedSpan === undefined) return null
159
159
 
160
160
  if (processedSpan.input) {
161
161
  if (inputType === 'messages') {
@@ -269,7 +269,7 @@ class LLMObsSpanProcessor {
269
269
  /**
270
270
  * Runs the user span processor, emitting telemetry and adding some guardrails against invalid return types
271
271
  * @param {LLMObservabilitySpan} span
272
- * @returns {LLMObservabilitySpan | null}
272
+ * @returns {LLMObservabilitySpan | undefined}
273
273
  */
274
274
  #runProcessor (span) {
275
275
  const processor = this.#userSpanProcessor
@@ -279,12 +279,12 @@ class LLMObsSpanProcessor {
279
279
 
280
280
  try {
281
281
  const processedLLMObsSpan = processor(span)
282
- if (processedLLMObsSpan === null) return null
282
+ if (processedLLMObsSpan === null) return
283
283
 
284
284
  if (!(processedLLMObsSpan instanceof LLMObservabilitySpan)) {
285
285
  error = true
286
286
  logger.warn('User span processor must return an instance of an LLMObservabilitySpan or null, dropping span.')
287
- return null
287
+ return
288
288
  }
289
289
 
290
290
  return processedLLMObsSpan
@@ -85,7 +85,7 @@ class LLMObsTagger {
85
85
  if (name) this._setTag(span, NAME, name)
86
86
 
87
87
  this._setTag(span, SPAN_KIND, kind)
88
- if (modelName) this._setTag(span, MODEL_NAME, modelName)
88
+ if (modelName) this.tagModelName(span, modelName)
89
89
  if (modelProvider) this._setTag(span, MODEL_PROVIDER, modelProvider)
90
90
 
91
91
  sessionId = sessionId || registry.get(parent)?.[SESSION_ID]
@@ -194,6 +194,10 @@ class LLMObsTagger {
194
194
  this._setTag(span, SPAN_KIND, newKind)
195
195
  }
196
196
 
197
+ tagModelName (span, modelName) {
198
+ this._setTag(span, MODEL_NAME, modelName)
199
+ }
200
+
197
201
  #tagText (span, data, key) {
198
202
  if (data) {
199
203
  if (typeof data === 'string') {
@@ -324,7 +328,7 @@ class LLMObsTagger {
324
328
 
325
329
  for (const message of data) {
326
330
  if (typeof message === 'string') {
327
- messages.push({ content: message })
331
+ messages.push({ content: message, role: '' })
328
332
  continue
329
333
  }
330
334
  if (message == null || typeof message !== 'object') {
@@ -332,20 +336,30 @@ class LLMObsTagger {
332
336
  continue
333
337
  }
334
338
 
335
- const { content = '', role } = message
336
- const toolCalls = message.toolCalls
337
- const toolResults = message.toolResults
338
- const toolId = message.toolId
339
- const messageObj = { content }
339
+ const {
340
+ role = '',
341
+ content,
342
+ toolCalls,
343
+ toolResults,
344
+ toolId
345
+ } = message
346
+ const messageObj = {}
340
347
 
341
348
  let condition = this.#tagConditionalString(role, 'Message role', messageObj, 'role')
342
349
 
343
- const valid = typeof content === 'string'
344
- if (!valid) {
345
- this.#handleFailure('Message content must be a string.', 'invalid_io_messages')
350
+ if (
351
+ content == null &&
352
+ toolCalls == null &&
353
+ toolResults == null
354
+ ) {
355
+ messageObj.content = ''
356
+ }
357
+
358
+ if (content != null) {
359
+ condition = this.#tagConditionalString(content, 'Message content', messageObj, 'content') && condition
346
360
  }
347
361
 
348
- if (toolCalls) {
362
+ if (toolCalls != null) {
349
363
  const filteredToolCalls = this.#filterToolCalls(toolCalls)
350
364
 
351
365
  if (filteredToolCalls.length) {
@@ -353,7 +367,7 @@ class LLMObsTagger {
353
367
  }
354
368
  }
355
369
 
356
- if (toolResults) {
370
+ if (toolResults != null) {
357
371
  const filteredToolResults = this.#filterToolResults(toolResults)
358
372
 
359
373
  if (filteredToolResults.length) {
@@ -363,13 +377,13 @@ class LLMObsTagger {
363
377
 
364
378
  if (toolId) {
365
379
  if (role === 'tool') {
366
- condition = this.#tagConditionalString(toolId, 'Tool ID', messageObj, 'tool_id')
380
+ condition = this.#tagConditionalString(toolId, 'Tool ID', messageObj, 'tool_id') && condition
367
381
  } else {
368
382
  log.warn(`Tool ID for tool message not associated with a "tool" role, instead got "${role}"`)
369
383
  }
370
384
  }
371
385
 
372
- if (valid && condition) {
386
+ if (condition) {
373
387
  messages.push(messageObj)
374
388
  }
375
389
  }
@@ -380,7 +394,7 @@ class LLMObsTagger {
380
394
  }
381
395
 
382
396
  #tagConditionalString (data, type, carrier, key) {
383
- if (!data) return true
397
+ if (data == null) return true
384
398
  if (typeof data !== 'string') {
385
399
  this.#handleFailure(`"${type}" must be a string.`)
386
400
  return false
@@ -390,7 +404,7 @@ class LLMObsTagger {
390
404
  }
391
405
 
392
406
  #tagConditionalNumber (data, type, carrier, key) {
393
- if (!data) return true
407
+ if (data == null) return true
394
408
  if (typeof data !== 'number') {
395
409
  this.#handleFailure(`"${type}" must be a number.`)
396
410
  return false
@@ -400,7 +414,7 @@ class LLMObsTagger {
400
414
  }
401
415
 
402
416
  #tagConditionalObject (data, type, carrier, key) {
403
- if (!data) return true
417
+ if (data == null) return true
404
418
  if (typeof data !== 'object') {
405
419
  this.#handleFailure(`"${type}" must be an object.`)
406
420
  return false
@@ -7,7 +7,7 @@ const DEFAULT_MIN_SIZE = 2 * 1024 * 1024 // 2MB
7
7
  * interfaces so that it can be used seamlessly by any encoder code that expects
8
8
  * either.
9
9
  */
10
- class Chunk {
10
+ class MsgpackChunk {
11
11
  constructor (minSize = DEFAULT_MIN_SIZE) {
12
12
  this.buffer = Buffer.allocUnsafe(minSize)
13
13
  this.view = new DataView(this.buffer.buffer)
@@ -66,4 +66,4 @@ class Chunk {
66
66
  }
67
67
  }
68
68
 
69
- module.exports = Chunk
69
+ module.exports = MsgpackChunk
@@ -1,11 +1,10 @@
1
1
  'use strict'
2
2
 
3
- const Chunk = require('./chunk')
3
+ const MsgpackChunk = require('./chunk')
4
4
 
5
5
  class MsgpackEncoder {
6
6
  encode (value) {
7
- const bytes = new Chunk()
8
-
7
+ const bytes = new MsgpackChunk()
9
8
  this.encodeValue(bytes, value)
10
9
 
11
10
  return bytes.buffer.subarray(0, bytes.length)
@@ -1,6 +1,6 @@
1
1
  'use strict'
2
2
 
3
- const Chunk = require('./chunk')
3
+ const MsgpackChunk = require('./chunk')
4
4
  const { MsgpackEncoder } = require('./encoder')
5
5
 
6
- module.exports = { Chunk, MsgpackEncoder }
6
+ module.exports = { MsgpackChunk, MsgpackEncoder }
@@ -15,15 +15,17 @@ class FlaggingProvider extends DatadogNodeServerProvider {
15
15
  * @param {import('../config')} config - Tracer configuration object
16
16
  */
17
17
  constructor (tracer, config) {
18
- // Call parent constructor with required options
18
+ // Call parent constructor with required options and timeout
19
19
  super({
20
- exposureChannel: channel(EXPOSURE_CHANNEL)
20
+ exposureChannel: channel(EXPOSURE_CHANNEL),
21
+ initializationTimeoutMs: config.experimental.flaggingProvider.initializationTimeoutMs
21
22
  })
22
23
 
23
24
  this._tracer = tracer
24
25
  this._config = config
25
26
 
26
- log.debug(this.constructor.name + ' created')
27
+ log.debug(this.constructor.name + ' created with timeout: ' +
28
+ config.experimental.flaggingProvider.initializationTimeoutMs + 'ms')
27
29
  }
28
30
 
29
31
  /**
@@ -7,7 +7,7 @@ const os = require('os')
7
7
  */
8
8
 
9
9
  /**
10
- * @fileoverview OpenTelemetry Logs Implementation for dd-trace-js
10
+ * OpenTelemetry Logs Implementation for `dd-trace-js`
11
11
  *
12
12
  * This package provides a custom OpenTelemetry Logs implementation that integrates
13
13
  * with the Datadog tracing library. It includes all necessary components for