dd-trace 5.33.0 → 5.33.1

This diff shows the changes between publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "dd-trace",
3
- "version": "5.33.0",
3
+ "version": "5.33.1",
4
4
  "description": "Datadog APM tracing client for JavaScript",
5
5
  "main": "index.js",
6
6
  "typings": "index.d.ts",
@@ -86,7 +86,7 @@
86
86
  "@datadog/native-iast-rewriter": "2.6.1",
87
87
  "@datadog/native-iast-taint-tracking": "3.2.0",
88
88
  "@datadog/native-metrics": "^3.1.0",
89
- "@datadog/pprof": "5.5.0",
89
+ "@datadog/pprof": "5.5.1",
90
90
  "@datadog/sketches-js": "^2.1.0",
91
91
  "@isaacs/ttlcache": "^1.4.1",
92
92
  "@opentelemetry/api": ">=1.0.0 <1.9.0",
@@ -36,9 +36,17 @@ class AmqplibProducerPlugin extends ProducerPlugin {
36
36
  if (this.config.dsmEnabled) {
37
37
  const hasRoutingKey = fields.routingKey != null
38
38
  const payloadSize = getAmqpMessageSize({ content: message, headers: fields.headers })
39
+
40
+ // there are two ways to send messages in RabbitMQ:
41
+ // 1. using an exchange and a routing key in which DSM connects via the exchange
42
+ // 2. using an unnamed exchange and a routing key in which DSM connects via the topic
43
+ const exchangeOrTopicTag = hasRoutingKey && !fields.exchange
44
+ ? `topic:${fields.routingKey}`
45
+ : `exchange:${fields.exchange}`
46
+
39
47
  const dataStreamsContext = this.tracer
40
48
  .setCheckpoint(
41
- ['direction:out', `exchange:${fields.exchange}`, `has_routing_key:${hasRoutingKey}`, 'type:rabbitmq']
49
+ ['direction:out', exchangeOrTopicTag, `has_routing_key:${hasRoutingKey}`, 'type:rabbitmq']
42
50
  , span, payloadSize)
43
51
  DsmPathwayCodec.encode(dataStreamsContext, fields.headers)
44
52
  }
@@ -1,16 +1,13 @@
1
1
  'use strict'
2
2
 
3
- const Sampler = require('../../../dd-trace/src/sampler')
3
+ const makeUtilities = require('../../../dd-trace/src/plugins/util/llm')
4
4
 
5
- const RE_NEWLINE = /\n/g
6
- const RE_TAB = /\t/g
7
-
8
- // TODO: should probably refactor the OpenAI integration to use a shared LLMTracingPlugin base class
9
- // This logic isn't particular to LangChain
10
5
  class LangChainHandler {
11
- constructor (config) {
12
- this.config = config
13
- this.sampler = new Sampler(config.spanPromptCompletionSampleRate)
6
+ constructor (tracerConfig) {
7
+ const utilities = makeUtilities('langchain', tracerConfig)
8
+
9
+ this.normalize = utilities.normalize
10
+ this.isPromptCompletionSampled = utilities.isPromptCompletionSampled
14
11
  }
15
12
 
16
13
  // no-op for default handler
@@ -27,27 +24,6 @@ class LangChainHandler {
27
24
 
28
25
  // no-op for default handler
29
26
  extractModel (instance) {}
30
-
31
- normalize (text) {
32
- if (!text) return
33
- if (typeof text !== 'string' || !text || (typeof text === 'string' && text.length === 0)) return
34
-
35
- const max = this.config.spanCharLimit
36
-
37
- text = text
38
- .replace(RE_NEWLINE, '\\n')
39
- .replace(RE_TAB, '\\t')
40
-
41
- if (text.length > max) {
42
- return text.substring(0, max) + '...'
43
- }
44
-
45
- return text
46
- }
47
-
48
- isPromptCompletionSampled () {
49
- return this.sampler.isSampled()
50
- }
51
27
  }
52
28
 
53
29
  module.exports = LangChainHandler
@@ -26,13 +26,12 @@ class LangChainTracingPlugin extends TracingPlugin {
26
26
  constructor () {
27
27
  super(...arguments)
28
28
 
29
- const langchainConfig = this._tracerConfig.langchain || {}
30
29
  this.handlers = {
31
- chain: new LangChainChainHandler(langchainConfig),
32
- chat_model: new LangChainChatModelHandler(langchainConfig),
33
- llm: new LangChainLLMHandler(langchainConfig),
34
- embedding: new LangChainEmbeddingHandler(langchainConfig),
35
- default: new LangChainHandler(langchainConfig)
30
+ chain: new LangChainChainHandler(this._tracerConfig),
31
+ chat_model: new LangChainChatModelHandler(this._tracerConfig),
32
+ llm: new LangChainLLMHandler(this._tracerConfig),
33
+ embedding: new LangChainEmbeddingHandler(this._tracerConfig),
34
+ default: new LangChainHandler(this._tracerConfig)
36
35
  }
37
36
  }
38
37
 
@@ -9,12 +9,9 @@ const Sampler = require('../../dd-trace/src/sampler')
9
9
  const { MEASURED } = require('../../../ext/tags')
10
10
  const { estimateTokens } = require('./token-estimator')
11
11
 
12
- // String#replaceAll unavailable on Node.js@v14 (dd-trace@<=v3)
13
- const RE_NEWLINE = /\n/g
14
- const RE_TAB = /\t/g
12
+ const makeUtilities = require('../../dd-trace/src/plugins/util/llm')
15
13
 
16
- // TODO: In the future we should refactor config.js to make it requirable
17
- let MAX_TEXT_LEN = 128
14
+ let normalize
18
15
 
19
16
  function safeRequire (path) {
20
17
  try {
@@ -44,9 +41,11 @@ class OpenAiTracingPlugin extends TracingPlugin {
44
41
 
45
42
  this.sampler = new Sampler(0.1) // default 10% log sampling
46
43
 
47
- // hoist the max length env var to avoid making all of these functions a class method
44
+ // hoist the normalize function to avoid making all of these functions a class method
48
45
  if (this._tracerConfig) {
49
- MAX_TEXT_LEN = this._tracerConfig.openaiSpanCharLimit
46
+ const utilities = makeUtilities('openai', this._tracerConfig)
47
+
48
+ normalize = utilities.normalize
50
49
  }
51
50
  }
52
51
 
@@ -116,7 +115,7 @@ class OpenAiTracingPlugin extends TracingPlugin {
116
115
  // createEdit, createEmbedding, createModeration
117
116
  if (payload.input) {
118
117
  const normalized = normalizeStringOrTokenArray(payload.input, false)
119
- tags['openai.request.input'] = truncateText(normalized)
118
+ tags['openai.request.input'] = normalize(normalized)
120
119
  openaiStore.input = normalized
121
120
  }
122
121
 
@@ -594,7 +593,7 @@ function commonImageResponseExtraction (tags, body) {
594
593
  for (let i = 0; i < body.data.length; i++) {
595
594
  const image = body.data[i]
596
595
  // exactly one of these two options is provided
597
- tags[`openai.response.images.${i}.url`] = truncateText(image.url)
596
+ tags[`openai.response.images.${i}.url`] = normalize(image.url)
598
597
  tags[`openai.response.images.${i}.b64_json`] = image.b64_json && 'returned'
599
598
  }
600
599
  }
@@ -731,14 +730,14 @@ function commonCreateResponseExtraction (tags, body, openaiStore, methodName) {
731
730
 
732
731
  tags[`openai.response.choices.${choiceIdx}.finish_reason`] = choice.finish_reason
733
732
  tags[`openai.response.choices.${choiceIdx}.logprobs`] = specifiesLogProb ? 'returned' : undefined
734
- tags[`openai.response.choices.${choiceIdx}.text`] = truncateText(choice.text)
733
+ tags[`openai.response.choices.${choiceIdx}.text`] = normalize(choice.text)
735
734
 
736
735
  // createChatCompletion only
737
736
  const message = choice.message || choice.delta // delta for streamed responses
738
737
  if (message) {
739
738
  tags[`openai.response.choices.${choiceIdx}.message.role`] = message.role
740
- tags[`openai.response.choices.${choiceIdx}.message.content`] = truncateText(message.content)
741
- tags[`openai.response.choices.${choiceIdx}.message.name`] = truncateText(message.name)
739
+ tags[`openai.response.choices.${choiceIdx}.message.content`] = normalize(message.content)
740
+ tags[`openai.response.choices.${choiceIdx}.message.name`] = normalize(message.name)
742
741
  if (message.tool_calls) {
743
742
  const toolCalls = message.tool_calls
744
743
  for (let toolIdx = 0; toolIdx < toolCalls.length; toolIdx++) {
@@ -795,24 +794,6 @@ function truncateApiKey (apiKey) {
795
794
  return apiKey && `sk-...${apiKey.substr(apiKey.length - 4)}`
796
795
  }
797
796
 
798
- /**
799
- * for cleaning up prompt and response
800
- */
801
- function truncateText (text) {
802
- if (!text) return
803
- if (typeof text !== 'string' || !text || (typeof text === 'string' && text.length === 0)) return
804
-
805
- text = text
806
- .replace(RE_NEWLINE, '\\n')
807
- .replace(RE_TAB, '\\t')
808
-
809
- if (text.length > MAX_TEXT_LEN) {
810
- return text.substring(0, MAX_TEXT_LEN) + '...'
811
- }
812
-
813
- return text
814
- }
815
-
816
797
  function tagChatCompletionRequestContent (contents, messageIdx, tags) {
817
798
  if (typeof contents === 'string') {
818
799
  tags[`openai.request.messages.${messageIdx}.content`] = contents
@@ -824,10 +805,10 @@ function tagChatCompletionRequestContent (contents, messageIdx, tags) {
824
805
  const type = content.type
825
806
  tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.type`] = content.type
826
807
  if (type === 'text') {
827
- tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.text`] = truncateText(content.text)
808
+ tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.text`] = normalize(content.text)
828
809
  } else if (type === 'image_url') {
829
810
  tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.image_url.url`] =
830
- truncateText(content.image_url.url)
811
+ normalize(content.image_url.url)
831
812
  }
832
813
  // unsupported type otherwise, won't be tagged
833
814
  }
@@ -1004,7 +985,7 @@ function normalizeStringOrTokenArray (input, truncate) {
1004
985
  const normalized = Array.isArray(input)
1005
986
  ? `[${input.join(', ')}]` // "[1, 2, 999]"
1006
987
  : input // "foo"
1007
- return truncate ? truncateText(normalized) : normalized
988
+ return truncate ? normalize(normalized) : normalized
1008
989
  }
1009
990
 
1010
991
  function defensiveArrayLength (maybeArray) {
@@ -522,7 +522,7 @@ class Config {
522
522
  this._setValue(defaults, 'inferredProxyServicesEnabled', false)
523
523
  this._setValue(defaults, 'memcachedCommandEnabled', false)
524
524
  this._setValue(defaults, 'openAiLogsEnabled', false)
525
- this._setValue(defaults, 'openaiSpanCharLimit', 128)
525
+ this._setValue(defaults, 'openai.spanCharLimit', 128)
526
526
  this._setValue(defaults, 'peerServiceMapping', {})
527
527
  this._setValue(defaults, 'plugins', true)
528
528
  this._setValue(defaults, 'port', '8126')
@@ -805,7 +805,7 @@ class Config {
805
805
  // Requires an accompanying DD_APM_OBFUSCATION_MEMCACHED_KEEP_COMMAND=true in the agent
806
806
  this._setBoolean(env, 'memcachedCommandEnabled', DD_TRACE_MEMCACHED_COMMAND_ENABLED)
807
807
  this._setBoolean(env, 'openAiLogsEnabled', DD_OPENAI_LOGS_ENABLED)
808
- this._setValue(env, 'openaiSpanCharLimit', maybeInt(DD_OPENAI_SPAN_CHAR_LIMIT))
808
+ this._setValue(env, 'openai.spanCharLimit', maybeInt(DD_OPENAI_SPAN_CHAR_LIMIT))
809
809
  this._envUnprocessed.openaiSpanCharLimit = DD_OPENAI_SPAN_CHAR_LIMIT
810
810
  if (DD_TRACE_PEER_SERVICE_MAPPING) {
811
811
  this._setValue(env, 'peerServiceMapping', fromEntries(
@@ -0,0 +1,35 @@
1
+ const Sampler = require('../../sampler')
2
+
3
+ const RE_NEWLINE = /\n/g
4
+ const RE_TAB = /\t/g
5
+
6
+ function normalize (text, limit = 128) {
7
+ if (!text) return
8
+ if (typeof text !== 'string' || !text || (typeof text === 'string' && text.length === 0)) return
9
+
10
+ text = text
11
+ .replace(RE_NEWLINE, '\\n')
12
+ .replace(RE_TAB, '\\t')
13
+
14
+ if (text.length > limit) {
15
+ return text.substring(0, limit) + '...'
16
+ }
17
+
18
+ return text
19
+ }
20
+
21
+ function isPromptCompletionSampled (sampler) {
22
+ return sampler.isSampled()
23
+ }
24
+
25
+ module.exports = function (integrationName, tracerConfig) {
26
+ const integrationConfig = tracerConfig[integrationName] || {}
27
+ const { spanCharLimit, spanPromptCompletionSampleRate } = integrationConfig
28
+
29
+ const sampler = new Sampler(spanPromptCompletionSampleRate ?? 1.0)
30
+
31
+ return {
32
+ normalize: str => normalize(str, spanCharLimit),
33
+ isPromptCompletionSampled: () => isPromptCompletionSampled(sampler)
34
+ }
35
+ }