ddtrace 3.11.0rc2__cp38-cp38-win_amd64.whl → 3.11.0rc3__cp38-cp38-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. ddtrace/_trace/sampling_rule.py +25 -33
  2. ddtrace/_trace/trace_handlers.py +9 -49
  3. ddtrace/_trace/utils_botocore/span_tags.py +48 -0
  4. ddtrace/_version.py +2 -2
  5. ddtrace/appsec/_constants.py +7 -0
  6. ddtrace/appsec/_handlers.py +11 -0
  7. ddtrace/appsec/_processor.py +1 -1
  8. ddtrace/contrib/internal/aiobotocore/patch.py +8 -0
  9. ddtrace/contrib/internal/boto/patch.py +14 -0
  10. ddtrace/contrib/internal/botocore/services/bedrock.py +3 -27
  11. ddtrace/contrib/internal/django/patch.py +31 -8
  12. ddtrace/contrib/internal/google_genai/_utils.py +2 -2
  13. ddtrace/contrib/internal/google_genai/patch.py +7 -7
  14. ddtrace/contrib/internal/google_generativeai/patch.py +7 -5
  15. ddtrace/contrib/internal/openai_agents/patch.py +44 -1
  16. ddtrace/contrib/internal/pytest/_plugin_v2.py +1 -1
  17. ddtrace/contrib/internal/vertexai/patch.py +7 -5
  18. ddtrace/ext/ci.py +20 -0
  19. ddtrace/ext/git.py +66 -11
  20. ddtrace/internal/_encoding.cp38-win_amd64.pyd +0 -0
  21. ddtrace/internal/_rand.cp38-win_amd64.pyd +0 -0
  22. ddtrace/internal/_tagset.cp38-win_amd64.pyd +0 -0
  23. ddtrace/internal/_threads.cp38-win_amd64.pyd +0 -0
  24. ddtrace/internal/ci_visibility/encoder.py +126 -55
  25. ddtrace/internal/datadog/profiling/dd_wrapper-unknown-amd64.dll +0 -0
  26. ddtrace/internal/datadog/profiling/ddup/_ddup.cp38-win_amd64.pyd +0 -0
  27. ddtrace/internal/datadog/profiling/ddup/dd_wrapper-unknown-amd64.dll +0 -0
  28. ddtrace/internal/endpoints.py +76 -0
  29. ddtrace/internal/native/_native.cp38-win_amd64.pyd +0 -0
  30. ddtrace/internal/schema/processor.py +6 -2
  31. ddtrace/internal/telemetry/metrics_namespaces.cp38-win_amd64.pyd +0 -0
  32. ddtrace/internal/telemetry/writer.py +18 -0
  33. ddtrace/llmobs/_constants.py +1 -0
  34. ddtrace/llmobs/_experiment.py +6 -0
  35. ddtrace/llmobs/_integrations/crewai.py +52 -3
  36. ddtrace/llmobs/_integrations/gemini.py +7 -7
  37. ddtrace/llmobs/_integrations/google_genai.py +10 -10
  38. ddtrace/llmobs/_integrations/{google_genai_utils.py → google_utils.py} +103 -7
  39. ddtrace/llmobs/_integrations/openai_agents.py +145 -0
  40. ddtrace/llmobs/_integrations/pydantic_ai.py +67 -26
  41. ddtrace/llmobs/_integrations/utils.py +68 -158
  42. ddtrace/llmobs/_integrations/vertexai.py +8 -8
  43. ddtrace/llmobs/_llmobs.py +5 -1
  44. ddtrace/llmobs/_utils.py +21 -0
  45. ddtrace/profiling/_threading.cp38-win_amd64.pyd +0 -0
  46. ddtrace/profiling/collector/_memalloc.cp38-win_amd64.pyd +0 -0
  47. ddtrace/profiling/collector/_task.cp38-win_amd64.pyd +0 -0
  48. ddtrace/profiling/collector/_traceback.cp38-win_amd64.pyd +0 -0
  49. ddtrace/profiling/collector/stack.cp38-win_amd64.pyd +0 -0
  50. ddtrace/settings/asm.py +9 -2
  51. ddtrace/vendor/psutil/_psutil_windows.cp38-win_amd64.pyd +0 -0
  52. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/METADATA +1 -1
  53. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/RECORD +60 -59
  54. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/LICENSE +0 -0
  55. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/LICENSE.Apache +0 -0
  56. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/LICENSE.BSD3 +0 -0
  57. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/NOTICE +0 -0
  58. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/WHEEL +0 -0
  59. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/entry_points.txt +0 -0
  60. {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/top_level.txt +0 -0
ddtrace/llmobs/_integrations/pydantic_ai.py CHANGED
@@ -2,9 +2,11 @@ from typing import Any
 from typing import Dict
 from typing import List
 from typing import Optional
+from typing import Tuple
 
 from ddtrace.internal.utils import get_argument_value
 from ddtrace.internal.utils.formats import format_trace_id
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import INPUT_VALUE
 from ddtrace.llmobs._constants import METADATA
@@ -45,13 +47,19 @@ class PydanticAIIntegration(BaseLLMIntegration):
         span._set_ctx_item(SPAN_KIND, kind)
         return span
 
-    def _set_base_span_tags(self, span: Span, model: Optional[str] = None, **kwargs) -> None:
+    def _set_base_span_tags(self, span: Span, model: Optional[Any] = None, **kwargs) -> None:
         if model:
-            span.set_tag("pydantic_ai.request.model", getattr(model, "model_name", ""))
-            system = getattr(model, "system", None)
-            if system:
-                system = PYDANTIC_AI_SYSTEM_TO_PROVIDER.get(system, system)
-                span.set_tag("pydantic_ai.request.provider", system)
+            model_name, provider = self._get_model_and_provider(model)
+            span.set_tag("pydantic_ai.request.model", model_name)
+            if provider:
+                span.set_tag("pydantic_ai.request.provider", provider)
+
+    def _get_model_and_provider(self, model: Optional[Any]) -> Tuple[str, str]:
+        model_name = getattr(model, "model_name", "")
+        system = getattr(model, "system", None)
+        if system:
+            system = PYDANTIC_AI_SYSTEM_TO_PROVIDER.get(system, system)
+        return model_name, system
 
     def _llmobs_set_tags(
         self,
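
Reviewer note: the extraction above lets span tagging and the new agent-manifest code share one model/provider lookup. A minimal standalone sketch of that lookup's behavior; the mapping entry and `FakeModel` below are illustrative, not the module's real data:

```python
# Sketch of the lookup logic in _get_model_and_provider (illustrative mapping).
PYDANTIC_AI_SYSTEM_TO_PROVIDER = {"google-gla": "google"}  # invented entry

def get_model_and_provider(model):
    model_name = getattr(model, "model_name", "")
    system = getattr(model, "system", None)
    if system:
        # Known "system" identifiers map to a provider name; unknown ones pass through.
        system = PYDANTIC_AI_SYSTEM_TO_PROVIDER.get(system, system)
    return model_name, system

class FakeModel:  # stand-in for a pydantic-ai model instance
    model_name = "gemini-1.5-flash"
    system = "google-gla"

print(get_model_and_provider(FakeModel()))  # ('gemini-1.5-flash', 'google')
```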
@@ -84,26 +92,8 @@ class PydanticAIIntegration(BaseLLMIntegration):
         from pydantic_ai.agent import AgentRun
 
         agent_instance = kwargs.get("instance", None)
-        if agent_instance:
-            agent_name = getattr(agent_instance, "name", None)
-            agent_instructions = getattr(agent_instance, "_instructions", None)
-            agent_system_prompts = getattr(agent_instance, "_system_prompts", None)
-            agent_tools = list(getattr(agent_instance, "_function_tools", {}).keys())
-            agent_model_settings = getattr(agent_instance, "model_settings", None)
-            metadata = {
-                "instructions": agent_instructions,
-                "system_prompts": agent_system_prompts,
-                "tools": agent_tools,
-            }
-            if agent_model_settings:
-                metadata["max_tokens"] = agent_model_settings.get("max_tokens", None)
-                metadata["temperature"] = agent_model_settings.get("temperature", None)
-            span._set_ctx_items(
-                {
-                    NAME: agent_name or "PydanticAI Agent",
-                    METADATA: metadata,
-                }
-            )
+        agent_name = getattr(agent_instance, "name", None)
+        self._tag_agent_manifest(span, kwargs, agent_instance)
         user_prompt = get_argument_value(args, kwargs, 0, "user_prompt")
         result = response
         if isinstance(result, AgentRun) and hasattr(result, "result"):
@@ -119,6 +109,7 @@ class PydanticAIIntegration(BaseLLMIntegration):
         metrics = self.extract_usage_metrics(response, kwargs)
         span._set_ctx_items(
             {
+                NAME: agent_name or "PydanticAI Agent",
                 INPUT_VALUE: user_prompt,
                 OUTPUT_VALUE: result,
                 METRICS: metrics,
@@ -145,6 +136,56 @@ class PydanticAIIntegration(BaseLLMIntegration):
         if not span.error:
             span._set_ctx_item(OUTPUT_VALUE, getattr(response, "content", ""))
 
+    def _tag_agent_manifest(self, span: Span, kwargs: Dict[str, Any], agent: Any) -> None:
+        if not agent:
+            return
+
+        manifest: Dict[str, Any] = {}
+        manifest["framework"] = "PydanticAI"
+        manifest["name"] = agent.name if hasattr(agent, "name") and agent.name else "PydanticAI Agent"
+        model = getattr(agent, "model", None)
+        if model:
+            model_name, _ = self._get_model_and_provider(model)
+            if model_name:
+                manifest["model"] = model_name
+        if hasattr(agent, "model_settings"):
+            manifest["model_settings"] = agent.model_settings
+        if hasattr(agent, "_instructions"):
+            manifest["instructions"] = agent._instructions
+        if hasattr(agent, "_system_prompts"):
+            manifest["system_prompts"] = agent._system_prompts
+        if hasattr(agent, "_function_tools"):
+            manifest["tools"] = self._get_agent_tools(agent._function_tools)
+        if kwargs.get("deps", None):
+            agent_dependencies = kwargs.get("deps", None)
+            manifest["dependencies"] = getattr(agent_dependencies, "__dict__", agent_dependencies)
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _get_agent_tools(self, tools: Any) -> List[Dict[str, Any]]:
+        if not tools:
+            return []
+        formatted_tools = []
+        for tool_name, tool_instance in tools.items():
+            tool_dict = {}
+            tool_dict["name"] = tool_name
+            if hasattr(tool_instance, "description"):
+                tool_dict["description"] = tool_instance.description
+            function_schema = getattr(tool_instance, "function_schema", {})
+            json_schema = getattr(function_schema, "json_schema", {})
+            required_params = {param: True for param in json_schema.get("required", [])}
+            parameters = {}
+            for param, schema in json_schema.get("properties", {}).items():
+                param_dict = {}
+                if "type" in schema:
+                    param_dict["type"] = schema["type"]
+                if param in required_params:
+                    param_dict["required"] = True
+                parameters[param] = param_dict
+            tool_dict["parameters"] = parameters
+            formatted_tools.append(tool_dict)
+        return formatted_tools
+
     def extract_usage_metrics(self, response: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
         response = kwargs.get("streamed_run_result", None) or response
         usage = None
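
Reviewer note: for orientation, `_tag_agent_manifest` builds a plain dict and stores it under the `AGENT_MANIFEST` ctx key. A hypothetical example of the shape it produces for a small agent; every value below is invented for illustration:

```python
# Hypothetical AGENT_MANIFEST payload for a pydantic-ai agent (illustrative values).
example_manifest = {
    "framework": "PydanticAI",
    "name": "weather-agent",  # falls back to "PydanticAI Agent" when unset
    "model": "gpt-4o",  # resolved via _get_model_and_provider
    "model_settings": {"temperature": 0.2, "max_tokens": 512},
    "instructions": "Answer questions about the weather.",
    "system_prompts": ("You are a helpful assistant.",),
    "tools": [
        {
            "name": "get_forecast",
            "description": "Fetch a forecast for a city.",
            "parameters": {"city": {"type": "string", "required": True}},
        }
    ],
    "dependencies": {"api_key": "..."},  # from the run's deps kwarg, if provided
}
```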
ddtrace/llmobs/_integrations/utils.py CHANGED
@@ -1,6 +1,4 @@
-from dataclasses import asdict
 from dataclasses import dataclass
-from dataclasses import is_dataclass
 import json
 import re
 from typing import Any
@@ -19,7 +17,6 @@ from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL_OUTPUT_USED
 from ddtrace.llmobs._constants import INPUT_MESSAGES
 from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import INPUT_VALUE
-from ddtrace.llmobs._constants import LITELLM_ROUTER_INSTANCE_KEY
 from ddtrace.llmobs._constants import METADATA
 from ddtrace.llmobs._constants import OAI_HANDOFF_TOOL_ARG
 from ddtrace.llmobs._constants import OUTPUT_MESSAGES
@@ -27,6 +24,7 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import OUTPUT_VALUE
 from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._utils import _get_attr
+from ddtrace.llmobs._utils import load_data_value
 from ddtrace.llmobs._utils import safe_json
 
 
@@ -39,31 +37,71 @@ except ModuleNotFoundError:
 
 logger = get_logger(__name__)
 
-OPENAI_SKIPPED_COMPLETION_TAGS = (
-    "model",
-    "prompt",
-    "api_key",
-    "user_api_key",
-    "user_api_key_hash",
-    LITELLM_ROUTER_INSTANCE_KEY,
+COMMON_METADATA_KEYS = (
+    "stream",
+    "temperature",
+    "top_p",
+    "user",
 )
-OPENAI_SKIPPED_CHAT_TAGS = (
-    "model",
-    "messages",
+OPENAI_METADATA_RESPONSE_KEYS = (
+    "background",
+    "include",
+    "max_output_tokens",
+    "max_tool_calls",
+    "parallel_tool_calls",
+    "previous_response_id",
+    "prompt",
+    "reasoning",
+    "service_tier",
+    "store",
+    "text",
+    "tool_choice",
     "tools",
-    "functions",
-    "api_key",
-    "user_api_key",
-    "user_api_key_hash",
-    LITELLM_ROUTER_INSTANCE_KEY,
+    "top_logprobs",
+    "truncation",
+)
+OPENAI_METADATA_CHAT_KEYS = (
+    "audio",
+    "frequency_penalty",
+    "function_call",
+    "logit_bias",
+    "logprobs",
+    "max_completion_tokens",
+    "max_tokens",
+    "modalities",
+    "n",
+    "parallel_tool_calls",
+    "prediction",
+    "presence_penalty",
+    "reasoning_effort",
+    "response_format",
+    "seed",
+    "service_tier",
+    "stop",
+    "store",
+    "stream_options",
+    "tool_choice",
+    "top_logprobs",
+    "web_search_options",
+)
+OPENAI_METADATA_COMPLETION_KEYS = (
+    "best_of",
+    "echo",
+    "frequency_penalty",
+    "logit_bias",
+    "logprobs",
+    "max_tokens",
+    "n",
+    "presence_penalty",
+    "seed",
+    "stop",
+    "stream_options",
+    "suffix",
 )
 
 LITELLM_METADATA_CHAT_KEYS = (
     "timeout",
-    "temperature",
-    "top_p",
     "n",
-    "stream",
     "stream_options",
     "stop",
     "max_completion_tokens",
@@ -73,7 +111,6 @@ LITELLM_METADATA_CHAT_KEYS = (
     "presence_penalty",
     "frequency_penalty",
     "logit_bias",
-    "user",
     "response_format",
     "seed",
     "tool_choice",
@@ -97,12 +134,8 @@ LITELLM_METADATA_COMPLETION_KEYS = (
     "n",
     "presence_penalty",
     "stop",
-    "stream",
     "stream_options",
     "suffix",
-    "temperature",
-    "top_p",
-    "user",
     "api_base",
     "api_version",
     "model_list",
@@ -110,67 +143,6 @@ LITELLM_METADATA_COMPLETION_KEYS = (
 )
 
 
-def extract_model_name_google(instance, model_name_attr):
-    """Extract the model name from the instance.
-    Model names are stored in the format `"models/{model_name}"`
-    so we do our best to return the model name instead of the full string.
-    """
-    model_name = _get_attr(instance, model_name_attr, "")
-    if not model_name or not isinstance(model_name, str):
-        return ""
-    if "/" in model_name:
-        return model_name.split("/")[-1]
-    return model_name
-
-
-def get_generation_config_google(instance, kwargs):
-    """
-    The generation config can be defined on the model instance or
-    as a kwarg of the request. Therefore, try to extract this information
-    from the kwargs and otherwise default to checking the model instance attribute.
-    """
-    generation_config = kwargs.get("generation_config", {})
-    return generation_config or _get_attr(instance, "_generation_config", {})
-
-
-def llmobs_get_metadata_google(kwargs, instance):
-    metadata = {}
-    model_config = getattr(instance, "_generation_config", {}) or {}
-    model_config = model_config.to_dict() if hasattr(model_config, "to_dict") else model_config
-    request_config = kwargs.get("generation_config", {}) or {}
-    request_config = request_config.to_dict() if hasattr(request_config, "to_dict") else request_config
-
-    parameters = ("temperature", "max_output_tokens", "candidate_count", "top_p", "top_k")
-    for param in parameters:
-        model_config_value = _get_attr(model_config, param, None)
-        request_config_value = _get_attr(request_config, param, None)
-        if model_config_value or request_config_value:
-            metadata[param] = request_config_value or model_config_value
-    return metadata
-
-
-def extract_message_from_part_google(part, role=None):
-    text = _get_attr(part, "text", "")
-    function_call = _get_attr(part, "function_call", None)
-    function_response = _get_attr(part, "function_response", None)
-    message = {"content": text}
-    if role:
-        message["role"] = role
-    if function_call:
-        function_call_dict = function_call
-        if not isinstance(function_call, dict):
-            function_call_dict = type(function_call).to_dict(function_call)
-        message["tool_calls"] = [
-            {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})}
-        ]
-    if function_response:
-        function_response_dict = function_response
-        if not isinstance(function_response, dict):
-            function_response_dict = type(function_response).to_dict(function_response)
-        message["content"] = "[tool result: {}]".format(function_response_dict.get("response", ""))
-    return message
-
-
 def get_llmobs_metrics_tags(integration_name, span):
     usage = {}
 
@@ -209,41 +181,6 @@ def parse_llmobs_metric_args(metrics):
     return usage
 
 
-def get_system_instructions_from_google_model(model_instance):
-    """
-    Extract system instructions from model and convert to []str for tagging.
-    """
-    try:
-        from google.ai.generativelanguage_v1beta.types.content import Content
-    except ImportError:
-        Content = None
-    try:
-        from vertexai.generative_models._generative_models import Part
-    except ImportError:
-        Part = None
-
-    raw_system_instructions = getattr(model_instance, "_system_instruction", [])
-    if Content is not None and isinstance(raw_system_instructions, Content):
-        system_instructions = []
-        for part in raw_system_instructions.parts:
-            system_instructions.append(_get_attr(part, "text", ""))
-        return system_instructions
-    elif isinstance(raw_system_instructions, str):
-        return [raw_system_instructions]
-    elif Part is not None and isinstance(raw_system_instructions, Part):
-        return [_get_attr(raw_system_instructions, "text", "")]
-    elif not isinstance(raw_system_instructions, list):
-        return []
-
-    system_instructions = []
-    for elem in raw_system_instructions:
-        if isinstance(elem, str):
-            system_instructions.append(elem)
-        elif Part is not None and isinstance(elem, Part):
-            system_instructions.append(_get_attr(elem, "text", ""))
-    return system_instructions
-
-
 LANGCHAIN_ROLE_MAPPING = {
     "human": "user",
     "ai": "assistant",
@@ -471,12 +408,12 @@ def get_metadata_from_kwargs(
     kwargs: Dict[str, Any], integration_name: str = "openai", operation: str = "chat"
 ) -> Dict[str, Any]:
     metadata = {}
+    keys_to_include: Tuple[str, ...] = COMMON_METADATA_KEYS
     if integration_name == "openai":
-        keys_to_skip = OPENAI_SKIPPED_CHAT_TAGS if operation == "chat" else OPENAI_SKIPPED_COMPLETION_TAGS
-        metadata = {k: v for k, v in kwargs.items() if k not in keys_to_skip}
+        keys_to_include += OPENAI_METADATA_CHAT_KEYS if operation == "chat" else OPENAI_METADATA_COMPLETION_KEYS
     elif integration_name == "litellm":
-        keys_to_include = LITELLM_METADATA_CHAT_KEYS if operation == "chat" else LITELLM_METADATA_COMPLETION_KEYS
-        metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
+        keys_to_include += LITELLM_METADATA_CHAT_KEYS if operation == "chat" else LITELLM_METADATA_COMPLETION_KEYS
+    metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
     return metadata
 
 
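Reviewer note: together with the new `*_METADATA_*_KEYS` constants above, this flips metadata extraction from a denylist (`OPENAI_SKIPPED_*_TAGS`) to an allowlist, so unrecognized kwargs such as credentials can no longer leak into span metadata. A minimal sketch of the filtering, with abbreviated key tuples and invented kwargs:

```python
# Allowlist filtering as now done in get_metadata_from_kwargs (sketch).
COMMON_METADATA_KEYS = ("stream", "temperature", "top_p", "user")
OPENAI_METADATA_CHAT_KEYS = ("max_tokens", "n", "seed")  # abbreviated for the example

keys_to_include = COMMON_METADATA_KEYS + OPENAI_METADATA_CHAT_KEYS
kwargs = {"temperature": 0.7, "api_key": "sk-secret", "model": "gpt-4o", "n": 2}
metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
print(metadata)  # {'temperature': 0.7, 'n': 2} -- api_key and model are dropped
```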
@@ -621,7 +558,7 @@ def openai_get_metadata_from_response(
    metadata = {}
 
    if kwargs:
-        metadata.update({k: v for k, v in kwargs.items() if k not in ("model", "input", "instructions")})
+        metadata.update({k: v for k, v in kwargs.items() if k in OPENAI_METADATA_RESPONSE_KEYS + COMMON_METADATA_KEYS})
 
    if not response:
        return metadata
@@ -630,7 +567,7 @@ def openai_get_metadata_from_response(
    for field in ["temperature", "max_output_tokens", "top_p", "tools", "tool_choice", "truncation", "text", "user"]:
        value = getattr(response, field, None)
        if value is not None:
-            metadata[field] = load_oai_span_data_value(value)
+            metadata[field] = load_data_value(value)
 
    usage = getattr(response, "usage", None)
    output_tokens_details = getattr(usage, "output_tokens_details", None)
@@ -863,7 +800,7 @@ class OaiSpanAdapter:
        data = self.data
        if not data:
            return {}
-        return load_oai_span_data_value(data)
+        return load_data_value(data)
 
    @property
    def response_output_text(self) -> str:
@@ -922,25 +859,14 @@ class OaiSpanAdapter:
            if hasattr(self.response, field):
                value = getattr(self.response, field)
                if value is not None:
-                    metadata[field] = load_oai_span_data_value(value)
+                    metadata[field] = load_data_value(value)
 
        if hasattr(self.response, "text") and self.response.text:
-            metadata["text"] = load_oai_span_data_value(self.response.text)
+            metadata["text"] = load_data_value(self.response.text)
 
        if hasattr(self.response, "usage") and hasattr(self.response.usage, "output_tokens_details"):
            metadata["reasoning_tokens"] = self.response.usage.output_tokens_details.reasoning_tokens
 
-        if self.span_type == "agent":
-            agent_metadata: Dict[str, List[str]] = {
-                "handoffs": [],
-                "tools": [],
-            }
-            if self.handoffs:
-                agent_metadata["handoffs"] = load_oai_span_data_value(self.handoffs)
-            if self.tools:
-                agent_metadata["tools"] = load_oai_span_data_value(self.tools)
-            metadata.update(agent_metadata)
-
        if self.span_type == "custom" and hasattr(self._raw_oai_span.span_data, "data"):
            custom_data = getattr(self._raw_oai_span.span_data, "data", None)
            if custom_data:
@@ -1153,22 +1079,6 @@ class OaiTraceAdapter:
        return self._trace
 
 
-def load_oai_span_data_value(value):
-    """Helper function to load values stored in openai span data in a consistent way"""
-    if isinstance(value, list):
-        return [load_oai_span_data_value(item) for item in value]
-    elif hasattr(value, "model_dump"):
-        return value.model_dump()
-    elif is_dataclass(value):
-        return asdict(value)
-    else:
-        value_str = safe_json(value)
-        try:
-            return json.loads(value_str)
-        except json.JSONDecodeError:
-            return value_str
-
-
 @dataclass
 class LLMObsTraceInfo:
    """Metadata for llmobs trace used for setting root span attributes and span links"""
ddtrace/llmobs/_integrations/vertexai.py CHANGED
@@ -17,9 +17,9 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import SPAN_KIND
 from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._integrations.base import BaseLLMIntegration
-from ddtrace.llmobs._integrations.utils import extract_message_from_part_google
-from ddtrace.llmobs._integrations.utils import get_system_instructions_from_google_model
-from ddtrace.llmobs._integrations.utils import llmobs_get_metadata_google
+from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import get_system_instructions_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import llmobs_get_metadata_gemini_vertexai
 from ddtrace.llmobs._utils import _get_attr
 from ddtrace.trace import Span
 
@@ -46,9 +46,9 @@ class VertexAIIntegration(BaseLLMIntegration):
        instance = kwargs.get("instance", None)
        history = kwargs.get("history", [])
        metrics = kwargs.get("metrics", {})
-        metadata = llmobs_get_metadata_google(kwargs, instance)
+        metadata = llmobs_get_metadata_gemini_vertexai(kwargs, instance)
 
-        system_instruction = get_system_instructions_from_google_model(instance)
+        system_instruction = get_system_instructions_gemini_vertexai(instance)
        input_contents = None
        try:
            input_contents = get_argument_value(args, kwargs, 0, "content")
@@ -117,7 +117,7 @@ class VertexAIIntegration(BaseLLMIntegration):
            messages.append({"content": contents})
            return messages
        if isinstance(contents, Part):
-            message = extract_message_from_part_google(contents)
+            message = extract_message_from_part_gemini_vertexai(contents)
            messages.append(message)
            return messages
        if not isinstance(contents, list):
@@ -128,7 +128,7 @@ class VertexAIIntegration(BaseLLMIntegration):
                messages.append({"content": content})
                continue
            if isinstance(content, Part):
-                message = extract_message_from_part_google(content)
+                message = extract_message_from_part_gemini_vertexai(content)
                messages.append(message)
                continue
            messages.extend(self._extract_messages_from_content(content))
@@ -170,6 +170,6 @@ class VertexAIIntegration(BaseLLMIntegration):
            messages.append(message)
            return messages
        for part in parts:
-            message = extract_message_from_part_google(part, role)
+            message = extract_message_from_part_gemini_vertexai(part, role)
            messages.append(message)
        return messages
ddtrace/llmobs/_llmobs.py CHANGED
@@ -44,6 +44,7 @@ from ddtrace.internal.utils.formats import format_trace_id
 from ddtrace.internal.utils.formats import parse_tags_str
 from ddtrace.llmobs import _constants as constants
 from ddtrace.llmobs import _telemetry as telemetry
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import ANNOTATIONS_CONTEXT_ID
 from ddtrace.llmobs._constants import DECORATOR
 from ddtrace.llmobs._constants import DEFAULT_PROJECT_NAME
@@ -256,7 +257,10 @@ class LLMObs(Service):
        if span_kind in ("llm", "embedding") and span._get_ctx_item(MODEL_NAME) is not None:
            meta["model_name"] = span._get_ctx_item(MODEL_NAME)
            meta["model_provider"] = (span._get_ctx_item(MODEL_PROVIDER) or "custom").lower()
-        meta["metadata"] = span._get_ctx_item(METADATA) or {}
+        metadata = span._get_ctx_item(METADATA) or {}
+        if span_kind == "agent" and span._get_ctx_item(AGENT_MANIFEST) is not None:
+            metadata["agent_manifest"] = span._get_ctx_item(AGENT_MANIFEST)
+        meta["metadata"] = metadata
 
        input_type: Literal["value", "messages", ""] = ""
        output_type: Literal["value", "messages", ""] = ""
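
Reviewer note: the manifest is not a new top-level event field; for agent spans it nests inside the existing metadata dict. Illustrative shape only, with invented values:

```python
# Illustrative result: an agent span event's meta dict after this change.
meta = {
    "metadata": {
        "temperature": 0.2,  # pre-existing METADATA ctx items, if any
        "agent_manifest": {"framework": "PydanticAI", "name": "PydanticAI Agent"},
    },
}
```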
ddtrace/llmobs/_utils.py CHANGED
@@ -1,4 +1,6 @@
+from dataclasses import asdict
 from dataclasses import dataclass
+from dataclasses import is_dataclass
 import json
 from typing import Dict
 from typing import List
@@ -215,6 +217,25 @@ def safe_json(obj, ensure_ascii=True):
        log.error("Failed to serialize object to JSON.", exc_info=True)
 
 
+def load_data_value(value):
+    if isinstance(value, (list, tuple, set)):
+        return [load_data_value(item) for item in value]
+    elif isinstance(value, dict):
+        return {str(k): load_data_value(v) for k, v in value.items()}
+    elif hasattr(value, "model_dump"):
+        return value.model_dump()
+    elif is_dataclass(value):
+        return asdict(value)
+    elif isinstance(value, (int, float, str, bool)) or value is None:
+        return value
+    else:
+        value_str = safe_json(value)
+        try:
+            return json.loads(value_str)
+        except json.JSONDecodeError:
+            return value_str
+
+
 def add_span_link(span: Span, span_id: str, trace_id: str, from_io: str, to_io: str) -> None:
    current_span_links = span._get_ctx_item(SPAN_LINKS) or []
    current_span_links.append(
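
Reviewer note: `load_data_value` is the old `load_oai_span_data_value` promoted to a shared utility, now with dict/tuple/set recursion and a primitive fast path. A small usage sketch; `Point` is a made-up dataclass:

```python
from dataclasses import dataclass

from ddtrace.llmobs._utils import load_data_value  # added in this release

@dataclass
class Point:  # invented type for illustration
    x: int
    y: int

# Containers are walked recursively; dataclasses become plain dicts via asdict(),
# primitives pass through, and anything else falls back to safe_json round-tripping.
print(load_data_value({"points": [Point(1, 2)], "ok": True}))
# -> {'points': [{'x': 1, 'y': 2}], 'ok': True}
```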
ddtrace/settings/asm.py CHANGED
@@ -16,6 +16,7 @@ from ddtrace.appsec._constants import TELEMETRY_INFORMATION_NAME
 from ddtrace.constants import APPSEC_ENV
 from ddtrace.ext import SpanTypes
 from ddtrace.internal import core
+from ddtrace.internal.endpoints import HttpEndPointsCollection
 from ddtrace.internal.serverless import in_aws_lambda
 from ddtrace.settings._config import config as tracer_config
 from ddtrace.settings._core import DDConfig
@@ -59,6 +60,9 @@ def build_libddwaf_filename() -> str:
    return os.path.join(_DIRNAME, "appsec", "_ddwaf", "libddwaf", ARCHITECTURE, "lib", "libddwaf." + FILE_EXTENSION)
 
 
+endpoint_collection = HttpEndPointsCollection()
+
+
 class ASMConfig(DDConfig):
    _asm_enabled = DDConfig.var(bool, APPSEC_ENV, default=False)
    _asm_enabled_origin = APPSEC.ENABLED_ORIGIN_UNKNOWN
@@ -92,6 +96,10 @@ class ASMConfig(DDConfig):
    _api_security_enabled = DDConfig.var(bool, API_SECURITY.ENV_VAR_ENABLED, default=True)
    _api_security_sample_delay = DDConfig.var(float, API_SECURITY.SAMPLE_DELAY, default=30.0)
    _api_security_parse_response_body = DDConfig.var(bool, API_SECURITY.PARSE_RESPONSE_BODY, default=True)
+    _api_security_endpoint_collection = DDConfig.var(bool, API_SECURITY.ENDPOINT_COLLECTION, default=True)
+    _api_security_endpoint_collection_limit = DDConfig.var(
+        int, API_SECURITY.ENDPOINT_COLLECTION_LIMIT, default=DEFAULT.ENDPOINT_COLLECTION_LIMIT
+    )
 
    # internal state of the API security Manager service.
    # updated in API Manager enable/disable
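
Reviewer note: both new settings are ordinary `DDConfig` vars, so they should be readable from the ASM config singleton like the neighboring API-security flags. A sketch under that assumption; the backing env-var strings are defined in `ddtrace.appsec._constants.API_SECURITY`, which also changed in this release:

```python
# Sketch only: inspecting the new endpoint-collection settings at runtime.
from ddtrace.settings.asm import config as asm_config

print(asm_config._api_security_endpoint_collection)        # defaults to True
print(asm_config._api_security_endpoint_collection_limit)  # DEFAULT.ENDPOINT_COLLECTION_LIMIT
```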
@@ -246,9 +254,8 @@ class ASMConfig(DDConfig):
            self._asm_processed_span_types.add(SpanTypes.SERVERLESS)
            self._asm_http_span_types.add(SpanTypes.SERVERLESS)
 
-            # As a first step, only Threat Management in monitoring mode should be enabled in AWS Lambda
+            # Disable all features that are not supported in Lambda
            tracer_config._remote_config_enabled = False
-            self._api_security_enabled = False
            self._ep_enabled = False
            self._iast_supported = False
 
{ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ddtrace
-Version: 3.11.0rc2
+Version: 3.11.0rc3
 Summary: Datadog APM client library
 Author-email: "Datadog, Inc." <dev@datadoghq.com>
 License: LICENSE.BSD3