ddtrace 3.11.0rc2__cp38-cp38-win32.whl → 3.11.0rc3__cp38-cp38-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ddtrace might be problematic.
- ddtrace/_trace/sampling_rule.py +25 -33
- ddtrace/_trace/trace_handlers.py +9 -49
- ddtrace/_trace/utils_botocore/span_tags.py +48 -0
- ddtrace/_version.py +2 -2
- ddtrace/appsec/_constants.py +7 -0
- ddtrace/appsec/_handlers.py +11 -0
- ddtrace/appsec/_processor.py +1 -1
- ddtrace/contrib/internal/aiobotocore/patch.py +8 -0
- ddtrace/contrib/internal/boto/patch.py +14 -0
- ddtrace/contrib/internal/botocore/services/bedrock.py +3 -27
- ddtrace/contrib/internal/django/patch.py +31 -8
- ddtrace/contrib/internal/google_genai/_utils.py +2 -2
- ddtrace/contrib/internal/google_genai/patch.py +7 -7
- ddtrace/contrib/internal/google_generativeai/patch.py +7 -5
- ddtrace/contrib/internal/openai_agents/patch.py +44 -1
- ddtrace/contrib/internal/pytest/_plugin_v2.py +1 -1
- ddtrace/contrib/internal/vertexai/patch.py +7 -5
- ddtrace/ext/ci.py +20 -0
- ddtrace/ext/git.py +66 -11
- ddtrace/internal/_encoding.cp38-win32.pyd +0 -0
- ddtrace/internal/_rand.cp38-win32.pyd +0 -0
- ddtrace/internal/_tagset.cp38-win32.pyd +0 -0
- ddtrace/internal/_threads.cp38-win32.pyd +0 -0
- ddtrace/internal/ci_visibility/encoder.py +126 -55
- ddtrace/internal/datadog/profiling/dd_wrapper-unknown-amd64.dll +0 -0
- ddtrace/internal/datadog/profiling/ddup/_ddup.cp38-win32.pyd +0 -0
- ddtrace/internal/datadog/profiling/ddup/dd_wrapper-unknown-amd64.dll +0 -0
- ddtrace/internal/endpoints.py +76 -0
- ddtrace/internal/native/_native.cp38-win32.pyd +0 -0
- ddtrace/internal/schema/processor.py +6 -2
- ddtrace/internal/telemetry/metrics_namespaces.cp38-win32.pyd +0 -0
- ddtrace/internal/telemetry/writer.py +18 -0
- ddtrace/llmobs/_constants.py +1 -0
- ddtrace/llmobs/_experiment.py +6 -0
- ddtrace/llmobs/_integrations/crewai.py +52 -3
- ddtrace/llmobs/_integrations/gemini.py +7 -7
- ddtrace/llmobs/_integrations/google_genai.py +10 -10
- ddtrace/llmobs/_integrations/{google_genai_utils.py → google_utils.py} +103 -7
- ddtrace/llmobs/_integrations/openai_agents.py +145 -0
- ddtrace/llmobs/_integrations/pydantic_ai.py +67 -26
- ddtrace/llmobs/_integrations/utils.py +68 -158
- ddtrace/llmobs/_integrations/vertexai.py +8 -8
- ddtrace/llmobs/_llmobs.py +5 -1
- ddtrace/llmobs/_utils.py +21 -0
- ddtrace/profiling/_threading.cp38-win32.pyd +0 -0
- ddtrace/profiling/collector/_memalloc.cp38-win32.pyd +0 -0
- ddtrace/profiling/collector/_task.cp38-win32.pyd +0 -0
- ddtrace/profiling/collector/_traceback.cp38-win32.pyd +0 -0
- ddtrace/profiling/collector/stack.cp38-win32.pyd +0 -0
- ddtrace/settings/asm.py +9 -2
- ddtrace/vendor/psutil/_psutil_windows.cp38-win32.pyd +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/METADATA +1 -1
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/RECORD +60 -59
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/LICENSE +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/LICENSE.Apache +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/LICENSE.BSD3 +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/NOTICE +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/WHEEL +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/entry_points.txt +0 -0
- {ddtrace-3.11.0rc2.dist-info → ddtrace-3.11.0rc3.dist-info}/top_level.txt +0 -0
ddtrace/internal/schema/processor.py
CHANGED
@@ -1,5 +1,6 @@
 from ddtrace._trace.processor import TraceProcessor
 from ddtrace.constants import _BASE_SERVICE_KEY
+from ddtrace.internal.serverless import in_aws_lambda
 from ddtrace.settings._config import config

 from . import schematize_service_name
@@ -8,10 +9,13 @@ from . import schematize_service_name
 class BaseServiceProcessor(TraceProcessor):
     def __init__(self):
         self._global_service = schematize_service_name((config.service or "").lower())
+        self._in_aws_lambda = in_aws_lambda()

     def process_trace(self, trace):
-        if not trace:
-            return
+        # AWS Lambda spans receive unhelpful base_service value of runtime
+        # Remove base_service to prevent service overrides in Lambda spans
+        if not trace or self._in_aws_lambda:
+            return trace

         traces_to_process = filter(
             lambda x: x.service and x.service.lower() != self._global_service,
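For context, a minimal sketch of the new gating, assuming `in_aws_lambda()` keys off the `AWS_LAMBDA_FUNCTION_NAME` environment variable (the function name here is invented):

import os

# Simulate a Lambda environment before the check runs.
os.environ["AWS_LAMBDA_FUNCTION_NAME"] = "my-fn"

from ddtrace.internal.serverless import in_aws_lambda

# With the guard above, process_trace() now returns the trace unmodified in
# Lambda, so no span receives a base_service override from the runtime name.
assert in_aws_lambda() is True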
ddtrace/internal/telemetry/writer.py
CHANGED
@@ -418,6 +418,23 @@ class TelemetryWriter(PeriodicService):
         payload = {"dependencies": packages}
         self.add_event(payload, "app-dependencies-loaded")

+    def _add_endpoints_event(self):
+        """Adds a Telemetry event which sends the list of HTTP endpoints found at startup to the agent"""
+        import ddtrace.settings.asm as asm_config_module
+
+        if not asm_config_module.config._api_security_endpoint_collection or not self._enabled:
+            return
+
+        if not asm_config_module.endpoint_collection.endpoints:
+            return
+
+        with self._service_lock:
+            payload = asm_config_module.endpoint_collection.flush(
+                asm_config_module.config._api_security_endpoint_collection_limit
+            )
+
+        self.add_event(payload, "app-endpoints")
+
     def _app_product_change(self):
         # type: () -> None
         """Adds a Telemetry event which reports the enablement of an APM product"""
@@ -660,6 +677,7 @@ class TelemetryWriter(PeriodicService):
             self._app_client_configuration_changed_event(configurations)

         self._app_dependencies_loaded_event()
+        self._add_endpoints_event()

         if shutting_down:
             self._app_closing_event()
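A rough sketch of the flush-and-send flow the new method implements, using only the calls visible in the hunk; the payload shape in the comment is an assumption, not confirmed by this diff:

import ddtrace.settings.asm as asm_config_module

# Drain the endpoints collected at startup, capped by the configured limit,
# then emit them as a single "app-endpoints" telemetry event.
limit = asm_config_module.config._api_security_endpoint_collection_limit
payload = asm_config_module.endpoint_collection.flush(limit)  # e.g. {"endpoints": [...]} (assumed shape)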
ddtrace/llmobs/_constants.py
CHANGED
@@ -9,6 +9,7 @@ PARENT_ID_KEY = "_ml_obs.llmobs_parent_id"
 PROPAGATED_LLMOBS_TRACE_ID_KEY = "_dd.p.llmobs_trace_id"
 LLMOBS_TRACE_ID = "_ml_obs.llmobs_trace_id"
 TAGS = "_ml_obs.tags"
+AGENT_MANIFEST = "_ml_obs.meta.agent_manifest"

 MODEL_NAME = "_ml_obs.meta.model_name"
 MODEL_PROVIDER = "_ml_obs.meta.model_provider"
ddtrace/llmobs/_experiment.py
CHANGED
@@ -297,6 +297,12 @@ class Experiment:
         )
         return experiment_results

+    @property
+    def url(self) -> str:
+        # FIXME: need to use the user's site
+        # also will not work for subdomain orgs
+        return f"https://app.datadoghq.com/llm/experiments/{self._id}"
+
     def _process_record(self, idx_record: Tuple[int, DatasetRecord]) -> Optional[TaskResult]:
         if not self._llmobs_instance or not self._llmobs_instance.enabled:
             return None
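Usage of the new property is straightforward; a hypothetical experiment whose `_id` is "abc123" (an invented value) resolves to:

# exp is an already-constructed Experiment instance with _id = "abc123"
print(exp.url)  # -> https://app.datadoghq.com/llm/experiments/abc123

Note the in-code FIXME: the hostname is hardcoded to app.datadoghq.com, so the link is wrong for other Datadog sites and subdomain orgs.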
ddtrace/llmobs/_integrations/crewai.py
CHANGED
@@ -8,6 +8,7 @@ from ddtrace.internal import core
 from ddtrace.internal.logger import get_logger
 from ddtrace.internal.utils import get_argument_value
 from ddtrace.internal.utils.formats import format_trace_id
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import INPUT_VALUE
 from ddtrace.llmobs._constants import METADATA
 from ddtrace.llmobs._constants import NAME
@@ -151,9 +152,8 @@ class CrewAIIntegration(BaseLLMIntegration):
         Agent spans are 1:1 with its parent (task/tool) span, so we link them directly here, even on the parent itself.
         """
         agent_instance = kwargs.get("instance")
+        self._tag_agent_manifest(span, agent_instance)
         agent_role = getattr(agent_instance, "role", "")
-        agent_goal = getattr(agent_instance, "goal", "")
-        agent_backstory = getattr(agent_instance, "backstory", "")
         task_description = getattr(kwargs.get("task"), "description", "")
         context = get_argument_value(args, kwargs, 1, "context", optional=True) or ""

@@ -174,7 +174,6 @@ class CrewAIIntegration(BaseLLMIntegration):
         span._set_ctx_items(
             {
                 NAME: agent_role if agent_role else "CrewAI Agent",
-                METADATA: {"description": agent_goal, "backstory": agent_backstory},
                 INPUT_VALUE: {"context": context, "input": task_description},
                 SPAN_LINKS: curr_span_links + [span_link],
             }
@@ -198,6 +197,56 @@ class CrewAIIntegration(BaseLLMIntegration):
             return
         span._set_ctx_item(OUTPUT_VALUE, response)

+    def _tag_agent_manifest(self, span, agent):
+        if not agent:
+            return
+
+        manifest = {}
+        manifest["framework"] = "CrewAI"
+        manifest["name"] = agent.role if hasattr(agent, "role") and agent.role else "CrewAI Agent"
+        if hasattr(agent, "goal"):
+            manifest["goal"] = agent.goal
+        if hasattr(agent, "backstory"):
+            manifest["backstory"] = agent.backstory
+        if hasattr(agent, "llm"):
+            if hasattr(agent.llm, "model"):
+                manifest["model"] = agent.llm.model
+            model_settings = {}
+            if hasattr(agent.llm, "max_tokens"):
+                model_settings["max_tokens"] = agent.llm.max_tokens
+            if hasattr(agent.llm, "temperature"):
+                model_settings["temperature"] = agent.llm.temperature
+            if model_settings:
+                manifest["model_settings"] = model_settings
+        if hasattr(agent, "allow_delegation"):
+            manifest["handoffs"] = {"allow_delegation": agent.allow_delegation}
+        code_execution_permissions = {}
+        if hasattr(agent, "allow_code_execution"):
+            manifest["code_execution_permissions"] = {"allow_code_execution": agent.allow_code_execution}
+        if hasattr(agent, "code_execution_mode"):
+            manifest["code_execution_permissions"] = {"code_execution_mode": agent.code_execution_mode}
+        if code_execution_permissions:
+            manifest["code_execution_permissions"] = code_execution_permissions
+        if hasattr(agent, "max_iter"):
+            manifest["max_iterations"] = agent.max_iter
+        if hasattr(agent, "tools"):
+            manifest["tools"] = self._get_agent_tools(agent.tools)
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _get_agent_tools(self, tools):
+        if not tools or not isinstance(tools, list):
+            return []
+        formatted_tools = []
+        for tool in tools:
+            tool_dict = {}
+            if hasattr(tool, "name"):
+                tool_dict["name"] = tool.name
+            if hasattr(tool, "description"):
+                tool_dict["description"] = tool.description
+            formatted_tools.append(tool_dict)
+        return formatted_tools
+
     def _llmobs_set_span_link_on_task(self, span, args, kwargs):
         """Set span links for the next queued task in a CrewAI workflow.
         This happens between task executions, (the current span is the crew span and the task span hasn't started yet)
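For illustration, the manifest that `_tag_agent_manifest` would attach for a hypothetical CrewAI agent (every field value below is invented):

# Agent(role="Researcher", goal="Find sources", backstory="A meticulous analyst",
#       llm=LLM(model="gpt-4o", max_tokens=1024, temperature=0.2),
#       allow_delegation=False, max_iter=5, tools=[search_tool])
{
    "framework": "CrewAI",
    "name": "Researcher",
    "goal": "Find sources",
    "backstory": "A meticulous analyst",
    "model": "gpt-4o",
    "model_settings": {"max_tokens": 1024, "temperature": 0.2},
    "handoffs": {"allow_delegation": False},
    "max_iterations": 5,
    "tools": [{"name": "search", "description": "Web search"}],
}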
ddtrace/llmobs/_integrations/gemini.py
CHANGED
@@ -16,9 +16,9 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import SPAN_KIND
 from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._integrations.base import BaseLLMIntegration
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
+from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import get_system_instructions_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import llmobs_get_metadata_gemini_vertexai
 from ddtrace.llmobs._utils import _get_attr
 from ddtrace.trace import Span

@@ -43,9 +43,9 @@ class GeminiIntegration(BaseLLMIntegration):
         operation: str = "",
     ) -> None:
         instance = kwargs.get("instance", None)
-        metadata =
+        metadata = llmobs_get_metadata_gemini_vertexai(kwargs, instance)

-        system_instruction =
+        system_instruction = get_system_instructions_gemini_vertexai(instance)
         input_contents = get_argument_value(args, kwargs, 0, "contents")
         input_messages = self._extract_input_message(input_contents, system_instruction)

@@ -95,7 +95,7 @@ class GeminiIntegration(BaseLLMIntegration):
             messages.append(message)
             continue
         for part in parts:
-            message =
+            message = extract_message_from_part_gemini_vertexai(part, role)
             messages.append(message)
         return messages

@@ -107,7 +107,7 @@ class GeminiIntegration(BaseLLMIntegration):
             role = content.get("role", "model")
             parts = content.get("parts", [])
             for part in parts:
-                message =
+                message = extract_message_from_part_gemini_vertexai(part, role)
                 output_messages.append(message)
         return output_messages

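For reference, a sketch of what the shared helper returns for simple dict-style parts (that `_get_attr` also reads dict keys is an assumption here; the part contents are invented):

from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_gemini_vertexai

# Plain text part.
extract_message_from_part_gemini_vertexai({"text": "hello"}, role="user")
# -> {"content": "hello", "role": "user"}

# Function-call part: surfaced as a tool_calls entry on the message.
extract_message_from_part_gemini_vertexai(
    {"text": "", "function_call": {"name": "get_weather", "args": {"city": "Paris"}}}, role="model"
)
# -> {"content": "", "role": "model",
#     "tool_calls": [{"name": "get_weather", "arguments": {"city": "Paris"}}]}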
ddtrace/llmobs/_integrations/google_genai.py
CHANGED
@@ -14,12 +14,12 @@ from ddtrace.llmobs._constants import OUTPUT_MESSAGES
 from ddtrace.llmobs._constants import OUTPUT_VALUE
 from ddtrace.llmobs._constants import SPAN_KIND
 from ddtrace.llmobs._integrations.base import BaseLLMIntegration
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
+from ddtrace.llmobs._integrations.google_utils import GOOGLE_GENAI_DEFAULT_MODEL_ROLE
+from ddtrace.llmobs._integrations.google_utils import extract_embedding_metrics_google_genai
+from ddtrace.llmobs._integrations.google_utils import extract_generation_metrics_google_genai
+from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_google_genai
+from ddtrace.llmobs._integrations.google_utils import extract_provider_and_model_name
+from ddtrace.llmobs._integrations.google_utils import normalize_contents_google_genai
 from ddtrace.llmobs._utils import _get_attr
 from ddtrace.llmobs.utils import Document

@@ -71,7 +71,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):
         response: Optional[Any] = None,
         operation: str = "",
     ) -> None:
-        provider_name, model_name = extract_provider_and_model_name(kwargs)
+        provider_name, model_name = extract_provider_and_model_name(kwargs=kwargs)
         span._set_ctx_items(
             {
                 SPAN_KIND: operation,
@@ -120,7 +120,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):

     def _extract_messages_from_contents(self, contents, default_role: str) -> List[Dict[str, Any]]:
         messages = []
-        for content in
+        for content in normalize_contents_google_genai(contents):
             role = content.get("role") or default_role
             for part in content.get("parts", []):
                 messages.append(extract_message_from_part_google_genai(part, role))
@@ -128,7 +128,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):

     def _extract_output_messages(self, response) -> List[Dict[str, Any]]:
         if not response:
-            return [{"content": "", "role":
+            return [{"content": "", "role": GOOGLE_GENAI_DEFAULT_MODEL_ROLE}]
         messages = []
         candidates = _get_attr(response, "candidates", [])
         for candidate in candidates:
@@ -136,7 +136,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):
             if not content:
                 continue
             parts = _get_attr(content, "parts", [])
-            role = _get_attr(content, "role",
+            role = _get_attr(content, "role", GOOGLE_GENAI_DEFAULT_MODEL_ROLE)
             for part in parts:
                 message = extract_message_from_part_google_genai(part, role)
                 messages.append(message)
ddtrace/llmobs/_integrations/{google_genai_utils.py → google_utils.py}
RENAMED
@@ -1,6 +1,7 @@
 from typing import Any
 from typing import Dict
 from typing import List
+from typing import Optional
 from typing import Tuple

 from ddtrace.llmobs._constants import BILLABLE_CHARACTER_COUNT_METRIC_KEY
@@ -11,9 +12,9 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._utils import _get_attr


-#
+# Google GenAI has roles "model" and "user", but in order to stay consistent with other integrations,
 # we use "assistant" as the default role for model messages
-
+GOOGLE_GENAI_DEFAULT_MODEL_ROLE = "assistant"

 # https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-partner-models
 # GeminiAPI: only exports google provided models
@@ -40,9 +41,31 @@ KNOWN_MODEL_PREFIX_TO_PROVIDER = {
 }


-def extract_provider_and_model_name(
-
-
+def extract_provider_and_model_name(
+    kwargs: Optional[Dict[str, Any]] = None, instance: Any = None, model_name_attr: Optional[str] = None
+) -> Tuple[str, str]:
+    """
+    Function to extract provider and model name from either kwargs or instance attributes.
+    Args:
+        kwargs: Dictionary containing model information (used for google_genai)
+        instance: Model instance with attributes (used for vertexai and google_generativeai)
+        model_name_attr: Attribute name to extract from instance (e.g., "_model_name", "model_name", used for vertexai
+            and google_generativeai)
+
+    Returns:
+        Tuple of (provider_name, model_name)
+    """
+    model_path = ""
+    if kwargs is not None:
+        model_path = kwargs.get("model", "")
+    elif instance is not None and model_name_attr is not None:
+        model_path = _get_attr(instance, model_name_attr, "")
+
+    if not model_path or not isinstance(model_path, str):
+        return "custom", "custom"
+
+    model_name = model_path.split("/")[-1] if "/" in model_path else model_path
+
     for prefix in KNOWN_MODEL_PREFIX_TO_PROVIDER.keys():
         if model_name.lower().startswith(prefix):
             provider_name = KNOWN_MODEL_PREFIX_TO_PROVIDER[prefix]
@@ -50,7 +73,7 @@ def extract_provider_and_model_name(kwargs: Dict[str, Any]) -> Tuple[str, str]:
         return "custom", model_name if model_name else "custom"


-def
+def normalize_contents_google_genai(contents) -> List[Dict[str, Any]]:
     """
     contents has a complex union type structure:
     - contents: Union[ContentListUnion, ContentListUnionDict]
@@ -142,7 +165,7 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]:
     returns a dict representing a message with format {"role": role, "content": content}
     """
     if role == "model":
-        role =
+        role = GOOGLE_GENAI_DEFAULT_MODEL_ROLE

     message: Dict[str, Any] = {"role": role}
     if isinstance(part, str):
@@ -187,3 +210,76 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]:
         return message

     return {"content": "Unsupported file type: {}".format(type(part)), "role": role}
+
+
+def llmobs_get_metadata_gemini_vertexai(kwargs, instance):
+    metadata = {}
+    model_config = getattr(instance, "_generation_config", {}) or {}
+    model_config = model_config.to_dict() if hasattr(model_config, "to_dict") else model_config
+    request_config = kwargs.get("generation_config", {}) or {}
+    request_config = request_config.to_dict() if hasattr(request_config, "to_dict") else request_config
+
+    parameters = ("temperature", "max_output_tokens", "candidate_count", "top_p", "top_k")
+    for param in parameters:
+        model_config_value = _get_attr(model_config, param, None)
+        request_config_value = _get_attr(request_config, param, None)
+        if model_config_value or request_config_value:
+            metadata[param] = request_config_value or model_config_value
+    return metadata
+
+
+def extract_message_from_part_gemini_vertexai(part, role=None):
+    text = _get_attr(part, "text", "")
+    function_call = _get_attr(part, "function_call", None)
+    function_response = _get_attr(part, "function_response", None)
+    message = {"content": text}
+    if role:
+        message["role"] = role
+    if function_call:
+        function_call_dict = function_call
+        if not isinstance(function_call, dict):
+            function_call_dict = type(function_call).to_dict(function_call)
+        message["tool_calls"] = [
+            {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})}
+        ]
+    if function_response:
+        function_response_dict = function_response
+        if not isinstance(function_response, dict):
+            function_response_dict = type(function_response).to_dict(function_response)
+        message["content"] = "[tool result: {}]".format(function_response_dict.get("response", ""))
+    return message
+
+
+def get_system_instructions_gemini_vertexai(model_instance):
+    """
+    Extract system instructions from model and convert to []str for tagging.
+    """
+    try:
+        from google.ai.generativelanguage_v1beta.types.content import Content
+    except ImportError:
+        Content = None
+    try:
+        from vertexai.generative_models._generative_models import Part
+    except ImportError:
+        Part = None
+
+    raw_system_instructions = getattr(model_instance, "_system_instruction", [])
+    if Content is not None and isinstance(raw_system_instructions, Content):
+        system_instructions = []
+        for part in raw_system_instructions.parts:
+            system_instructions.append(_get_attr(part, "text", ""))
+        return system_instructions
+    elif isinstance(raw_system_instructions, str):
+        return [raw_system_instructions]
+    elif Part is not None and isinstance(raw_system_instructions, Part):
+        return [_get_attr(raw_system_instructions, "text", "")]
+    elif not isinstance(raw_system_instructions, list):
+        return []
+
+    system_instructions = []
+    for elem in raw_system_instructions:
+        if isinstance(elem, str):
+            system_instructions.append(elem)
+        elif Part is not None and isinstance(elem, Part):
+            system_instructions.append(_get_attr(elem, "text", ""))
+    return system_instructions
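The consolidated helper now supports both call styles. A short sketch, assuming the prefix table maps "gemini" to "google" and "llama" to "meta" (the model strings and the FakeModel class are invented for illustration):

from ddtrace.llmobs._integrations.google_utils import extract_provider_and_model_name

# google_genai style: the model is passed via kwargs.
extract_provider_and_model_name(kwargs={"model": "models/gemini-1.5-pro"})
# -> ("google", "gemini-1.5-pro")  (assuming the "gemini" prefix mapping)


class FakeModel:
    # Hypothetical stand-in for a vertexai / google_generativeai model instance.
    _model_name = "publishers/meta/models/llama-3"


# vertexai / google_generativeai style: the model name is read off the instance.
extract_provider_and_model_name(instance=FakeModel(), model_name_attr="_model_name")
# -> ("meta", "llama-3")  (assuming the "llama" prefix mapping)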
ddtrace/llmobs/_integrations/openai_agents.py
CHANGED
@@ -8,7 +8,9 @@ import weakref

 from ddtrace.internal import core
 from ddtrace.internal.logger import get_logger
+from ddtrace.internal.utils import get_argument_value
 from ddtrace.internal.utils.formats import format_trace_id
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import DISPATCH_ON_LLM_TOOL_CHOICE
 from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL
 from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL_OUTPUT_USED
@@ -31,6 +33,7 @@ from ddtrace.llmobs._integrations.utils import OaiSpanAdapter
 from ddtrace.llmobs._integrations.utils import OaiTraceAdapter
 from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor
 from ddtrace.llmobs._utils import _get_span_name
+from ddtrace.llmobs._utils import load_data_value
 from ddtrace.trace import Pin
 from ddtrace.trace import Span

@@ -296,3 +299,145 @@ class OpenAIAgentsIntegration(BaseLLMIntegration):
     def clear_state(self) -> None:
         self.oai_to_llmobs_span.clear()
         self.llmobs_traces.clear()
+
+    def tag_agent_manifest(self, span: Span, args: List[Any], kwargs: Dict[str, Any], agent_index: int) -> None:
+        agent = get_argument_value(args, kwargs, agent_index, "agent", True)
+        if not agent or not self.llmobs_enabled:
+            return
+
+        manifest = {}
+        manifest["framework"] = "OpenAI"
+        if hasattr(agent, "name"):
+            manifest["name"] = agent.name
+        if hasattr(agent, "instructions"):
+            manifest["instructions"] = agent.instructions
+        if hasattr(agent, "handoff_description"):
+            manifest["handoff_description"] = agent.handoff_description
+        if hasattr(agent, "model"):
+            model = agent.model
+            manifest["model"] = model if isinstance(model, str) else getattr(model, "model", "")
+
+        model_settings = self._extract_model_settings_from_agent(agent)
+        if model_settings:
+            manifest["model_settings"] = model_settings
+
+        tools = self._extract_tools_from_agent(agent)
+        if tools:
+            manifest["tools"] = tools
+
+        handoffs = self._extract_handoffs_from_agent(agent)
+        if handoffs:
+            manifest["handoffs"] = handoffs
+
+        guardrails = self._extract_guardrails_from_agent(agent)
+        if guardrails:
+            manifest["guardrails"] = guardrails
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _extract_model_settings_from_agent(self, agent):
+        if not hasattr(agent, "model_settings"):
+            return None
+
+        # convert model_settings to dict if it's not already
+        model_settings = agent.model_settings
+        if type(model_settings) != dict:
+            model_settings = getattr(model_settings, "__dict__", None)
+
+        return load_data_value(model_settings)
+
+    def _extract_tools_from_agent(self, agent):
+        if not hasattr(agent, "tools") or not agent.tools:
+            return None
+
+        tools = []
+        for tool in agent.tools:
+            tool_dict = {}
+            tool_name = getattr(tool, "name", None)
+            if tool_name:
+                tool_dict["name"] = tool_name
+            if tool_name == "web_search_preview":
+                if hasattr(tool, "user_location"):
+                    tool_dict["user_location"] = tool.user_location
+                if hasattr(tool, "search_context_size"):
+                    tool_dict["search_context_size"] = tool.search_context_size
+            elif tool_name == "file_search":
+                if hasattr(tool, "vector_store_ids"):
+                    tool_dict["vector_store_ids"] = tool.vector_store_ids
+                if hasattr(tool, "max_num_results"):
+                    tool_dict["max_num_results"] = tool.max_num_results
+                if hasattr(tool, "include_search_results"):
+                    tool_dict["include_search_results"] = tool.include_search_results
+                if hasattr(tool, "ranking_options"):
+                    tool_dict["ranking_options"] = tool.ranking_options
+                if hasattr(tool, "filters"):
+                    tool_dict["filters"] = tool.filters
+            elif tool_name == "computer_use_preview":
+                if hasattr(tool, "computer"):
+                    tool_dict["computer"] = tool.computer
+                if hasattr(tool, "on_safety_check"):
+                    tool_dict["on_safety_check"] = tool.on_safety_check
+            elif tool_name == "code_interpreter":
+                if hasattr(tool, "tool_config"):
+                    tool_dict["tool_config"] = tool.tool_config
+            elif tool_name == "hosted_mcp":
+                if hasattr(tool, "tool_config"):
+                    tool_dict["tool_config"] = tool.tool_config
+                if hasattr(tool, "on_approval_request"):
+                    tool_dict["on_approval_request"] = tool.on_approval_request
+            elif tool_name == "image_generation":
+                if hasattr(tool, "tool_config"):
+                    tool_dict["tool_config"] = tool.tool_config
+            elif tool_name == "local_shell":
+                if hasattr(tool, "executor"):
+                    tool_dict["executor"] = tool.executor
+            else:
+                if hasattr(tool, "description"):
+                    tool_dict["description"] = tool.description
+                if hasattr(tool, "strict_json_schema"):
+                    tool_dict["strict_json_schema"] = tool.strict_json_schema
+                if hasattr(tool, "params_json_schema"):
+                    parameter_schema = tool.params_json_schema
+                    required_params = {param: True for param in parameter_schema.get("required", [])}
+                    parameters = {}
+                    for param, schema in parameter_schema.get("properties", {}).items():
+                        param_dict = {}
+                        if "type" in schema:
+                            param_dict["type"] = schema["type"]
+                        if "title" in schema:
+                            param_dict["title"] = schema["title"]
+                        if param in required_params:
+                            param_dict["required"] = True
+                        parameters[param] = param_dict
+                    tool_dict["parameters"] = parameters
+            tools.append(tool_dict)
+
+        return tools
+
+    def _extract_handoffs_from_agent(self, agent):
+        if not hasattr(agent, "handoffs") or not agent.handoffs:
+            return None
+
+        handoffs = []
+        for handoff in agent.handoffs:
+            handoff_dict = {}
+            if hasattr(handoff, "handoff_description") or hasattr(handoff, "tool_description"):
+                handoff_dict["handoff_description"] = getattr(handoff, "handoff_description", None) or getattr(
+                    handoff, "tool_description", None
+                )
+            if hasattr(handoff, "name") or hasattr(handoff, "agent_name"):
+                handoff_dict["agent_name"] = getattr(handoff, "name", None) or getattr(handoff, "agent_name", None)
+            if hasattr(handoff, "tool_name"):
+                handoff_dict["tool_name"] = handoff.tool_name
+            if handoff_dict:
+                handoffs.append(handoff_dict)
+
+        return handoffs
+
+    def _extract_guardrails_from_agent(self, agent):
+        guardrails = []
+        if hasattr(agent, "input_guardrails"):
+            guardrails.extend([getattr(guardrail, "name", "") for guardrail in agent.input_guardrails])
+        if hasattr(agent, "output_guardrails"):
+            guardrails.extend([getattr(guardrail, "name", "") for guardrail in agent.output_guardrails])
+        return guardrails