ddtrace 3.11.0rc1__cp312-cp312-win32.whl → 3.11.0rc3__cp312-cp312-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddtrace/_logger.py +5 -6
- ddtrace/_trace/product.py +1 -1
- ddtrace/_trace/sampling_rule.py +25 -33
- ddtrace/_trace/trace_handlers.py +12 -50
- ddtrace/_trace/utils_botocore/span_tags.py +48 -0
- ddtrace/_version.py +2 -2
- ddtrace/appsec/_asm_request_context.py +3 -1
- ddtrace/appsec/_constants.py +7 -0
- ddtrace/appsec/_handlers.py +11 -0
- ddtrace/appsec/_iast/_listener.py +12 -2
- ddtrace/appsec/_processor.py +1 -1
- ddtrace/contrib/integration_registry/registry.yaml +10 -0
- ddtrace/contrib/internal/aiobotocore/patch.py +8 -0
- ddtrace/contrib/internal/avro/__init__.py +17 -0
- ddtrace/contrib/internal/azure_functions/patch.py +23 -12
- ddtrace/contrib/internal/azure_functions/utils.py +14 -0
- ddtrace/contrib/internal/boto/patch.py +14 -0
- ddtrace/contrib/internal/botocore/__init__.py +153 -0
- ddtrace/contrib/internal/botocore/services/bedrock.py +3 -27
- ddtrace/contrib/internal/django/patch.py +31 -8
- ddtrace/contrib/{_freezegun.py → internal/freezegun/__init__.py} +1 -1
- ddtrace/contrib/internal/google_genai/_utils.py +2 -2
- ddtrace/contrib/internal/google_genai/patch.py +7 -7
- ddtrace/contrib/internal/google_generativeai/patch.py +7 -5
- ddtrace/contrib/internal/langchain/patch.py +11 -443
- ddtrace/contrib/internal/langchain/utils.py +0 -26
- ddtrace/contrib/internal/logbook/patch.py +1 -2
- ddtrace/contrib/internal/logging/patch.py +4 -7
- ddtrace/contrib/internal/loguru/patch.py +1 -3
- ddtrace/contrib/internal/openai_agents/patch.py +44 -1
- ddtrace/contrib/internal/protobuf/__init__.py +17 -0
- ddtrace/contrib/internal/pytest/__init__.py +62 -0
- ddtrace/contrib/internal/pytest/_plugin_v2.py +13 -4
- ddtrace/contrib/internal/pytest_bdd/__init__.py +23 -0
- ddtrace/contrib/internal/pytest_benchmark/__init__.py +3 -0
- ddtrace/contrib/internal/structlog/patch.py +2 -4
- ddtrace/contrib/internal/unittest/__init__.py +36 -0
- ddtrace/contrib/internal/vertexai/patch.py +7 -5
- ddtrace/ext/ci.py +20 -0
- ddtrace/ext/git.py +66 -11
- ddtrace/internal/_encoding.cp312-win32.pyd +0 -0
- ddtrace/internal/_encoding.pyi +1 -1
- ddtrace/internal/_rand.cp312-win32.pyd +0 -0
- ddtrace/internal/_tagset.cp312-win32.pyd +0 -0
- ddtrace/internal/_threads.cp312-win32.pyd +0 -0
- ddtrace/internal/ci_visibility/encoder.py +126 -49
- ddtrace/internal/ci_visibility/utils.py +4 -4
- ddtrace/internal/core/__init__.py +5 -2
- ddtrace/internal/datadog/profiling/dd_wrapper-unknown-amd64.dll +0 -0
- ddtrace/internal/datadog/profiling/dd_wrapper-unknown-amd64.lib +0 -0
- ddtrace/internal/datadog/profiling/ddup/_ddup.cp312-win32.pyd +0 -0
- ddtrace/internal/datadog/profiling/ddup/_ddup.cp312-win32.pyd.lib +0 -0
- ddtrace/internal/datadog/profiling/ddup/dd_wrapper-unknown-amd64.dll +0 -0
- ddtrace/internal/datadog/profiling/ddup/dd_wrapper-unknown-amd64.lib +0 -0
- ddtrace/internal/endpoints.py +76 -0
- ddtrace/internal/native/_native.cp312-win32.pyd +0 -0
- ddtrace/internal/schema/processor.py +6 -2
- ddtrace/internal/telemetry/metrics_namespaces.cp312-win32.pyd +0 -0
- ddtrace/internal/telemetry/writer.py +18 -0
- ddtrace/internal/test_visibility/coverage_lines.py +4 -4
- ddtrace/internal/writer/writer.py +24 -11
- ddtrace/llmobs/_constants.py +3 -0
- ddtrace/llmobs/_experiment.py +75 -10
- ddtrace/llmobs/_integrations/bedrock.py +4 -0
- ddtrace/llmobs/_integrations/bedrock_agents.py +5 -1
- ddtrace/llmobs/_integrations/crewai.py +52 -3
- ddtrace/llmobs/_integrations/gemini.py +7 -7
- ddtrace/llmobs/_integrations/google_genai.py +10 -10
- ddtrace/llmobs/_integrations/{google_genai_utils.py → google_utils.py} +103 -7
- ddtrace/llmobs/_integrations/langchain.py +29 -20
- ddtrace/llmobs/_integrations/openai_agents.py +145 -0
- ddtrace/llmobs/_integrations/pydantic_ai.py +67 -26
- ddtrace/llmobs/_integrations/utils.py +68 -158
- ddtrace/llmobs/_integrations/vertexai.py +8 -8
- ddtrace/llmobs/_llmobs.py +83 -14
- ddtrace/llmobs/_telemetry.py +20 -5
- ddtrace/llmobs/_utils.py +27 -0
- ddtrace/profiling/_threading.cp312-win32.pyd +0 -0
- ddtrace/profiling/collector/_memalloc.cp312-win32.pyd +0 -0
- ddtrace/profiling/collector/_task.cp312-win32.pyd +0 -0
- ddtrace/profiling/collector/_traceback.cp312-win32.pyd +0 -0
- ddtrace/profiling/collector/stack.cp312-win32.pyd +0 -0
- ddtrace/settings/_config.py +1 -2
- ddtrace/settings/asm.py +9 -2
- ddtrace/settings/profiling.py +0 -9
- ddtrace/vendor/psutil/_psutil_windows.cp312-win32.pyd +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/METADATA +1 -1
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/RECORD +171 -177
- ddtrace/contrib/_avro.py +0 -17
- ddtrace/contrib/_botocore.py +0 -153
- ddtrace/contrib/_protobuf.py +0 -17
- ddtrace/contrib/_pytest.py +0 -62
- ddtrace/contrib/_pytest_bdd.py +0 -23
- ddtrace/contrib/_pytest_benchmark.py +0 -3
- ddtrace/contrib/_unittest.py +0 -36
- /ddtrace/contrib/{_aiobotocore.py → internal/aiobotocore/__init__.py} +0 -0
- /ddtrace/contrib/{_aiohttp_jinja2.py → internal/aiohttp_jinja2/__init__.py} +0 -0
- /ddtrace/contrib/{_aiomysql.py → internal/aiomysql/__init__.py} +0 -0
- /ddtrace/contrib/{_aiopg.py → internal/aiopg/__init__.py} +0 -0
- /ddtrace/contrib/{_aioredis.py → internal/aioredis/__init__.py} +0 -0
- /ddtrace/contrib/{_algoliasearch.py → internal/algoliasearch/__init__.py} +0 -0
- /ddtrace/contrib/{_anthropic.py → internal/anthropic/__init__.py} +0 -0
- /ddtrace/contrib/{_aredis.py → internal/aredis/__init__.py} +0 -0
- /ddtrace/contrib/{_asyncio.py → internal/asyncio/__init__.py} +0 -0
- /ddtrace/contrib/{_asyncpg.py → internal/asyncpg/__init__.py} +0 -0
- /ddtrace/contrib/{_aws_lambda.py → internal/aws_lambda/__init__.py} +0 -0
- /ddtrace/contrib/{_azure_functions.py → internal/azure_functions/__init__.py} +0 -0
- /ddtrace/contrib/{_azure_servicebus.py → internal/azure_servicebus/__init__.py} +0 -0
- /ddtrace/contrib/{_boto.py → internal/boto/__init__.py} +0 -0
- /ddtrace/contrib/{_cassandra.py → internal/cassandra/__init__.py} +0 -0
- /ddtrace/contrib/{_consul.py → internal/consul/__init__.py} +0 -0
- /ddtrace/contrib/{_coverage.py → internal/coverage/__init__.py} +0 -0
- /ddtrace/contrib/{_crewai.py → internal/crewai/__init__.py} +0 -0
- /ddtrace/contrib/{_django.py → internal/django/__init__.py} +0 -0
- /ddtrace/contrib/{_dogpile_cache.py → internal/dogpile_cache/__init__.py} +0 -0
- /ddtrace/contrib/{_dramatiq.py → internal/dramatiq/__init__.py} +0 -0
- /ddtrace/contrib/{_elasticsearch.py → internal/elasticsearch/__init__.py} +0 -0
- /ddtrace/contrib/{_fastapi.py → internal/fastapi/__init__.py} +0 -0
- /ddtrace/contrib/{_flask.py → internal/flask/__init__.py} +0 -0
- /ddtrace/contrib/{_futures.py → internal/futures/__init__.py} +0 -0
- /ddtrace/contrib/{_gevent.py → internal/gevent/__init__.py} +0 -0
- /ddtrace/contrib/{_google_genai.py → internal/google_genai/__init__.py} +0 -0
- /ddtrace/contrib/{_google_generativeai.py → internal/google_generativeai/__init__.py} +0 -0
- /ddtrace/contrib/{_graphql.py → internal/graphql/__init__.py} +0 -0
- /ddtrace/contrib/{_grpc.py → internal/grpc/__init__.py} +0 -0
- /ddtrace/contrib/{_gunicorn.py → internal/gunicorn/__init__.py} +0 -0
- /ddtrace/contrib/{_httplib.py → internal/httplib/__init__.py} +0 -0
- /ddtrace/contrib/{_httpx.py → internal/httpx/__init__.py} +0 -0
- /ddtrace/contrib/{_jinja2.py → internal/jinja2/__init__.py} +0 -0
- /ddtrace/contrib/{_kafka.py → internal/kafka/__init__.py} +0 -0
- /ddtrace/contrib/{_kombu.py → internal/kombu/__init__.py} +0 -0
- /ddtrace/contrib/{_langchain.py → internal/langchain/__init__.py} +0 -0
- /ddtrace/contrib/{_langgraph.py → internal/langgraph/__init__.py} +0 -0
- /ddtrace/contrib/{_litellm.py → internal/litellm/__init__.py} +0 -0
- /ddtrace/contrib/{_logbook.py → internal/logbook/__init__.py} +0 -0
- /ddtrace/contrib/{_logging.py → internal/logging/__init__.py} +0 -0
- /ddtrace/contrib/{_loguru.py → internal/loguru/__init__.py} +0 -0
- /ddtrace/contrib/{_mako.py → internal/mako/__init__.py} +0 -0
- /ddtrace/contrib/{_mariadb.py → internal/mariadb/__init__.py} +0 -0
- /ddtrace/contrib/{_mcp.py → internal/mcp/__init__.py} +0 -0
- /ddtrace/contrib/{_molten.py → internal/molten/__init__.py} +0 -0
- /ddtrace/contrib/{_mongoengine.py → internal/mongoengine/__init__.py} +0 -0
- /ddtrace/contrib/{_mysql.py → internal/mysql/__init__.py} +0 -0
- /ddtrace/contrib/{_mysqldb.py → internal/mysqldb/__init__.py} +0 -0
- /ddtrace/contrib/{_openai.py → internal/openai/__init__.py} +0 -0
- /ddtrace/contrib/{_openai_agents.py → internal/openai_agents/__init__.py} +0 -0
- /ddtrace/contrib/{_psycopg.py → internal/psycopg/__init__.py} +0 -0
- /ddtrace/contrib/{_pydantic_ai.py → internal/pydantic_ai/__init__.py} +0 -0
- /ddtrace/contrib/{_pymemcache.py → internal/pymemcache/__init__.py} +0 -0
- /ddtrace/contrib/{_pymongo.py → internal/pymongo/__init__.py} +0 -0
- /ddtrace/contrib/{_pymysql.py → internal/pymysql/__init__.py} +0 -0
- /ddtrace/contrib/{_pynamodb.py → internal/pynamodb/__init__.py} +0 -0
- /ddtrace/contrib/{_pyodbc.py → internal/pyodbc/__init__.py} +0 -0
- /ddtrace/contrib/{_redis.py → internal/redis/__init__.py} +0 -0
- /ddtrace/contrib/{_rediscluster.py → internal/rediscluster/__init__.py} +0 -0
- /ddtrace/contrib/{_rq.py → internal/rq/__init__.py} +0 -0
- /ddtrace/contrib/{_sanic.py → internal/sanic/__init__.py} +0 -0
- /ddtrace/contrib/{_selenium.py → internal/selenium/__init__.py} +0 -0
- /ddtrace/contrib/{_snowflake.py → internal/snowflake/__init__.py} +0 -0
- /ddtrace/contrib/{_sqlite3.py → internal/sqlite3/__init__.py} +0 -0
- /ddtrace/contrib/{_starlette.py → internal/starlette/__init__.py} +0 -0
- /ddtrace/contrib/{_structlog.py → internal/structlog/__init__.py} +0 -0
- /ddtrace/contrib/{_subprocess.py → internal/subprocess/__init__.py} +0 -0
- /ddtrace/contrib/{_urllib.py → internal/urllib/__init__.py} +0 -0
- /ddtrace/contrib/{_urllib3.py → internal/urllib3/__init__.py} +0 -0
- /ddtrace/contrib/{_vertexai.py → internal/vertexai/__init__.py} +0 -0
- /ddtrace/contrib/{_vertica.py → internal/vertica/__init__.py} +0 -0
- /ddtrace/contrib/{_webbrowser.py → internal/webbrowser/__init__.py} +0 -0
- /ddtrace/contrib/{_yaaredis.py → internal/yaaredis/__init__.py} +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/WHEEL +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/entry_points.txt +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/LICENSE +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/LICENSE.Apache +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/LICENSE.BSD3 +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/NOTICE +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/top_level.txt +0 -0
ddtrace/llmobs/_integrations/openai_agents.py

@@ -8,7 +8,9 @@ import weakref
 
 from ddtrace.internal import core
 from ddtrace.internal.logger import get_logger
+from ddtrace.internal.utils import get_argument_value
 from ddtrace.internal.utils.formats import format_trace_id
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import DISPATCH_ON_LLM_TOOL_CHOICE
 from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL
 from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL_OUTPUT_USED
@@ -31,6 +33,7 @@ from ddtrace.llmobs._integrations.utils import OaiSpanAdapter
 from ddtrace.llmobs._integrations.utils import OaiTraceAdapter
 from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor
 from ddtrace.llmobs._utils import _get_span_name
+from ddtrace.llmobs._utils import load_data_value
 from ddtrace.trace import Pin
 from ddtrace.trace import Span
 
@@ -296,3 +299,145 @@ class OpenAIAgentsIntegration(BaseLLMIntegration):
     def clear_state(self) -> None:
         self.oai_to_llmobs_span.clear()
         self.llmobs_traces.clear()
+
+    def tag_agent_manifest(self, span: Span, args: List[Any], kwargs: Dict[str, Any], agent_index: int) -> None:
+        agent = get_argument_value(args, kwargs, agent_index, "agent", True)
+        if not agent or not self.llmobs_enabled:
+            return
+
+        manifest = {}
+        manifest["framework"] = "OpenAI"
+        if hasattr(agent, "name"):
+            manifest["name"] = agent.name
+        if hasattr(agent, "instructions"):
+            manifest["instructions"] = agent.instructions
+        if hasattr(agent, "handoff_description"):
+            manifest["handoff_description"] = agent.handoff_description
+        if hasattr(agent, "model"):
+            model = agent.model
+            manifest["model"] = model if isinstance(model, str) else getattr(model, "model", "")
+
+        model_settings = self._extract_model_settings_from_agent(agent)
+        if model_settings:
+            manifest["model_settings"] = model_settings
+
+        tools = self._extract_tools_from_agent(agent)
+        if tools:
+            manifest["tools"] = tools
+
+        handoffs = self._extract_handoffs_from_agent(agent)
+        if handoffs:
+            manifest["handoffs"] = handoffs
+
+        guardrails = self._extract_guardrails_from_agent(agent)
+        if guardrails:
+            manifest["guardrails"] = guardrails
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _extract_model_settings_from_agent(self, agent):
+        if not hasattr(agent, "model_settings"):
+            return None
+
+        # convert model_settings to dict if it's not already
+        model_settings = agent.model_settings
+        if type(model_settings) != dict:
+            model_settings = getattr(model_settings, "__dict__", None)
+
+        return load_data_value(model_settings)
+
+    def _extract_tools_from_agent(self, agent):
+        if not hasattr(agent, "tools") or not agent.tools:
+            return None
+
+        tools = []
+        for tool in agent.tools:
+            tool_dict = {}
+            tool_name = getattr(tool, "name", None)
+            if tool_name:
+                tool_dict["name"] = tool_name
+            if tool_name == "web_search_preview":
+                if hasattr(tool, "user_location"):
+                    tool_dict["user_location"] = tool.user_location
+                if hasattr(tool, "search_context_size"):
+                    tool_dict["search_context_size"] = tool.search_context_size
+            elif tool_name == "file_search":
+                if hasattr(tool, "vector_store_ids"):
+                    tool_dict["vector_store_ids"] = tool.vector_store_ids
+                if hasattr(tool, "max_num_results"):
+                    tool_dict["max_num_results"] = tool.max_num_results
+                if hasattr(tool, "include_search_results"):
+                    tool_dict["include_search_results"] = tool.include_search_results
+                if hasattr(tool, "ranking_options"):
+                    tool_dict["ranking_options"] = tool.ranking_options
+                if hasattr(tool, "filters"):
+                    tool_dict["filters"] = tool.filters
+            elif tool_name == "computer_use_preview":
+                if hasattr(tool, "computer"):
+                    tool_dict["computer"] = tool.computer
+                if hasattr(tool, "on_safety_check"):
+                    tool_dict["on_safety_check"] = tool.on_safety_check
+            elif tool_name == "code_interpreter":
+                if hasattr(tool, "tool_config"):
+                    tool_dict["tool_config"] = tool.tool_config
+            elif tool_name == "hosted_mcp":
+                if hasattr(tool, "tool_config"):
+                    tool_dict["tool_config"] = tool.tool_config
+                if hasattr(tool, "on_approval_request"):
+                    tool_dict["on_approval_request"] = tool.on_approval_request
+            elif tool_name == "image_generation":
+                if hasattr(tool, "tool_config"):
+                    tool_dict["tool_config"] = tool.tool_config
+            elif tool_name == "local_shell":
+                if hasattr(tool, "executor"):
+                    tool_dict["executor"] = tool.executor
+            else:
+                if hasattr(tool, "description"):
+                    tool_dict["description"] = tool.description
+                if hasattr(tool, "strict_json_schema"):
+                    tool_dict["strict_json_schema"] = tool.strict_json_schema
+                if hasattr(tool, "params_json_schema"):
+                    parameter_schema = tool.params_json_schema
+                    required_params = {param: True for param in parameter_schema.get("required", [])}
+                    parameters = {}
+                    for param, schema in parameter_schema.get("properties", {}).items():
+                        param_dict = {}
+                        if "type" in schema:
+                            param_dict["type"] = schema["type"]
+                        if "title" in schema:
+                            param_dict["title"] = schema["title"]
+                        if param in required_params:
+                            param_dict["required"] = True
+                        parameters[param] = param_dict
+                    tool_dict["parameters"] = parameters
+            tools.append(tool_dict)
+
+        return tools
+
+    def _extract_handoffs_from_agent(self, agent):
+        if not hasattr(agent, "handoffs") or not agent.handoffs:
+            return None
+
+        handoffs = []
+        for handoff in agent.handoffs:
+            handoff_dict = {}
+            if hasattr(handoff, "handoff_description") or hasattr(handoff, "tool_description"):
+                handoff_dict["handoff_description"] = getattr(handoff, "handoff_description", None) or getattr(
+                    handoff, "tool_description", None
+                )
+            if hasattr(handoff, "name") or hasattr(handoff, "agent_name"):
+                handoff_dict["agent_name"] = getattr(handoff, "name", None) or getattr(handoff, "agent_name", None)
+            if hasattr(handoff, "tool_name"):
+                handoff_dict["tool_name"] = handoff.tool_name
+            if handoff_dict:
+                handoffs.append(handoff_dict)
+
+        return handoffs
+
+    def _extract_guardrails_from_agent(self, agent):
+        guardrails = []
+        if hasattr(agent, "input_guardrails"):
+            guardrails.extend([getattr(guardrail, "name", "") for guardrail in agent.input_guardrails])
+        if hasattr(agent, "output_guardrails"):
+            guardrails.extend([getattr(guardrail, "name", "") for guardrail in agent.output_guardrails])
+        return guardrails
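
Note: the new tag_agent_manifest() relies entirely on duck typing (hasattr/getattr) rather than importing agent types from the agents SDK. A minimal standalone sketch of the resulting manifest shape; StubAgent is an invented stand-in, not a real agents-SDK type:

    # Hypothetical stub whose attribute names mirror what tag_agent_manifest() probes for.
    class StubAgent:
        name = "support-agent"
        instructions = "Answer billing questions."
        handoff_description = "Escalates refund requests."
        model = "gpt-4o"

    agent = StubAgent()
    manifest = {"framework": "OpenAI"}
    for attr in ("name", "instructions", "handoff_description"):
        if hasattr(agent, attr):
            manifest[attr] = getattr(agent, attr)
    if hasattr(agent, "model"):
        model = agent.model
        # string model names are kept as-is; model objects fall back to their .model attribute
        manifest["model"] = model if isinstance(model, str) else getattr(model, "model", "")

    print(manifest)
    # {'framework': 'OpenAI', 'name': 'support-agent', 'instructions': 'Answer billing questions.',
    #  'handoff_description': 'Escalates refund requests.', 'model': 'gpt-4o'}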

ddtrace/llmobs/_integrations/pydantic_ai.py

@@ -2,9 +2,11 @@ from typing import Any
 from typing import Dict
 from typing import List
 from typing import Optional
+from typing import Tuple
 
 from ddtrace.internal.utils import get_argument_value
 from ddtrace.internal.utils.formats import format_trace_id
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import INPUT_VALUE
 from ddtrace.llmobs._constants import METADATA
@@ -45,13 +47,19 @@ class PydanticAIIntegration(BaseLLMIntegration):
         span._set_ctx_item(SPAN_KIND, kind)
         return span
 
-    def _set_base_span_tags(self, span: Span, model: Optional[
+    def _set_base_span_tags(self, span: Span, model: Optional[Any] = None, **kwargs) -> None:
         if model:
-
-
-            if
-
-
+            model_name, provider = self._get_model_and_provider(model)
+            span.set_tag("pydantic_ai.request.model", model_name)
+            if provider:
+                span.set_tag("pydantic_ai.request.provider", provider)
+
+    def _get_model_and_provider(self, model: Optional[Any]) -> Tuple[str, str]:
+        model_name = getattr(model, "model_name", "")
+        system = getattr(model, "system", None)
+        if system:
+            system = PYDANTIC_AI_SYSTEM_TO_PROVIDER.get(system, system)
+        return model_name, system
 
     def _llmobs_set_tags(
         self,
@@ -84,26 +92,8 @@ class PydanticAIIntegration(BaseLLMIntegration):
         from pydantic_ai.agent import AgentRun
 
         agent_instance = kwargs.get("instance", None)
-
-
-        agent_instructions = getattr(agent_instance, "_instructions", None)
-        agent_system_prompts = getattr(agent_instance, "_system_prompts", None)
-        agent_tools = list(getattr(agent_instance, "_function_tools", {}).keys())
-        agent_model_settings = getattr(agent_instance, "model_settings", None)
-        metadata = {
-            "instructions": agent_instructions,
-            "system_prompts": agent_system_prompts,
-            "tools": agent_tools,
-        }
-        if agent_model_settings:
-            metadata["max_tokens"] = agent_model_settings.get("max_tokens", None)
-            metadata["temperature"] = agent_model_settings.get("temperature", None)
-        span._set_ctx_items(
-            {
-                NAME: agent_name or "PydanticAI Agent",
-                METADATA: metadata,
-            }
-        )
+        agent_name = getattr(agent_instance, "name", None)
+        self._tag_agent_manifest(span, kwargs, agent_instance)
         user_prompt = get_argument_value(args, kwargs, 0, "user_prompt")
         result = response
         if isinstance(result, AgentRun) and hasattr(result, "result"):
@@ -119,6 +109,7 @@ class PydanticAIIntegration(BaseLLMIntegration):
             metrics = self.extract_usage_metrics(response, kwargs)
             span._set_ctx_items(
                 {
+                    NAME: agent_name or "PydanticAI Agent",
                     INPUT_VALUE: user_prompt,
                     OUTPUT_VALUE: result,
                     METRICS: metrics,
@@ -145,6 +136,56 @@ class PydanticAIIntegration(BaseLLMIntegration):
         if not span.error:
             span._set_ctx_item(OUTPUT_VALUE, getattr(response, "content", ""))
 
+    def _tag_agent_manifest(self, span: Span, kwargs: Dict[str, Any], agent: Any) -> None:
+        if not agent:
+            return
+
+        manifest: Dict[str, Any] = {}
+        manifest["framework"] = "PydanticAI"
+        manifest["name"] = agent.name if hasattr(agent, "name") and agent.name else "PydanticAI Agent"
+        model = getattr(agent, "model", None)
+        if model:
+            model_name, _ = self._get_model_and_provider(model)
+            if model_name:
+                manifest["model"] = model_name
+        if hasattr(agent, "model_settings"):
+            manifest["model_settings"] = agent.model_settings
+        if hasattr(agent, "_instructions"):
+            manifest["instructions"] = agent._instructions
+        if hasattr(agent, "_system_prompts"):
+            manifest["system_prompts"] = agent._system_prompts
+        if hasattr(agent, "_function_tools"):
+            manifest["tools"] = self._get_agent_tools(agent._function_tools)
+        if kwargs.get("deps", None):
+            agent_dependencies = kwargs.get("deps", None)
+            manifest["dependencies"] = getattr(agent_dependencies, "__dict__", agent_dependencies)
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _get_agent_tools(self, tools: Any) -> List[Dict[str, Any]]:
+        if not tools:
+            return []
+        formatted_tools = []
+        for tool_name, tool_instance in tools.items():
+            tool_dict = {}
+            tool_dict["name"] = tool_name
+            if hasattr(tool_instance, "description"):
+                tool_dict["description"] = tool_instance.description
+            function_schema = getattr(tool_instance, "function_schema", {})
+            json_schema = getattr(function_schema, "json_schema", {})
+            required_params = {param: True for param in json_schema.get("required", [])}
+            parameters = {}
+            for param, schema in json_schema.get("properties", {}).items():
+                param_dict = {}
+                if "type" in schema:
+                    param_dict["type"] = schema["type"]
+                if param in required_params:
+                    param_dict["required"] = True
+                parameters[param] = param_dict
+            tool_dict["parameters"] = parameters
+            formatted_tools.append(tool_dict)
+        return formatted_tools
+
     def extract_usage_metrics(self, response: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
         response = kwargs.get("streamed_run_result", None) or response
         usage = None
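
Note: _get_agent_tools() flattens each tool's JSON schema into a compact parameters dict, the same transform the OpenAI Agents integration applies to params_json_schema above. A self-contained sketch; the schema contents here are invented for illustration:

    # Invented example schema; mirrors the properties/required handling in _get_agent_tools().
    json_schema = {
        "properties": {
            "city": {"type": "string"},
            "units": {"type": "string"},
        },
        "required": ["city"],
    }

    required_params = {param: True for param in json_schema.get("required", [])}
    parameters = {}
    for param, schema in json_schema.get("properties", {}).items():
        param_dict = {}
        if "type" in schema:
            param_dict["type"] = schema["type"]
        if param in required_params:
            param_dict["required"] = True
        parameters[param] = param_dict

    print(parameters)
    # {'city': {'type': 'string', 'required': True}, 'units': {'type': 'string'}}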

ddtrace/llmobs/_integrations/utils.py

@@ -1,6 +1,4 @@
-from dataclasses import asdict
 from dataclasses import dataclass
-from dataclasses import is_dataclass
 import json
 import re
 from typing import Any
@@ -19,7 +17,6 @@ from ddtrace.llmobs._constants import DISPATCH_ON_TOOL_CALL_OUTPUT_USED
 from ddtrace.llmobs._constants import INPUT_MESSAGES
 from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import INPUT_VALUE
-from ddtrace.llmobs._constants import LITELLM_ROUTER_INSTANCE_KEY
 from ddtrace.llmobs._constants import METADATA
 from ddtrace.llmobs._constants import OAI_HANDOFF_TOOL_ARG
 from ddtrace.llmobs._constants import OUTPUT_MESSAGES
@@ -27,6 +24,7 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import OUTPUT_VALUE
 from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._utils import _get_attr
+from ddtrace.llmobs._utils import load_data_value
 from ddtrace.llmobs._utils import safe_json
 
 
@@ -39,31 +37,71 @@ except ModuleNotFoundError:
 
 logger = get_logger(__name__)
 
-
-    "
-    "
-    "
-    "
-    "user_api_key_hash",
-    LITELLM_ROUTER_INSTANCE_KEY,
+COMMON_METADATA_KEYS = (
+    "stream",
+    "temperature",
+    "top_p",
+    "user",
 )
-
-    "
-    "
+OPENAI_METADATA_RESPONSE_KEYS = (
+    "background",
+    "include",
+    "max_output_tokens",
+    "max_tool_calls",
+    "parallel_tool_calls",
+    "previous_response_id",
+    "prompt",
+    "reasoning",
+    "service_tier",
+    "store",
+    "text",
+    "tool_choice",
     "tools",
-    "
-    "
-
-
-
+    "top_logprobs",
+    "truncation",
+)
+OPENAI_METADATA_CHAT_KEYS = (
+    "audio",
+    "frequency_penalty",
+    "function_call",
+    "logit_bias",
+    "logprobs",
+    "max_completion_tokens",
+    "max_tokens",
+    "modalities",
+    "n",
+    "parallel_tool_calls",
+    "prediction",
+    "presence_penalty",
+    "reasoning_effort",
+    "response_format",
+    "seed",
+    "service_tier",
+    "stop",
+    "store",
+    "stream_options",
+    "tool_choice",
+    "top_logprobs",
+    "web_search_options",
+)
+OPENAI_METADATA_COMPLETION_KEYS = (
+    "best_of",
+    "echo",
+    "frequency_penalty",
+    "logit_bias",
+    "logprobs",
+    "max_tokens",
+    "n",
+    "presence_penalty",
+    "seed",
+    "stop",
+    "stream_options",
+    "suffix",
 )
 
 LITELLM_METADATA_CHAT_KEYS = (
     "timeout",
-    "temperature",
-    "top_p",
     "n",
-    "stream",
     "stream_options",
     "stop",
     "max_completion_tokens",
@@ -73,7 +111,6 @@ LITELLM_METADATA_CHAT_KEYS = (
     "presence_penalty",
     "frequency_penalty",
     "logit_bias",
-    "user",
     "response_format",
     "seed",
     "tool_choice",
@@ -97,12 +134,8 @@ LITELLM_METADATA_COMPLETION_KEYS = (
     "n",
     "presence_penalty",
     "stop",
-    "stream",
     "stream_options",
     "suffix",
-    "temperature",
-    "top_p",
-    "user",
     "api_base",
     "api_version",
     "model_list",
@@ -110,67 +143,6 @@ LITELLM_METADATA_COMPLETION_KEYS = (
 )
 
 
-def extract_model_name_google(instance, model_name_attr):
-    """Extract the model name from the instance.
-    Model names are stored in the format `"models/{model_name}"`
-    so we do our best to return the model name instead of the full string.
-    """
-    model_name = _get_attr(instance, model_name_attr, "")
-    if not model_name or not isinstance(model_name, str):
-        return ""
-    if "/" in model_name:
-        return model_name.split("/")[-1]
-    return model_name
-
-
-def get_generation_config_google(instance, kwargs):
-    """
-    The generation config can be defined on the model instance or
-    as a kwarg of the request. Therefore, try to extract this information
-    from the kwargs and otherwise default to checking the model instance attribute.
-    """
-    generation_config = kwargs.get("generation_config", {})
-    return generation_config or _get_attr(instance, "_generation_config", {})
-
-
-def llmobs_get_metadata_google(kwargs, instance):
-    metadata = {}
-    model_config = getattr(instance, "_generation_config", {}) or {}
-    model_config = model_config.to_dict() if hasattr(model_config, "to_dict") else model_config
-    request_config = kwargs.get("generation_config", {}) or {}
-    request_config = request_config.to_dict() if hasattr(request_config, "to_dict") else request_config
-
-    parameters = ("temperature", "max_output_tokens", "candidate_count", "top_p", "top_k")
-    for param in parameters:
-        model_config_value = _get_attr(model_config, param, None)
-        request_config_value = _get_attr(request_config, param, None)
-        if model_config_value or request_config_value:
-            metadata[param] = request_config_value or model_config_value
-    return metadata
-
-
-def extract_message_from_part_google(part, role=None):
-    text = _get_attr(part, "text", "")
-    function_call = _get_attr(part, "function_call", None)
-    function_response = _get_attr(part, "function_response", None)
-    message = {"content": text}
-    if role:
-        message["role"] = role
-    if function_call:
-        function_call_dict = function_call
-        if not isinstance(function_call, dict):
-            function_call_dict = type(function_call).to_dict(function_call)
-        message["tool_calls"] = [
-            {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})}
-        ]
-    if function_response:
-        function_response_dict = function_response
-        if not isinstance(function_response, dict):
-            function_response_dict = type(function_response).to_dict(function_response)
-        message["content"] = "[tool result: {}]".format(function_response_dict.get("response", ""))
-    return message
-
-
 def get_llmobs_metrics_tags(integration_name, span):
     usage = {}
 
@@ -209,41 +181,6 @@ def parse_llmobs_metric_args(metrics):
     return usage
 
 
-def get_system_instructions_from_google_model(model_instance):
-    """
-    Extract system instructions from model and convert to []str for tagging.
-    """
-    try:
-        from google.ai.generativelanguage_v1beta.types.content import Content
-    except ImportError:
-        Content = None
-    try:
-        from vertexai.generative_models._generative_models import Part
-    except ImportError:
-        Part = None
-
-    raw_system_instructions = getattr(model_instance, "_system_instruction", [])
-    if Content is not None and isinstance(raw_system_instructions, Content):
-        system_instructions = []
-        for part in raw_system_instructions.parts:
-            system_instructions.append(_get_attr(part, "text", ""))
-        return system_instructions
-    elif isinstance(raw_system_instructions, str):
-        return [raw_system_instructions]
-    elif Part is not None and isinstance(raw_system_instructions, Part):
-        return [_get_attr(raw_system_instructions, "text", "")]
-    elif not isinstance(raw_system_instructions, list):
-        return []
-
-    system_instructions = []
-    for elem in raw_system_instructions:
-        if isinstance(elem, str):
-            system_instructions.append(elem)
-        elif Part is not None and isinstance(elem, Part):
-            system_instructions.append(_get_attr(elem, "text", ""))
-    return system_instructions
-
-
 LANGCHAIN_ROLE_MAPPING = {
     "human": "user",
     "ai": "assistant",
@@ -471,12 +408,12 @@ def get_metadata_from_kwargs(
     kwargs: Dict[str, Any], integration_name: str = "openai", operation: str = "chat"
 ) -> Dict[str, Any]:
     metadata = {}
+    keys_to_include: Tuple[str, ...] = COMMON_METADATA_KEYS
     if integration_name == "openai":
-
-        metadata = {k: v for k, v in kwargs.items() if k not in keys_to_skip}
+        keys_to_include += OPENAI_METADATA_CHAT_KEYS if operation == "chat" else OPENAI_METADATA_COMPLETION_KEYS
    elif integration_name == "litellm":
-        keys_to_include
-
+        keys_to_include += LITELLM_METADATA_CHAT_KEYS if operation == "chat" else LITELLM_METADATA_COMPLETION_KEYS
+    metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
     return metadata
 
 
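
Note: get_metadata_from_kwargs() switches from a denylist (copy every kwarg except keys_to_skip) to an allowlist built from the new *_METADATA_*_KEYS tuples, so unrecognized kwargs no longer leak into span metadata. A sketch with abridged key tuples (the full tuples appear in the hunk above); the sample kwargs are invented:

    # Abridged copies of the allowlist tuples defined above.
    COMMON_METADATA_KEYS = ("stream", "temperature", "top_p", "user")
    OPENAI_METADATA_CHAT_KEYS = ("max_tokens", "seed", "stop")

    kwargs = {"temperature": 0.2, "max_tokens": 64, "extra_headers": {"x": "y"}}

    keys_to_include = COMMON_METADATA_KEYS + OPENAI_METADATA_CHAT_KEYS
    metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}

    print(metadata)  # {'temperature': 0.2, 'max_tokens': 64}; extra_headers is dropped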
@@ -621,7 +558,7 @@ def openai_get_metadata_from_response(
     metadata = {}
 
     if kwargs:
-        metadata.update({k: v for k, v in kwargs.items() if k
+        metadata.update({k: v for k, v in kwargs.items() if k in OPENAI_METADATA_RESPONSE_KEYS + COMMON_METADATA_KEYS})
 
     if not response:
         return metadata
@@ -630,7 +567,7 @@ def openai_get_metadata_from_response(
     for field in ["temperature", "max_output_tokens", "top_p", "tools", "tool_choice", "truncation", "text", "user"]:
         value = getattr(response, field, None)
         if value is not None:
-            metadata[field] =
+            metadata[field] = load_data_value(value)
 
     usage = getattr(response, "usage", None)
     output_tokens_details = getattr(usage, "output_tokens_details", None)
@@ -863,7 +800,7 @@ class OaiSpanAdapter:
         data = self.data
         if not data:
            return {}
-        return
+        return load_data_value(data)
 
     @property
     def response_output_text(self) -> str:
@@ -922,25 +859,14 @@ class OaiSpanAdapter:
             if hasattr(self.response, field):
                 value = getattr(self.response, field)
                 if value is not None:
-                    metadata[field] =
+                    metadata[field] = load_data_value(value)
 
         if hasattr(self.response, "text") and self.response.text:
-            metadata["text"] =
+            metadata["text"] = load_data_value(self.response.text)
 
         if hasattr(self.response, "usage") and hasattr(self.response.usage, "output_tokens_details"):
             metadata["reasoning_tokens"] = self.response.usage.output_tokens_details.reasoning_tokens
 
-        if self.span_type == "agent":
-            agent_metadata: Dict[str, List[str]] = {
-                "handoffs": [],
-                "tools": [],
-            }
-            if self.handoffs:
-                agent_metadata["handoffs"] = load_oai_span_data_value(self.handoffs)
-            if self.tools:
-                agent_metadata["tools"] = load_oai_span_data_value(self.tools)
-            metadata.update(agent_metadata)
-
         if self.span_type == "custom" and hasattr(self._raw_oai_span.span_data, "data"):
             custom_data = getattr(self._raw_oai_span.span_data, "data", None)
             if custom_data:
@@ -1153,22 +1079,6 @@ class OaiTraceAdapter:
         return self._trace
 
 
-def load_oai_span_data_value(value):
-    """Helper function to load values stored in openai span data in a consistent way"""
-    if isinstance(value, list):
-        return [load_oai_span_data_value(item) for item in value]
-    elif hasattr(value, "model_dump"):
-        return value.model_dump()
-    elif is_dataclass(value):
-        return asdict(value)
-    else:
-        value_str = safe_json(value)
-        try:
-            return json.loads(value_str)
-        except json.JSONDecodeError:
-            return value_str
-
-
 @dataclass
 class LLMObsTraceInfo:
     """Metadata for llmobs trace used for setting root span attributes and span links"""
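
Note: the removed load_oai_span_data_value() is superseded by load_data_value from ddtrace.llmobs._utils, whose body is not part of this diff. Judging from the removed helper, its behavior is presumably close to this standalone reconstruction (safe_json is approximated with json.dumps, so this is an assumption, not the shipped implementation):

    from dataclasses import asdict, is_dataclass
    import json

    def load_data_value(value):
        # Normalize lists, pydantic models, and dataclasses into plain JSON-safe types.
        if isinstance(value, list):
            return [load_data_value(item) for item in value]
        elif hasattr(value, "model_dump"):  # pydantic v2 models
            return value.model_dump()
        elif is_dataclass(value):
            return asdict(value)
        else:
            value_str = json.dumps(value, default=str)  # stand-in for ddtrace's safe_json()
            try:
                return json.loads(value_str)
            except json.JSONDecodeError:
                return value_str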

ddtrace/llmobs/_integrations/vertexai.py

@@ -17,9 +17,9 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import SPAN_KIND
 from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._integrations.base import BaseLLMIntegration
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
+from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import get_system_instructions_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import llmobs_get_metadata_gemini_vertexai
 from ddtrace.llmobs._utils import _get_attr
 from ddtrace.trace import Span
 
@@ -46,9 +46,9 @@ class VertexAIIntegration(BaseLLMIntegration):
         instance = kwargs.get("instance", None)
         history = kwargs.get("history", [])
         metrics = kwargs.get("metrics", {})
-        metadata =
+        metadata = llmobs_get_metadata_gemini_vertexai(kwargs, instance)
 
-        system_instruction =
+        system_instruction = get_system_instructions_gemini_vertexai(instance)
         input_contents = None
         try:
             input_contents = get_argument_value(args, kwargs, 0, "content")
@@ -117,7 +117,7 @@ class VertexAIIntegration(BaseLLMIntegration):
             messages.append({"content": contents})
             return messages
         if isinstance(contents, Part):
-            message =
+            message = extract_message_from_part_gemini_vertexai(contents)
             messages.append(message)
             return messages
         if not isinstance(contents, list):
@@ -128,7 +128,7 @@ class VertexAIIntegration(BaseLLMIntegration):
                 messages.append({"content": content})
                 continue
             if isinstance(content, Part):
-                message =
+                message = extract_message_from_part_gemini_vertexai(content)
                 messages.append(message)
                 continue
             messages.extend(self._extract_messages_from_content(content))
@@ -170,6 +170,6 @@ class VertexAIIntegration(BaseLLMIntegration):
                 messages.append(message)
             return messages
         for part in parts:
-            message =
+            message = extract_message_from_part_gemini_vertexai(part, role)
             messages.append(message)
         return messages