ddtrace-3.11.0rc1-cp312-cp312-musllinux_1_2_aarch64.whl → ddtrace-3.11.0rc3-cp312-cp312-musllinux_1_2_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddtrace/_logger.py +5 -6
- ddtrace/_trace/product.py +1 -1
- ddtrace/_trace/sampling_rule.py +25 -33
- ddtrace/_trace/trace_handlers.py +12 -50
- ddtrace/_trace/utils_botocore/span_tags.py +48 -0
- ddtrace/_version.py +2 -2
- ddtrace/appsec/_asm_request_context.py +3 -1
- ddtrace/appsec/_constants.py +7 -0
- ddtrace/appsec/_handlers.py +11 -0
- ddtrace/appsec/_iast/_listener.py +12 -2
- ddtrace/appsec/_processor.py +1 -1
- ddtrace/contrib/integration_registry/registry.yaml +10 -0
- ddtrace/contrib/internal/aiobotocore/patch.py +8 -0
- ddtrace/contrib/internal/avro/__init__.py +17 -0
- ddtrace/contrib/internal/azure_functions/patch.py +23 -12
- ddtrace/contrib/internal/azure_functions/utils.py +14 -0
- ddtrace/contrib/internal/boto/patch.py +14 -0
- ddtrace/contrib/internal/botocore/__init__.py +153 -0
- ddtrace/contrib/internal/botocore/services/bedrock.py +3 -27
- ddtrace/contrib/internal/django/patch.py +31 -8
- ddtrace/contrib/{_freezegun.py → internal/freezegun/__init__.py} +1 -1
- ddtrace/contrib/internal/google_genai/_utils.py +2 -2
- ddtrace/contrib/internal/google_genai/patch.py +7 -7
- ddtrace/contrib/internal/google_generativeai/patch.py +7 -5
- ddtrace/contrib/internal/langchain/patch.py +11 -443
- ddtrace/contrib/internal/langchain/utils.py +0 -26
- ddtrace/contrib/internal/logbook/patch.py +1 -2
- ddtrace/contrib/internal/logging/patch.py +4 -7
- ddtrace/contrib/internal/loguru/patch.py +1 -3
- ddtrace/contrib/internal/openai_agents/patch.py +44 -1
- ddtrace/contrib/internal/protobuf/__init__.py +17 -0
- ddtrace/contrib/internal/pytest/__init__.py +62 -0
- ddtrace/contrib/internal/pytest/_plugin_v2.py +13 -4
- ddtrace/contrib/internal/pytest_bdd/__init__.py +23 -0
- ddtrace/contrib/internal/pytest_benchmark/__init__.py +3 -0
- ddtrace/contrib/internal/structlog/patch.py +2 -4
- ddtrace/contrib/internal/unittest/__init__.py +36 -0
- ddtrace/contrib/internal/vertexai/patch.py +7 -5
- ddtrace/ext/ci.py +20 -0
- ddtrace/ext/git.py +66 -11
- ddtrace/internal/_encoding.cpython-312-aarch64-linux-musl.so +0 -0
- ddtrace/internal/_encoding.pyi +1 -1
- ddtrace/internal/ci_visibility/encoder.py +126 -49
- ddtrace/internal/ci_visibility/utils.py +4 -4
- ddtrace/internal/core/__init__.py +5 -2
- ddtrace/internal/endpoints.py +76 -0
- ddtrace/internal/schema/processor.py +6 -2
- ddtrace/internal/telemetry/writer.py +18 -0
- ddtrace/internal/test_visibility/coverage_lines.py +4 -4
- ddtrace/internal/writer/writer.py +24 -11
- ddtrace/llmobs/_constants.py +3 -0
- ddtrace/llmobs/_experiment.py +75 -10
- ddtrace/llmobs/_integrations/bedrock.py +4 -0
- ddtrace/llmobs/_integrations/bedrock_agents.py +5 -1
- ddtrace/llmobs/_integrations/crewai.py +52 -3
- ddtrace/llmobs/_integrations/gemini.py +7 -7
- ddtrace/llmobs/_integrations/google_genai.py +10 -10
- ddtrace/llmobs/_integrations/{google_genai_utils.py → google_utils.py} +103 -7
- ddtrace/llmobs/_integrations/langchain.py +29 -20
- ddtrace/llmobs/_integrations/openai_agents.py +145 -0
- ddtrace/llmobs/_integrations/pydantic_ai.py +67 -26
- ddtrace/llmobs/_integrations/utils.py +68 -158
- ddtrace/llmobs/_integrations/vertexai.py +8 -8
- ddtrace/llmobs/_llmobs.py +83 -14
- ddtrace/llmobs/_telemetry.py +20 -5
- ddtrace/llmobs/_utils.py +27 -0
- ddtrace/settings/_config.py +1 -2
- ddtrace/settings/asm.py +9 -2
- ddtrace/settings/profiling.py +0 -9
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/METADATA +1 -1
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/RECORD +154 -160
- ddtrace/contrib/_avro.py +0 -17
- ddtrace/contrib/_botocore.py +0 -153
- ddtrace/contrib/_protobuf.py +0 -17
- ddtrace/contrib/_pytest.py +0 -62
- ddtrace/contrib/_pytest_bdd.py +0 -23
- ddtrace/contrib/_pytest_benchmark.py +0 -3
- ddtrace/contrib/_unittest.py +0 -36
- /ddtrace/contrib/{_aiobotocore.py → internal/aiobotocore/__init__.py} +0 -0
- /ddtrace/contrib/{_aiohttp_jinja2.py → internal/aiohttp_jinja2/__init__.py} +0 -0
- /ddtrace/contrib/{_aiomysql.py → internal/aiomysql/__init__.py} +0 -0
- /ddtrace/contrib/{_aiopg.py → internal/aiopg/__init__.py} +0 -0
- /ddtrace/contrib/{_aioredis.py → internal/aioredis/__init__.py} +0 -0
- /ddtrace/contrib/{_algoliasearch.py → internal/algoliasearch/__init__.py} +0 -0
- /ddtrace/contrib/{_anthropic.py → internal/anthropic/__init__.py} +0 -0
- /ddtrace/contrib/{_aredis.py → internal/aredis/__init__.py} +0 -0
- /ddtrace/contrib/{_asyncio.py → internal/asyncio/__init__.py} +0 -0
- /ddtrace/contrib/{_asyncpg.py → internal/asyncpg/__init__.py} +0 -0
- /ddtrace/contrib/{_aws_lambda.py → internal/aws_lambda/__init__.py} +0 -0
- /ddtrace/contrib/{_azure_functions.py → internal/azure_functions/__init__.py} +0 -0
- /ddtrace/contrib/{_azure_servicebus.py → internal/azure_servicebus/__init__.py} +0 -0
- /ddtrace/contrib/{_boto.py → internal/boto/__init__.py} +0 -0
- /ddtrace/contrib/{_cassandra.py → internal/cassandra/__init__.py} +0 -0
- /ddtrace/contrib/{_consul.py → internal/consul/__init__.py} +0 -0
- /ddtrace/contrib/{_coverage.py → internal/coverage/__init__.py} +0 -0
- /ddtrace/contrib/{_crewai.py → internal/crewai/__init__.py} +0 -0
- /ddtrace/contrib/{_django.py → internal/django/__init__.py} +0 -0
- /ddtrace/contrib/{_dogpile_cache.py → internal/dogpile_cache/__init__.py} +0 -0
- /ddtrace/contrib/{_dramatiq.py → internal/dramatiq/__init__.py} +0 -0
- /ddtrace/contrib/{_elasticsearch.py → internal/elasticsearch/__init__.py} +0 -0
- /ddtrace/contrib/{_fastapi.py → internal/fastapi/__init__.py} +0 -0
- /ddtrace/contrib/{_flask.py → internal/flask/__init__.py} +0 -0
- /ddtrace/contrib/{_futures.py → internal/futures/__init__.py} +0 -0
- /ddtrace/contrib/{_gevent.py → internal/gevent/__init__.py} +0 -0
- /ddtrace/contrib/{_google_genai.py → internal/google_genai/__init__.py} +0 -0
- /ddtrace/contrib/{_google_generativeai.py → internal/google_generativeai/__init__.py} +0 -0
- /ddtrace/contrib/{_graphql.py → internal/graphql/__init__.py} +0 -0
- /ddtrace/contrib/{_grpc.py → internal/grpc/__init__.py} +0 -0
- /ddtrace/contrib/{_gunicorn.py → internal/gunicorn/__init__.py} +0 -0
- /ddtrace/contrib/{_httplib.py → internal/httplib/__init__.py} +0 -0
- /ddtrace/contrib/{_httpx.py → internal/httpx/__init__.py} +0 -0
- /ddtrace/contrib/{_jinja2.py → internal/jinja2/__init__.py} +0 -0
- /ddtrace/contrib/{_kafka.py → internal/kafka/__init__.py} +0 -0
- /ddtrace/contrib/{_kombu.py → internal/kombu/__init__.py} +0 -0
- /ddtrace/contrib/{_langchain.py → internal/langchain/__init__.py} +0 -0
- /ddtrace/contrib/{_langgraph.py → internal/langgraph/__init__.py} +0 -0
- /ddtrace/contrib/{_litellm.py → internal/litellm/__init__.py} +0 -0
- /ddtrace/contrib/{_logbook.py → internal/logbook/__init__.py} +0 -0
- /ddtrace/contrib/{_logging.py → internal/logging/__init__.py} +0 -0
- /ddtrace/contrib/{_loguru.py → internal/loguru/__init__.py} +0 -0
- /ddtrace/contrib/{_mako.py → internal/mako/__init__.py} +0 -0
- /ddtrace/contrib/{_mariadb.py → internal/mariadb/__init__.py} +0 -0
- /ddtrace/contrib/{_mcp.py → internal/mcp/__init__.py} +0 -0
- /ddtrace/contrib/{_molten.py → internal/molten/__init__.py} +0 -0
- /ddtrace/contrib/{_mongoengine.py → internal/mongoengine/__init__.py} +0 -0
- /ddtrace/contrib/{_mysql.py → internal/mysql/__init__.py} +0 -0
- /ddtrace/contrib/{_mysqldb.py → internal/mysqldb/__init__.py} +0 -0
- /ddtrace/contrib/{_openai.py → internal/openai/__init__.py} +0 -0
- /ddtrace/contrib/{_openai_agents.py → internal/openai_agents/__init__.py} +0 -0
- /ddtrace/contrib/{_psycopg.py → internal/psycopg/__init__.py} +0 -0
- /ddtrace/contrib/{_pydantic_ai.py → internal/pydantic_ai/__init__.py} +0 -0
- /ddtrace/contrib/{_pymemcache.py → internal/pymemcache/__init__.py} +0 -0
- /ddtrace/contrib/{_pymongo.py → internal/pymongo/__init__.py} +0 -0
- /ddtrace/contrib/{_pymysql.py → internal/pymysql/__init__.py} +0 -0
- /ddtrace/contrib/{_pynamodb.py → internal/pynamodb/__init__.py} +0 -0
- /ddtrace/contrib/{_pyodbc.py → internal/pyodbc/__init__.py} +0 -0
- /ddtrace/contrib/{_redis.py → internal/redis/__init__.py} +0 -0
- /ddtrace/contrib/{_rediscluster.py → internal/rediscluster/__init__.py} +0 -0
- /ddtrace/contrib/{_rq.py → internal/rq/__init__.py} +0 -0
- /ddtrace/contrib/{_sanic.py → internal/sanic/__init__.py} +0 -0
- /ddtrace/contrib/{_selenium.py → internal/selenium/__init__.py} +0 -0
- /ddtrace/contrib/{_snowflake.py → internal/snowflake/__init__.py} +0 -0
- /ddtrace/contrib/{_sqlite3.py → internal/sqlite3/__init__.py} +0 -0
- /ddtrace/contrib/{_starlette.py → internal/starlette/__init__.py} +0 -0
- /ddtrace/contrib/{_structlog.py → internal/structlog/__init__.py} +0 -0
- /ddtrace/contrib/{_subprocess.py → internal/subprocess/__init__.py} +0 -0
- /ddtrace/contrib/{_urllib.py → internal/urllib/__init__.py} +0 -0
- /ddtrace/contrib/{_urllib3.py → internal/urllib3/__init__.py} +0 -0
- /ddtrace/contrib/{_vertexai.py → internal/vertexai/__init__.py} +0 -0
- /ddtrace/contrib/{_vertica.py → internal/vertica/__init__.py} +0 -0
- /ddtrace/contrib/{_webbrowser.py → internal/webbrowser/__init__.py} +0 -0
- /ddtrace/contrib/{_yaaredis.py → internal/yaaredis/__init__.py} +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/WHEEL +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/entry_points.txt +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/LICENSE +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/LICENSE.Apache +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/LICENSE.BSD3 +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/licenses/NOTICE +0 -0
- {ddtrace-3.11.0rc1.dist-info → ddtrace-3.11.0rc3.dist-info}/top_level.txt +0 -0
ddtrace/llmobs/_experiment.py
CHANGED
@@ -21,6 +21,7 @@ from ddtrace.constants import ERROR_STACK
 from ddtrace.constants import ERROR_TYPE
 from ddtrace.internal.logger import get_logger
 from ddtrace.llmobs._constants import EXPERIMENT_EXPECTED_OUTPUT
+from ddtrace.llmobs._utils import convert_tags_dict_to_list
 
 
 if TYPE_CHECKING:
@@ -153,6 +154,12 @@ class Dataset:
         self._deleted_record_ids.append(record_id)
         del self._records[index]
 
+    @property
+    def url(self) -> str:
+        # FIXME: need to use the user's site
+        # also will not work for subdomain orgs
+        return f"https://app.datadoghq.com/llm/datasets/{self._id}"
+
     @overload
     def __getitem__(self, index: int) -> DatasetRecord:
         ...
@@ -170,6 +177,50 @@ class Dataset:
     def __iter__(self) -> Iterator[DatasetRecord]:
         return iter(self._records)
 
+    def as_dataframe(self) -> None:
+        try:
+            import pandas as pd
+        except ImportError as e:
+            raise ImportError(
+                "pandas is required to convert dataset to DataFrame. Please install via `pip install pandas`"
+            ) from e
+
+        column_tuples = set()
+        data_rows = []
+        for record in self._records:
+            flat_record = {}  # type: Dict[Union[str, Tuple[str, str]], Any]
+
+            input_data = record.get("input_data", {})
+            if isinstance(input_data, dict):
+                for input_data_col, input_data_val in input_data.items():
+                    flat_record[("input_data", input_data_col)] = input_data_val
+                    column_tuples.add(("input_data", input_data_col))
+            else:
+                flat_record[("input_data", "")] = input_data
+                column_tuples.add(("input_data", ""))
+
+            expected_output = record.get("expected_output", {})
+            if isinstance(expected_output, dict):
+                for expected_output_col, expected_output_val in expected_output.items():
+                    flat_record[("expected_output", expected_output_col)] = expected_output_val
+                    column_tuples.add(("expected_output", expected_output_col))
+            else:
+                flat_record[("expected_output", "")] = expected_output
+                column_tuples.add(("expected_output", ""))
+
+            for metadata_col, metadata_val in record.get("metadata", {}).items():
+                flat_record[("metadata", metadata_col)] = metadata_val
+                column_tuples.add(("metadata", metadata_col))
+
+            data_rows.append(flat_record)
+
+        records_list = []
+        for flat_record in data_rows:
+            row = [flat_record.get(col, None) for col in column_tuples]
+            records_list.append(row)
+
+        return pd.DataFrame(data=records_list, columns=pd.MultiIndex.from_tuples(column_tuples))
+
 
 class Experiment:
     def __init__(
@@ -180,7 +231,7 @@ class Experiment:
         evaluators: List[Callable[[DatasetRecordInputType, JSONType, JSONType], JSONType]],
         project_name: str,
         description: str = "",
-        tags: Optional[
+        tags: Optional[Dict[str, str]] = None,
         config: Optional[ExperimentConfigType] = None,
         _llmobs_instance: Optional["LLMObs"] = None,
     ) -> None:
@@ -189,8 +240,8 @@ class Experiment:
         self._dataset = dataset
         self._evaluators = evaluators
         self._description = description
-        self._tags:
-        self._tags.
+        self._tags: Dict[str, str] = tags or {}
+        self._tags["ddtrace.version"] = str(ddtrace.__version__)
         self._config: Dict[str, JSONType] = config or {}
         self._llmobs_instance = _llmobs_instance
 
@@ -217,7 +268,8 @@ class Experiment:
         if not self._llmobs_instance.enabled:
             logger.warning(
                 "Skipping experiment as LLMObs is not enabled. "
-                "Ensure LLM Observability is enabled via `LLMObs.enable(...)`
+                "Ensure LLM Observability is enabled via `LLMObs.enable(...)` "
+                "or set `DD_LLMOBS_ENABLED=1` and use `ddtrace-run` to run your application."
             )
             return []
 
@@ -230,19 +282,27 @@ class Experiment:
             self._project_id,
             self._dataset._version,
             self._config,
-            self._tags,
+            convert_tags_dict_to_list(self._tags),
             self._description,
         )
         self._id = experiment_id
-        self._tags
+        self._tags["experiment_id"] = str(experiment_id)
         self._run_name = experiment_run_name
         task_results = self._run_task(jobs, raise_errors, sample_size)
         evaluations = self._run_evaluators(task_results, raise_errors=raise_errors)
         experiment_results = self._merge_results(task_results, evaluations)
         experiment_evals = self._generate_metrics_from_exp_results(experiment_results)
-        self._llmobs_instance._dne_client.experiment_eval_post(
+        self._llmobs_instance._dne_client.experiment_eval_post(
+            self._id, experiment_evals, convert_tags_dict_to_list(self._tags)
+        )
         return experiment_results
 
+    @property
+    def url(self) -> str:
+        # FIXME: need to use the user's site
+        # also will not work for subdomain orgs
+        return f"https://app.datadoghq.com/llm/experiments/{self._id}"
+
     def _process_record(self, idx_record: Tuple[int, DatasetRecord]) -> Optional[TaskResult]:
         if not self._llmobs_instance or not self._llmobs_instance.enabled:
             return None
@@ -256,7 +316,12 @@ class Experiment:
         span_id, trace_id = "", ""
         input_data = record["input_data"]
         record_id = record.get("record_id", "")
-        tags = {
+        tags = {
+            **self._tags,
+            "dataset_id": str(self._dataset._id),
+            "dataset_record_id": str(record_id),
+            "experiment_id": str(self._id),
+        }
         output_data = None
         try:
             output_data = self._task(input_data, self._config)
@@ -342,7 +407,7 @@ class Experiment:
         experiment_results = []
         for idx, task_result in enumerate(task_results):
             output_data = task_result["output"]
-            metadata: Dict[str, JSONType] = {"tags": cast(List[JSONType], self._tags)}
+            metadata: Dict[str, JSONType] = {"tags": cast(List[JSONType], convert_tags_dict_to_list(self._tags))}
             metadata.update(task_result.get("metadata") or {})
             record: DatasetRecord = self._dataset[idx]
             evals = evaluations[idx]["evaluations"]
@@ -383,7 +448,7 @@ class Experiment:
                 "label": eval_name,
                 f"{metric_type}_value": eval_value,  # type: ignore
                 "error": err,
-                "tags": self._tags,
+                "tags": convert_tags_dict_to_list(self._tags),
                 "experiment_id": self._id,
             }
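The net effect of the `_experiment.py` changes: experiment tags become a `Dict[str, str]` (serialized back to `["key:value", ...]` via the new `convert_tags_dict_to_list` helper at each API boundary), both `Dataset` and `Experiment` gain a `url` property, and datasets can be exported to pandas. A minimal usage sketch, not part of the diff, assuming an existing `Dataset` instance named `dataset` and pandas installed:

```python
# Illustrative sketch only; `dataset` is an existing ddtrace Dataset instance.
df = dataset.as_dataframe()  # DataFrame with MultiIndex columns:
                             # ("input_data", ...), ("expected_output", ...), ("metadata", ...)
print(dataset.url)           # https://app.datadoghq.com/llm/datasets/<dataset id>

# Experiment tags are now a dict rather than a list of "key:value" strings, e.g.:
# Experiment(..., project_name="demo", tags={"team": "ml"})
# The dict is converted internally with convert_tags_dict_to_list() before posting.
```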
ddtrace/llmobs/_integrations/bedrock.py
CHANGED
@@ -13,6 +13,7 @@ from ddtrace.llmobs._constants import CACHE_READ_INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import CACHE_WRITE_INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import INPUT_MESSAGES
 from ddtrace.llmobs._constants import INPUT_VALUE
+from ddtrace.llmobs._constants import INTEGRATION
 from ddtrace.llmobs._constants import METADATA
 from ddtrace.llmobs._constants import METRICS
 from ddtrace.llmobs._constants import MODEL_NAME
@@ -30,6 +31,7 @@ from ddtrace.llmobs._integrations.bedrock_utils import normalize_input_tokens
 from ddtrace.llmobs._integrations.utils import get_final_message_converse_stream_message
 from ddtrace.llmobs._integrations.utils import get_messages_from_converse_content
 from ddtrace.llmobs._integrations.utils import update_proxy_workflow_input_output_value
+from ddtrace.llmobs._telemetry import record_bedrock_agent_span_event_created
 from ddtrace.llmobs._writer import LLMObsSpanEvent
 from ddtrace.trace import Span
 
@@ -151,6 +153,7 @@ class BedrockIntegration(BaseLLMIntegration):
                 INPUT_VALUE: str(input_value),
                 TAGS: {"session_id": session_id},
                 METADATA: {"agent_id": agent_id, "agent_alias_id": agent_alias_id},
+                INTEGRATION: "bedrock_agents",
             }
         )
         if not response:
@@ -176,6 +179,7 @@ class BedrockIntegration(BaseLLMIntegration):
         )
         for _, span_event in self._spans.items():
             LLMObs._instance._llmobs_span_writer.enqueue(span_event)
+            record_bedrock_agent_span_event_created(span_event)
         self._spans.clear()
         self._active_span_by_step_id.clear()
ddtrace/llmobs/_integrations/bedrock_agents.py
CHANGED
@@ -15,6 +15,7 @@ from ddtrace.internal.utils.formats import format_trace_id
 from ddtrace.llmobs._constants import LLMOBS_TRACE_ID
 from ddtrace.llmobs._integrations.bedrock_utils import parse_model_id
 from ddtrace.llmobs._utils import _get_ml_app
+from ddtrace.llmobs._utils import _get_session_id
 from ddtrace.llmobs._utils import safe_json
 
 
@@ -57,12 +58,15 @@ def _build_span_event(
     llmobs_trace_id = root_span._get_ctx_item(LLMOBS_TRACE_ID)
     if llmobs_trace_id is None:
         llmobs_trace_id = root_span.trace_id
+    session_id = _get_session_id(root_span)
+    ml_app = _get_ml_app(root_span)
+    tags = [f"ml_app:{ml_app}", f"session_id:{session_id}", "integration:bedrock_agents"]
     span_event = {
         "name": span_name,
         "span_id": str(span_id),
         "trace_id": format_trace_id(llmobs_trace_id),
         "parent_id": str(parent_id or root_span.span_id),
-        "tags":
+        "tags": tags,
         "start_ns": int(start_ns or root_span.start_ns),
         "duration": int(duration_ns or DEFAULT_SPAN_DURATION),
         "status": "error" if error else "ok",
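With `_get_session_id` and `_get_ml_app` wired in, every manually built bedrock-agents span event now carries a fixed tag triple. An illustrative result (values are examples, not from the diff):

```python
# Hypothetical outcome of the new tag construction in _build_span_event:
ml_app, session_id = "my-ml-app", "session-123"  # resolved from the root span
tags = [f"ml_app:{ml_app}", f"session_id:{session_id}", "integration:bedrock_agents"]
# -> ["ml_app:my-ml-app", "session_id:session-123", "integration:bedrock_agents"]
```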
ddtrace/llmobs/_integrations/crewai.py
CHANGED
@@ -8,6 +8,7 @@ from ddtrace.internal import core
 from ddtrace.internal.logger import get_logger
 from ddtrace.internal.utils import get_argument_value
 from ddtrace.internal.utils.formats import format_trace_id
+from ddtrace.llmobs._constants import AGENT_MANIFEST
 from ddtrace.llmobs._constants import INPUT_VALUE
 from ddtrace.llmobs._constants import METADATA
 from ddtrace.llmobs._constants import NAME
@@ -151,9 +152,8 @@ class CrewAIIntegration(BaseLLMIntegration):
         Agent spans are 1:1 with its parent (task/tool) span, so we link them directly here, even on the parent itself.
         """
         agent_instance = kwargs.get("instance")
+        self._tag_agent_manifest(span, agent_instance)
         agent_role = getattr(agent_instance, "role", "")
-        agent_goal = getattr(agent_instance, "goal", "")
-        agent_backstory = getattr(agent_instance, "backstory", "")
         task_description = getattr(kwargs.get("task"), "description", "")
         context = get_argument_value(args, kwargs, 1, "context", optional=True) or ""
 
@@ -174,7 +174,6 @@ class CrewAIIntegration(BaseLLMIntegration):
         span._set_ctx_items(
             {
                 NAME: agent_role if agent_role else "CrewAI Agent",
-                METADATA: {"description": agent_goal, "backstory": agent_backstory},
                 INPUT_VALUE: {"context": context, "input": task_description},
                 SPAN_LINKS: curr_span_links + [span_link],
             }
@@ -198,6 +197,56 @@ class CrewAIIntegration(BaseLLMIntegration):
             return
         span._set_ctx_item(OUTPUT_VALUE, response)
 
+    def _tag_agent_manifest(self, span, agent):
+        if not agent:
+            return
+
+        manifest = {}
+        manifest["framework"] = "CrewAI"
+        manifest["name"] = agent.role if hasattr(agent, "role") and agent.role else "CrewAI Agent"
+        if hasattr(agent, "goal"):
+            manifest["goal"] = agent.goal
+        if hasattr(agent, "backstory"):
+            manifest["backstory"] = agent.backstory
+        if hasattr(agent, "llm"):
+            if hasattr(agent.llm, "model"):
+                manifest["model"] = agent.llm.model
+            model_settings = {}
+            if hasattr(agent.llm, "max_tokens"):
+                model_settings["max_tokens"] = agent.llm.max_tokens
+            if hasattr(agent.llm, "temperature"):
+                model_settings["temperature"] = agent.llm.temperature
+            if model_settings:
+                manifest["model_settings"] = model_settings
+        if hasattr(agent, "allow_delegation"):
+            manifest["handoffs"] = {"allow_delegation": agent.allow_delegation}
+        code_execution_permissions = {}
+        if hasattr(agent, "allow_code_execution"):
+            manifest["code_execution_permissions"] = {"allow_code_execution": agent.allow_code_execution}
+        if hasattr(agent, "code_execution_mode"):
+            manifest["code_execution_permissions"] = {"code_execution_mode": agent.code_execution_mode}
+        if code_execution_permissions:
+            manifest["code_execution_permissions"] = code_execution_permissions
+        if hasattr(agent, "max_iter"):
+            manifest["max_iterations"] = agent.max_iter
+        if hasattr(agent, "tools"):
+            manifest["tools"] = self._get_agent_tools(agent.tools)
+
+        span._set_ctx_item(AGENT_MANIFEST, manifest)
+
+    def _get_agent_tools(self, tools):
+        if not tools or not isinstance(tools, list):
+            return []
+        formatted_tools = []
+        for tool in tools:
+            tool_dict = {}
+            if hasattr(tool, "name"):
+                tool_dict["name"] = tool.name
+            if hasattr(tool, "description"):
+                tool_dict["description"] = tool.description
+            formatted_tools.append(tool_dict)
+        return formatted_tools
+
     def _llmobs_set_span_link_on_task(self, span, args, kwargs):
         """Set span links for the next queued task in a CrewAI workflow.
         This happens between task executions, (the current span is the crew span and the task span hasn't started yet)
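The new `_tag_agent_manifest` replaces the old goal/backstory metadata with a fuller agent manifest attached under `AGENT_MANIFEST`. A sketch of the dict it builds for a hypothetical CrewAI agent (every key is conditional on the corresponding attribute existing; values below are examples only):

```python
# Illustrative manifest for a hypothetical agent.
{
    "framework": "CrewAI",
    "name": "Research Analyst",          # agent.role, falling back to "CrewAI Agent"
    "goal": "Summarize findings",        # agent.goal
    "backstory": "Veteran analyst",      # agent.backstory
    "model": "gpt-4o",                   # agent.llm.model
    "model_settings": {"max_tokens": 1024, "temperature": 0.2},
    "handoffs": {"allow_delegation": False},
    "code_execution_permissions": {"allow_code_execution": False},
    "max_iterations": 20,                # agent.max_iter
    "tools": [{"name": "web_search", "description": "Search the web"}],
}
```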
ddtrace/llmobs/_integrations/gemini.py
CHANGED
@@ -16,9 +16,9 @@ from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import SPAN_KIND
 from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._integrations.base import BaseLLMIntegration
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
+from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import get_system_instructions_gemini_vertexai
+from ddtrace.llmobs._integrations.google_utils import llmobs_get_metadata_gemini_vertexai
 from ddtrace.llmobs._utils import _get_attr
 from ddtrace.trace import Span
 
@@ -43,9 +43,9 @@ class GeminiIntegration(BaseLLMIntegration):
         operation: str = "",
     ) -> None:
         instance = kwargs.get("instance", None)
-        metadata =
+        metadata = llmobs_get_metadata_gemini_vertexai(kwargs, instance)
 
-        system_instruction =
+        system_instruction = get_system_instructions_gemini_vertexai(instance)
         input_contents = get_argument_value(args, kwargs, 0, "contents")
         input_messages = self._extract_input_message(input_contents, system_instruction)
 
@@ -95,7 +95,7 @@ class GeminiIntegration(BaseLLMIntegration):
             messages.append(message)
             continue
         for part in parts:
-            message =
+            message = extract_message_from_part_gemini_vertexai(part, role)
             messages.append(message)
         return messages
 
@@ -107,7 +107,7 @@ class GeminiIntegration(BaseLLMIntegration):
         role = content.get("role", "model")
         parts = content.get("parts", [])
         for part in parts:
-            message =
+            message = extract_message_from_part_gemini_vertexai(part, role)
             output_messages.append(message)
         return output_messages
ddtrace/llmobs/_integrations/google_genai.py
CHANGED
@@ -14,12 +14,12 @@ from ddtrace.llmobs._constants import OUTPUT_MESSAGES
 from ddtrace.llmobs._constants import OUTPUT_VALUE
 from ddtrace.llmobs._constants import SPAN_KIND
 from ddtrace.llmobs._integrations.base import BaseLLMIntegration
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
-from ddtrace.llmobs._integrations.
+from ddtrace.llmobs._integrations.google_utils import GOOGLE_GENAI_DEFAULT_MODEL_ROLE
+from ddtrace.llmobs._integrations.google_utils import extract_embedding_metrics_google_genai
+from ddtrace.llmobs._integrations.google_utils import extract_generation_metrics_google_genai
+from ddtrace.llmobs._integrations.google_utils import extract_message_from_part_google_genai
+from ddtrace.llmobs._integrations.google_utils import extract_provider_and_model_name
+from ddtrace.llmobs._integrations.google_utils import normalize_contents_google_genai
 from ddtrace.llmobs._utils import _get_attr
 from ddtrace.llmobs.utils import Document
 
@@ -71,7 +71,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):
         response: Optional[Any] = None,
         operation: str = "",
     ) -> None:
-        provider_name, model_name = extract_provider_and_model_name(kwargs)
+        provider_name, model_name = extract_provider_and_model_name(kwargs=kwargs)
         span._set_ctx_items(
             {
                 SPAN_KIND: operation,
@@ -120,7 +120,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):
 
     def _extract_messages_from_contents(self, contents, default_role: str) -> List[Dict[str, Any]]:
         messages = []
-        for content in
+        for content in normalize_contents_google_genai(contents):
             role = content.get("role") or default_role
             for part in content.get("parts", []):
                 messages.append(extract_message_from_part_google_genai(part, role))
@@ -128,7 +128,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):
 
     def _extract_output_messages(self, response) -> List[Dict[str, Any]]:
         if not response:
-            return [{"content": "", "role":
+            return [{"content": "", "role": GOOGLE_GENAI_DEFAULT_MODEL_ROLE}]
         messages = []
         candidates = _get_attr(response, "candidates", [])
         for candidate in candidates:
@@ -136,7 +136,7 @@ class GoogleGenAIIntegration(BaseLLMIntegration):
             if not content:
                 continue
             parts = _get_attr(content, "parts", [])
-            role = _get_attr(content, "role",
+            role = _get_attr(content, "role", GOOGLE_GENAI_DEFAULT_MODEL_ROLE)
             for part in parts:
                 message = extract_message_from_part_google_genai(part, role)
                 messages.append(message)
ddtrace/llmobs/_integrations/{google_genai_utils.py → google_utils.py}
CHANGED
@@ -1,6 +1,7 @@
 from typing import Any
 from typing import Dict
 from typing import List
+from typing import Optional
 from typing import Tuple
 
 from ddtrace.llmobs._constants import BILLABLE_CHARACTER_COUNT_METRIC_KEY
@@ -11,9 +12,9 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY
 from ddtrace.llmobs._utils import _get_attr
 
 
-#
+# Google GenAI has roles "model" and "user", but in order to stay consistent with other integrations,
 # we use "assistant" as the default role for model messages
-
+GOOGLE_GENAI_DEFAULT_MODEL_ROLE = "assistant"
 
 # https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-partner-models
 # GeminiAPI: only exports google provided models
@@ -40,9 +41,31 @@ KNOWN_MODEL_PREFIX_TO_PROVIDER = {
 }
 
 
-def extract_provider_and_model_name(
-
-
+def extract_provider_and_model_name(
+    kwargs: Optional[Dict[str, Any]] = None, instance: Any = None, model_name_attr: Optional[str] = None
+) -> Tuple[str, str]:
+    """
+    Function to extract provider and model name from either kwargs or instance attributes.
+    Args:
+        kwargs: Dictionary containing model information (used for google_genai)
+        instance: Model instance with attributes (used for vertexai and google_generativeai)
+        model_name_attr: Attribute name to extract from instance (e.g., "_model_name", "model_name", used for vertexai
+            and google_generativeai)
+
+    Returns:
+        Tuple of (provider_name, model_name)
+    """
+    model_path = ""
+    if kwargs is not None:
+        model_path = kwargs.get("model", "")
+    elif instance is not None and model_name_attr is not None:
+        model_path = _get_attr(instance, model_name_attr, "")
+
+    if not model_path or not isinstance(model_path, str):
+        return "custom", "custom"
+
+    model_name = model_path.split("/")[-1] if "/" in model_path else model_path
+
     for prefix in KNOWN_MODEL_PREFIX_TO_PROVIDER.keys():
         if model_name.lower().startswith(prefix):
             provider_name = KNOWN_MODEL_PREFIX_TO_PROVIDER[prefix]
@@ -50,7 +73,7 @@ def extract_provider_and_model_name(kwargs: Dict[str, Any]) -> Tuple[str, str]:
     return "custom", model_name if model_name else "custom"
 
 
-def
+def normalize_contents_google_genai(contents) -> List[Dict[str, Any]]:
     """
     contents has a complex union type structure:
     - contents: Union[ContentListUnion, ContentListUnionDict]
@@ -142,7 +165,7 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]:
     returns a dict representing a message with format {"role": role, "content": content}
     """
     if role == "model":
-        role =
+        role = GOOGLE_GENAI_DEFAULT_MODEL_ROLE
 
     message: Dict[str, Any] = {"role": role}
     if isinstance(part, str):
@@ -187,3 +210,76 @@ def extract_message_from_part_google_genai(part, role: str) -> Dict[str, Any]:
         return message
 
     return {"content": "Unsupported file type: {}".format(type(part)), "role": role}
+
+
+def llmobs_get_metadata_gemini_vertexai(kwargs, instance):
+    metadata = {}
+    model_config = getattr(instance, "_generation_config", {}) or {}
+    model_config = model_config.to_dict() if hasattr(model_config, "to_dict") else model_config
+    request_config = kwargs.get("generation_config", {}) or {}
+    request_config = request_config.to_dict() if hasattr(request_config, "to_dict") else request_config
+
+    parameters = ("temperature", "max_output_tokens", "candidate_count", "top_p", "top_k")
+    for param in parameters:
+        model_config_value = _get_attr(model_config, param, None)
+        request_config_value = _get_attr(request_config, param, None)
+        if model_config_value or request_config_value:
+            metadata[param] = request_config_value or model_config_value
+    return metadata
+
+
+def extract_message_from_part_gemini_vertexai(part, role=None):
+    text = _get_attr(part, "text", "")
+    function_call = _get_attr(part, "function_call", None)
+    function_response = _get_attr(part, "function_response", None)
+    message = {"content": text}
+    if role:
+        message["role"] = role
+    if function_call:
+        function_call_dict = function_call
+        if not isinstance(function_call, dict):
+            function_call_dict = type(function_call).to_dict(function_call)
+        message["tool_calls"] = [
+            {"name": function_call_dict.get("name", ""), "arguments": function_call_dict.get("args", {})}
+        ]
+    if function_response:
+        function_response_dict = function_response
+        if not isinstance(function_response, dict):
+            function_response_dict = type(function_response).to_dict(function_response)
+        message["content"] = "[tool result: {}]".format(function_response_dict.get("response", ""))
+    return message
+
+
+def get_system_instructions_gemini_vertexai(model_instance):
+    """
+    Extract system instructions from model and convert to []str for tagging.
+    """
+    try:
+        from google.ai.generativelanguage_v1beta.types.content import Content
+    except ImportError:
+        Content = None
+    try:
+        from vertexai.generative_models._generative_models import Part
+    except ImportError:
+        Part = None
+
+    raw_system_instructions = getattr(model_instance, "_system_instruction", [])
+    if Content is not None and isinstance(raw_system_instructions, Content):
+        system_instructions = []
+        for part in raw_system_instructions.parts:
+            system_instructions.append(_get_attr(part, "text", ""))
+        return system_instructions
+    elif isinstance(raw_system_instructions, str):
+        return [raw_system_instructions]
+    elif Part is not None and isinstance(raw_system_instructions, Part):
+        return [_get_attr(raw_system_instructions, "text", "")]
+    elif not isinstance(raw_system_instructions, list):
+        return []
+
+    system_instructions = []
+    for elem in raw_system_instructions:
+        if isinstance(elem, str):
+            system_instructions.append(elem)
+        elif Part is not None and isinstance(elem, Part):
+            system_instructions.append(_get_attr(elem, "text", ""))
+    return system_instructions
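The widened `extract_provider_and_model_name` signature supports both call styles now used across the Google integrations. A sketch of the two conventions (the `model` instance and return values below are illustrative, not from the diff):

```python
from ddtrace.llmobs._integrations.google_utils import extract_provider_and_model_name

# google_genai style: the request kwargs carry the model path.
extract_provider_and_model_name(kwargs={"model": "publishers/meta/models/llama-3"})
# -> provider looked up from KNOWN_MODEL_PREFIX_TO_PROVIDER, model name "llama-3"

# vertexai / google_generativeai style: read a named attribute off the model instance.
# `model` here is a hypothetical GenerativeModel instance.
extract_provider_and_model_name(instance=model, model_name_attr="_model_name")

# Anything unrecognized degrades to ("custom", <model name>) or ("custom", "custom").
```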
ddtrace/llmobs/_integrations/langchain.py
CHANGED
@@ -163,7 +163,6 @@ class LangChainIntegration(BaseLLMIntegration):
 
         self._set_links(span)
         model_provider = span.get_tag(PROVIDER)
-        self._llmobs_set_metadata(span, model_provider)
 
         is_workflow = False
 
@@ -365,26 +364,37 @@ class LangChainIntegration(BaseLLMIntegration):
         if hasattr(instance, "_datadog_spans"):
             delattr(instance, "_datadog_spans")
 
-    def _llmobs_set_metadata(self, span: Span,
-
+    def _llmobs_set_metadata(self, span: Span, kwargs: Dict[str, Any]) -> None:
+        identifying_params = kwargs.pop("_dd.identifying_params", None)
+        if not identifying_params:
             return
+        metadata = self._llmobs_extract_parameters(identifying_params)
+        for val in identifying_params.values():
+            if metadata:
+                break
+            if not isinstance(val, dict):
+                continue
+            metadata = self._llmobs_extract_parameters(val)
 
-        metadata
-
-            f"langchain.request.{model_provider}.parameters.model_kwargs.temperature"
-        )  # huggingface
-        max_tokens = (
-            span.get_tag(f"langchain.request.{model_provider}.parameters.max_tokens")
-            or span.get_tag(f"langchain.request.{model_provider}.parameters.maxTokens")  # ai21
-            or span.get_tag(f"langchain.request.{model_provider}.parameters.model_kwargs.max_tokens")  # huggingface
-        )
+        if metadata:
+            span._set_ctx_item(METADATA, metadata)
 
+    def _llmobs_extract_parameters(self, parameters: Dict[str, Any]) -> Dict[str, Any]:
+        metadata: Dict[str, Any] = {}
+        max_tokens = None
+        temperature = None
+        if "temperature" in parameters:
+            temperature = parameters["temperature"]
+        for max_token_key in ["max_tokens", "maxTokens", "max_completion_tokens"]:
+            if max_token_key in parameters:
+                max_tokens = parameters[max_token_key]
+                break
         if temperature is not None and temperature != "None":
             metadata["temperature"] = float(temperature)
         if max_tokens is not None and max_tokens != "None":
             metadata["max_tokens"] = int(max_tokens)
-
-
+
+        return metadata
 
     def _llmobs_set_tags_from_llm(
         self, span: Span, args: List[Any], kwargs: Dict[str, Any], completions: Any, is_workflow: bool = False
@@ -411,6 +421,8 @@ class LangChainIntegration(BaseLLMIntegration):
             }
         )
 
+        self._llmobs_set_metadata(span, kwargs)
+
         if span.error:
             span._set_ctx_item(output_tag_key, [{"content": ""}])
             return
@@ -444,6 +456,9 @@ class LangChainIntegration(BaseLLMIntegration):
                 MODEL_PROVIDER: span.get_tag(PROVIDER) or "",
             }
         )
+
+        self._llmobs_set_metadata(span, kwargs)
+
         input_tag_key = INPUT_VALUE if is_workflow else INPUT_MESSAGES
         output_tag_key = OUTPUT_VALUE if is_workflow else OUTPUT_MESSAGES
         stream = span.get_tag("langchain.request.stream")
@@ -700,16 +715,10 @@ class LangChainIntegration(BaseLLMIntegration):
         **kwargs,
     ) -> None:
         """Set base level tags that should be present on all LangChain spans (if they are not None)."""
-        span.set_tag_str(TYPE, interface_type)
         if provider is not None:
             span.set_tag_str(PROVIDER, provider)
         if model is not None:
            span.set_tag_str(MODEL, model)
-        if api_key is not None:
-            if len(api_key) >= 4:
-                span.set_tag_str(API_KEY, "...%s" % str(api_key[-4:]))
-            else:
-                span.set_tag_str(API_KEY, api_key)
 
     def check_token_usage_chat_or_llm_result(self, result):
         """Checks for token usage on the top-level ChatResult or LLMResult object"""
|