veadk-python 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of veadk-python might be problematic. Click here for more details.
- veadk/agent.py +31 -21
- veadk/agents/loop_agent.py +55 -0
- veadk/agents/parallel_agent.py +60 -0
- veadk/agents/sequential_agent.py +55 -0
- veadk/cli/cli_deploy.py +14 -1
- veadk/cli/cli_web.py +27 -0
- veadk/cloud/cloud_app.py +21 -6
- veadk/consts.py +14 -1
- veadk/database/viking/viking_database.py +3 -3
- veadk/evaluation/adk_evaluator/__init__.py +4 -0
- veadk/evaluation/adk_evaluator/adk_evaluator.py +170 -217
- veadk/evaluation/base_evaluator.py +26 -20
- veadk/evaluation/deepeval_evaluator/deepeval_evaluator.py +8 -5
- veadk/{tracing/telemetry/metrics/__init__.py → integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/clean.py} +10 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/app.py +40 -7
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/run.sh +11 -5
- veadk/integrations/ve_faas/ve_faas.py +5 -1
- veadk/integrations/ve_tos/ve_tos.py +176 -0
- veadk/runner.py +162 -39
- veadk/tools/builtin_tools/image_edit.py +236 -0
- veadk/tools/builtin_tools/image_generate.py +236 -0
- veadk/tools/builtin_tools/video_generate.py +326 -0
- veadk/tools/sandbox/browser_sandbox.py +19 -9
- veadk/tools/sandbox/code_sandbox.py +21 -11
- veadk/tools/sandbox/computer_sandbox.py +16 -9
- veadk/tracing/base_tracer.py +6 -200
- veadk/tracing/telemetry/attributes/attributes.py +29 -0
- veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py +71 -0
- veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +451 -0
- veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py +76 -0
- veadk/tracing/telemetry/attributes/extractors/types.py +75 -0
- veadk/tracing/telemetry/exporters/apmplus_exporter.py +97 -38
- veadk/tracing/telemetry/exporters/base_exporter.py +10 -10
- veadk/tracing/telemetry/exporters/cozeloop_exporter.py +20 -13
- veadk/tracing/telemetry/exporters/inmemory_exporter.py +49 -32
- veadk/tracing/telemetry/exporters/tls_exporter.py +18 -12
- veadk/tracing/telemetry/opentelemetry_tracer.py +105 -102
- veadk/tracing/telemetry/telemetry.py +238 -0
- veadk/types.py +6 -1
- veadk/utils/misc.py +41 -1
- veadk/utils/patches.py +25 -0
- veadk/version.py +1 -1
- veadk_python-0.2.5.dist-info/METADATA +345 -0
- veadk_python-0.2.5.dist-info/RECORD +127 -0
- veadk/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/__pycache__/agent.cpython-310.pyc +0 -0
- veadk/__pycache__/config.cpython-310.pyc +0 -0
- veadk/__pycache__/consts.cpython-310.pyc +0 -0
- veadk/__pycache__/runner.cpython-310.pyc +0 -0
- veadk/__pycache__/types.cpython-310.pyc +0 -0
- veadk/__pycache__/version.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/agent_card.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/remote_ve_agent.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/ve_a2a_server.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/ve_agent_executor.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_deploy.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_init.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_prompt.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_studio.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_web.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/main.cpython-310.pyc +0 -0
- veadk/cloud/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/cloud/__pycache__/cloud_agent_engine.cpython-310.pyc +0 -0
- veadk/cloud/__pycache__/cloud_app.cpython-310.pyc +0 -0
- veadk/database/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/__pycache__/base_database.cpython-310.pyc +0 -0
- veadk/database/__pycache__/database_adapter.cpython-310.pyc +0 -0
- veadk/database/__pycache__/database_factory.cpython-310.pyc +0 -0
- veadk/database/__pycache__/local_database.cpython-310.pyc +0 -0
- veadk/database/kv/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/relational/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/vector/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/vector/__pycache__/opensearch_vector_database.cpython-310.pyc +0 -0
- veadk/database/vector/__pycache__/type.cpython-310.pyc +0 -0
- veadk/database/viking/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/base_evaluator.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/eval_set_file_loader.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/eval_set_recorder.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/types.cpython-310.pyc +0 -0
- veadk/evaluation/adk_evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/deepeval_evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/deepeval_evaluator/__pycache__/deepeval_evaluator.cpython-310.pyc +0 -0
- veadk/evaluation/utils/__pycache__/prometheus.cpython-310.pyc +0 -0
- veadk/integrations/ve_apig/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_apig/__pycache__/apig.cpython-310.pyc +0 -0
- veadk/integrations/ve_apig/__pycache__/ve_apig.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/types.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/ve_faas.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/ve_faas_utils.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/vefaas.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/vefaas_utils.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__pycache__/agent.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__pycache__/app.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__pycache__/studio_app.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/{{ cookiecutter.app_name|replace('-', '_') }}/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/{{ cookiecutter.app_name|replace('-', '_') }}/__pycache__/agent.cpython-310.pyc +0 -0
- veadk/integrations/ve_prompt_pilot/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_prompt_pilot/__pycache__/agentpilot.cpython-310.pyc +0 -0
- veadk/knowledgebase/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/knowledgebase/__pycache__/knowledgebase.cpython-310.pyc +0 -0
- veadk/knowledgebase/__pycache__/knowledgebase_database_adapter.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/long_term_memory.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/memory_database_adapter.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/short_term_memory.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/short_term_memory_processor.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/agent_default_prompt.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/prompt_memory_processor.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/prompt_optimization.cpython-310.pyc +0 -0
- veadk/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tools/__pycache__/demo_tools.cpython-310.pyc +0 -0
- veadk/tools/__pycache__/load_knowledgebase_tool.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/lark.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/vesearch.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/web_search.cpython-310.pyc +0 -0
- veadk/tools/sandbox/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/__pycache__/base_tracer.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/__pycache__/opentelemetry_tracer.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/apiserver_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/apmplus_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/base_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/cozeloop_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/inmemory_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/tls_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/metrics/__pycache__/opentelemetry_metrics.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/metrics/opentelemetry_metrics.py +0 -73
- veadk/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/logger.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/mcp_utils.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/misc.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/patches.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/volcengine_sign.cpython-310.pyc +0 -0
- veadk_python-0.2.2.dist-info/METADATA +0 -144
- veadk_python-0.2.2.dist-info/RECORD +0 -213
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/WHEEL +0 -0
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/entry_points.txt +0 -0
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from veadk.version import VERSION
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def common_gen_ai_system(**kwargs) -> str:
    """Model provider name; rendered as `model_provider` on Volcengine CozeLoop."""
    return kwargs.get("model_provider") or "<unknown_model_provider>"


def common_gen_ai_system_version(**kwargs) -> str:
    """VeADK release string reported as the instrumentation system version."""
    return VERSION


def common_gen_ai_app_name(**kwargs) -> str:
    """Application name, with a placeholder when the caller did not supply one."""
    return kwargs.get("app_name") or "<unknown_app_name>"


def common_gen_ai_agent_name(**kwargs) -> str:
    """Agent name, with a placeholder when the caller did not supply one."""
    return kwargs.get("agent_name") or "<unknown_agent_name>"


def common_gen_ai_user_id(**kwargs) -> str:
    """User identifier, with a placeholder when the caller did not supply one."""
    return kwargs.get("user_id") or "<unknown_user_id>"


def common_gen_ai_session_id(**kwargs) -> str:
    """Session identifier, with a placeholder when the caller did not supply one."""
    return kwargs.get("session_id") or "<unknown_session_id>"


def common_cozeloop_report_source(**kwargs) -> str:
    """Constant report-source tag required by the CozeLoop backend."""
    return "veadk"


def llm_openinference_instrumentation_veadk(**kwargs) -> str:
    """VeADK release string for the OpenInference instrumentation attribute."""
    return VERSION


# Attribute key -> extractor callable. Several backends expect the same value
# under differently spelled keys, so a few extractors appear more than once.
COMMON_ATTRIBUTES = {
    "gen_ai.system": common_gen_ai_system,
    "gen_ai.system.version": common_gen_ai_system_version,
    "gen_ai.agent.name": common_gen_ai_agent_name,
    "openinference.instrumentation.veadk": llm_openinference_instrumentation_veadk,
    "gen_ai.app.name": common_gen_ai_app_name,  # APMPlus required
    "gen_ai.user.id": common_gen_ai_user_id,  # APMPlus required
    "gen_ai.session.id": common_gen_ai_session_id,  # APMPlus required
    "agent_name": common_gen_ai_agent_name,  # CozeLoop required
    "agent.name": common_gen_ai_agent_name,  # TLS required
    "app_name": common_gen_ai_app_name,  # CozeLoop required
    "app.name": common_gen_ai_app_name,  # TLS required
    "user.id": common_gen_ai_user_id,  # CozeLoop / TLS required
    "session.id": common_gen_ai_session_id,  # CozeLoop / TLS required
    "cozeloop.report.source": common_cozeloop_report_source,  # CozeLoop required
}
|
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
|
|
17
|
+
from veadk.tracing.telemetry.attributes.extractors.types import (
|
|
18
|
+
ExtractorResponse,
|
|
19
|
+
LLMAttributesParams,
|
|
20
|
+
)
|
|
21
|
+
from veadk.utils.misc import safe_json_serialize
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def llm_gen_ai_request_model(params: LLMAttributesParams) -> ExtractorResponse:
    """Model name taken from the outgoing LLM request."""
    return ExtractorResponse(content=params.llm_request.model or "<unknown_model_name>")


def llm_gen_ai_request_type(params: LLMAttributesParams) -> ExtractorResponse:
    """Request type; VeADK only issues chat-completion requests."""
    # Fix: the original expression `"chat" or "<unknown_type>"` always
    # evaluated to "chat", so the fallback operand was dead code.
    return ExtractorResponse(content="chat")


def llm_gen_ai_response_model(params: LLMAttributesParams) -> ExtractorResponse:
    """Response model name.

    NOTE(review): mirrors the *request* model — the response object is not
    consulted here. Confirm once llm_response exposes a model field.
    """
    return ExtractorResponse(content=params.llm_request.model or "<unknown_model_name>")


def llm_gen_ai_request_max_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Configured output-token cap from the request config (may be None)."""
    return ExtractorResponse(content=params.llm_request.config.max_output_tokens)


def llm_gen_ai_request_temperature(params: LLMAttributesParams) -> ExtractorResponse:
    """Sampling temperature from the request config (may be None)."""
    return ExtractorResponse(content=params.llm_request.config.temperature)


def llm_gen_ai_request_top_p(params: LLMAttributesParams) -> ExtractorResponse:
    """Nucleus-sampling top_p from the request config (may be None)."""
    return ExtractorResponse(content=params.llm_request.config.top_p)


def llm_gen_ai_response_stop_reason(params: LLMAttributesParams) -> ExtractorResponse:
    """Placeholder: no stop reason is surfaced by the current response type."""
    return ExtractorResponse(content="<no_stop_reason_provided>")


def llm_gen_ai_response_finish_reason(params: LLMAttributesParams) -> ExtractorResponse:
    """Placeholder: no finish reason is surfaced by the current response type."""
    # TODO: update to google-adk v1.12.0
    return ExtractorResponse(content="<no_finish_reason_provided>")
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def llm_gen_ai_usage_input_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Prompt-side token count, or None when the response has no usage metadata."""
    usage = params.llm_response.usage_metadata
    return ExtractorResponse(content=usage.prompt_token_count if usage else None)


def llm_gen_ai_usage_output_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Completion-side token count, or None when usage metadata is absent."""
    usage = params.llm_response.usage_metadata
    return ExtractorResponse(content=usage.candidates_token_count if usage else None)


def llm_gen_ai_usage_total_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Total token count, or None when usage metadata is absent."""
    usage = params.llm_response.usage_metadata
    return ExtractorResponse(content=usage.total_token_count if usage else None)


# FIXME: no dedicated cache-creation counter is exposed by usage_metadata, so
# this currently reports cached_content_token_count.
def llm_gen_ai_usage_cache_creation_input_tokens(
    params: LLMAttributesParams,
) -> ExtractorResponse:
    usage = params.llm_response.usage_metadata
    return ExtractorResponse(content=usage.cached_content_token_count if usage else None)


# FIXME: reports the same cached_content_token_count counter as the
# cache-creation extractor until a distinct field is available.
def llm_gen_ai_usage_cache_read_input_tokens(
    params: LLMAttributesParams,
) -> ExtractorResponse:
    usage = params.llm_response.usage_metadata
    return ExtractorResponse(content=usage.cached_content_token_count if usage else None)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
    """Flatten request contents into indexed `gen_ai.prompt.{i}.*` attributes.

    Each part of each content becomes one message dict; `idx` only advances
    when a part actually produced attributes, keeping indices contiguous.
    """
    # a part is a message
    messages: list[dict] = []
    idx = 0

    for content in params.llm_request.contents:
        if not content.parts:
            continue
        for part in content.parts:
            message = {}
            # text part
            if part.text:
                message[f"gen_ai.prompt.{idx}.role"] = content.role
                message[f"gen_ai.prompt.{idx}.content"] = part.text
            # function response fed back to the model
            if part.function_response:
                message[f"gen_ai.prompt.{idx}.role"] = content.role
                # Fix: the branch condition already guarantees
                # function_response is truthy, so the original inner
                # ternary fallback was dead code.
                message[f"gen_ai.prompt.{idx}.content"] = str(
                    part.function_response.response
                )
            # function call requested by the model
            # NOTE(review): the "<unkown_function_call_id>" typo is kept as-is
            # for consistency with the sibling extractors; fix everywhere at once.
            if part.function_call:
                message[f"gen_ai.prompt.{idx}.tool_calls.0.id"] = (
                    part.function_call.id
                    if part.function_call.id
                    else "<unkown_function_call_id>"
                )
                message[f"gen_ai.prompt.{idx}.tool_calls.0.type"] = "function"
                message[f"gen_ai.prompt.{idx}.tool_calls.0.function.name"] = (
                    part.function_call.name
                    if part.function_call.name
                    else "<unknown_function_name>"
                )
                message[f"gen_ai.prompt.{idx}.tool_calls.0.function.arguments"] = (
                    safe_json_serialize(part.function_call.args)
                    if part.function_call.args
                    else json.dumps({})
                )
            # inline binary data (e.g. an image)
            if part.inline_data:
                # Fix: display_name is optional on inline data; the original
                # unconditional `.split("/")` raised AttributeError when it
                # was None. Fall back to an empty string.
                display_name = part.inline_data.display_name or ""
                message[f"gen_ai.prompt.{idx}.type"] = "image_url"
                message[f"gen_ai.prompt.{idx}.image_url.name"] = (
                    display_name.split("/")[-1]
                )
                message[f"gen_ai.prompt.{idx}.image_url.url"] = display_name

            if message:
                messages.append(message)
                idx += 1

    return ExtractorResponse(content=messages)
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
    """Flatten response parts into indexed `gen_ai.completion.{i}.*` attributes."""
    messages = []
    content = params.llm_response.content

    if content and content.parts:
        for idx, part in enumerate(content.parts):
            prefix = f"gen_ai.completion.{idx}"
            message = {}

            if part.text:
                message[f"{prefix}.role"] = content.role
                message[f"{prefix}.content"] = part.text
            elif part.function_call:
                call = part.function_call
                message[f"{prefix}.role"] = content.role
                message[f"{prefix}.tool_calls.0.id"] = (
                    call.id or "<unkown_function_call_id>"
                )
                message[f"{prefix}.tool_calls.0.type"] = "function"
                message[f"{prefix}.tool_calls.0.function.name"] = (
                    call.name or "<unknown_function_name>"
                )
                message[f"{prefix}.tool_calls.0.function.arguments"] = (
                    safe_json_serialize(call.args) if call.args else json.dumps({})
                )

            if message:
                messages.append(message)

    return ExtractorResponse(content=messages)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
    """Streaming flag — not reported yet (llm_request.stream is not consulted)."""
    return ExtractorResponse(content=None)


def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
    """Span operation name; LLM spans are always chat calls."""
    return ExtractorResponse(content="chat")


def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
    """System instruction, emitted as a span event with role "system"."""
    event_payload = {
        "content": str(params.llm_request.config.system_instruction),
        "role": "system",
    }
    return ExtractorResponse(type="event", content=event_payload)
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def llm_gen_ai_user_message(params: LLMAttributesParams) -> ExtractorResponse:
    """Collect user-role request contents as `gen_ai.user.message` event payloads.

    A single-part content becomes a plain role/content dict; a multi-part
    content is flattened into one dict with `parts.{i}.*` keys.
    """
    # a content is a message
    messages = []

    for content in params.llm_request.contents:
        if content.role != "user" or not content.parts:
            continue

        message_parts = []
        if len(content.parts) == 1:
            only = content.parts[0]
            if only.text:
                message_parts.append(
                    {
                        "role": content.role,
                        "content": only.text,
                    }
                )
            elif only.function_response:
                message_parts.append(
                    {
                        "role": content.role,
                        "content": str(only.function_response.response),
                    }
                )
        else:
            message_part = {"role": content.role}
            for idx, part in enumerate(content.parts):
                # text part
                if part.text:
                    message_part[f"parts.{idx}.type"] = "text"
                    message_part[f"parts.{idx}.content"] = part.text
                # function response
                if part.function_response:
                    message_part[f"parts.{idx}.type"] = "function"
                    message_part[f"parts.{idx}.content"] = str(
                        part.function_response
                    )
                # inline binary data (e.g. an image)
                if part.inline_data:
                    # Fix: display_name is optional; the original unconditional
                    # `.split("/")` raised AttributeError when it was None.
                    display_name = part.inline_data.display_name or ""
                    message_part[f"parts.{idx}.type"] = "image_url"
                    message_part[f"parts.{idx}.image_url.name"] = (
                        display_name.split("/")[-1]
                    )
                    message_part[f"parts.{idx}.image_url.url"] = display_name

            message_parts.append(message_part)

        if message_parts:
            messages.extend(message_parts)

    return ExtractorResponse(type="event", content=messages)
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorResponse:
    """Collect model-role request contents as `gen_ai.assistant.message` events.

    A single-part text content becomes a plain role/content dict; a multi-part
    content is flattened into one dict keyed `parts.{i}.*` / `tool_calls.0.*`.
    """
    # a content is a message
    messages = []

    # each part in each content we make it a message
    # e.g. 2 contents and 3 parts each means 6 messages
    for content in params.llm_request.contents:
        if content.role == "model":
            message_parts = []

            # each part we make it a message
            if content.parts:
                # only one part
                if len(content.parts) == 1:
                    if content.parts[0].text:
                        message_parts.append(
                            {
                                "role": content.role,
                                "content": content.parts[0].text,
                            }
                        )
                    # NOTE(review): a single-part function call is deliberately
                    # skipped here — confirm whether it should be reported.
                    elif content.parts[0].function_call:
                        pass
                # multiple parts
                else:
                    message_part = {"role": content.role}

                    for idx, part in enumerate(content.parts):
                        # parse content
                        if part.text:
                            message_part[f"parts.{idx}.type"] = "text"
                            message_part[f"parts.{idx}.content"] = part.text
                        # parse tool_calls
                        # NOTE(review): the tool_calls.0.* keys are not indexed
                        # by part, so a later function_call part overwrites an
                        # earlier one — confirm this is intended.
                        if part.function_call:
                            message_part["tool_calls.0.id"] = (
                                part.function_call.id
                                if part.function_call.id
                                else "<unkown_function_call_id>"
                            )
                            message_part["tool_calls.0.type"] = "function"
                            message_part["tool_calls.0.function.name"] = (
                                part.function_call.name
                                if part.function_call.name
                                else "<unknown_function_name>"
                            )
                            message_part["tool_calls.0.function.arguments"] = (
                                safe_json_serialize(part.function_call.args)
                                if part.function_call.args
                                else json.dumps({})
                            )
                    message_parts.append(message_part)

            if message_parts:
                messages.extend(message_parts)

    return ExtractorResponse(type="event", content=messages)
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
    """Build the `gen_ai.choice` span event from the LLM response content.

    Single-part responses use flat `message.content` / `message.tool_calls.0.*`
    keys; multi-part responses use indexed `message.parts.{i}.*` keys.
    """
    message = {}

    # parse content to build a message
    content = params.llm_response.content
    if content and content.parts:
        message = {"message.role": content.role}

        if len(content.parts) == 1:
            part = content.parts[0]
            if part.text:
                message["message.content"] = part.text
            elif part.function_call:
                message["message.tool_calls.0.id"] = (
                    part.function_call.id
                    if part.function_call.id
                    else "<unkown_function_call_id>"
                )
                message["message.tool_calls.0.type"] = "function"
                message["message.tool_calls.0.function.name"] = (
                    part.function_call.name
                    if part.function_call.name
                    else "<unknown_function_name>"
                )
                message["message.tool_calls.0.function.arguments"] = (
                    safe_json_serialize(part.function_call.args)
                    if part.function_call.args
                    else json.dumps({})
                )
        else:
            for idx, part in enumerate(content.parts):
                # parse content
                if part.text:
                    message[f"message.parts.{idx}.type"] = "text"
                    message[f"message.parts.{idx}.text"] = part.text

                # parse tool_calls
                # NOTE(review): tool_calls.0.* is not indexed by part, so a
                # later function_call overwrites an earlier one — confirm.
                if part.function_call:
                    message["message.tool_calls.0.id"] = (
                        part.function_call.id
                        if part.function_call.id
                        else "<unkown_function_call_id>"
                    )
                    message["message.tool_calls.0.type"] = "function"
                    message["message.tool_calls.0.function.name"] = (
                        part.function_call.name
                        if part.function_call.name
                        else "<unknown_function_name>"
                    )
                    message["message.tool_calls.0.function.arguments"] = (
                        safe_json_serialize(part.function_call.args)
                        if part.function_call.args
                        else json.dumps({})
                    )

    return ExtractorResponse(type="event", content=message)
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
def llm_input_value(params: LLMAttributesParams) -> ExtractorResponse:
    """Whole LLM request, dumped (None fields dropped) and stringified."""
    dumped_request = params.llm_request.model_dump(exclude_none=True)
    return ExtractorResponse(content=str(dumped_request))


def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
    """Whole LLM response, dumped (None fields dropped) and stringified."""
    dumped_response = params.llm_response.model_dump(exclude_none=True)
    return ExtractorResponse(content=str(dumped_response))
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
    """Describe the tools offered to the model as indexed function attributes."""
    functions = []

    # Fix: iterate values() directly (the dict key was unused) and call the
    # private _get_declaration() once per tool instead of up to three times.
    # NOTE(review): assumes _get_declaration() is side-effect-free — confirm.
    for idx, tool_instance in enumerate(params.llm_request.tools_dict.values()):
        declaration = tool_instance._get_declaration()
        parameters = (
            declaration.parameters.model_dump(exclude_none=True)  # type: ignore
            if declaration and declaration.parameters  # type: ignore
            else {}
        )
        functions.append(
            {
                f"gen_ai.request.functions.{idx}.name": tool_instance.name,
                f"gen_ai.request.functions.{idx}.description": tool_instance.description,
                f"gen_ai.request.functions.{idx}.parameters": str(parameters),
            }
        )

    return ExtractorResponse(content=functions)
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
# Attribute key -> extractor callable, consumed when decorating LLM-call spans.
LLM_ATTRIBUTES = {
    # ===== request attributes =====
    "gen_ai.request.model": llm_gen_ai_request_model,
    "gen_ai.request.type": llm_gen_ai_request_type,
    "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
    "gen_ai.request.temperature": llm_gen_ai_request_temperature,
    "gen_ai.request.top_p": llm_gen_ai_request_top_p,
    "gen_ai.request.functions": llm_gen_ai_request_functions,
    # ===== response attributes =====
    "gen_ai.response.model": llm_gen_ai_response_model,
    "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
    "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
    # ===== streaming =====
    "gen_ai.is_streaming": llm_gen_ai_is_streaming,
    # ===== span type =====
    "gen_ai.operation.name": llm_gen_ai_operation_name,
    # ===== inputs and outputs =====
    # events (extractors returning type="event" become span events)
    "gen_ai.system.message": llm_gen_ai_system_message,
    "gen_ai.user.message": llm_gen_ai_user_message,
    "gen_ai.assistant.message": llm_gen_ai_assistant_message,
    "gen_ai.choice": llm_gen_ai_choice,
    # attributes
    "gen_ai.prompt": llm_gen_ai_prompt,
    "gen_ai.completion": llm_gen_ai_completion,
    # "input.value": llm_input_value,
    # "output.value": llm_output_value,
    # ===== usage =====
    "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
    "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
    "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
    "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
    "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
}
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from veadk.tracing.telemetry.attributes.extractors.types import (
|
|
16
|
+
ExtractorResponse,
|
|
17
|
+
ToolAttributesParams,
|
|
18
|
+
)
|
|
19
|
+
from veadk.utils.misc import safe_json_serialize
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def tool_gen_ai_operation_name(params: ToolAttributesParams) -> ExtractorResponse:
    """Return the constant GenAI operation name for tool-execution spans."""
    operation = "execute_tool"
    return ExtractorResponse(content=operation)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
    """Build a `tool`-role message describing the invoked tool.

    The message content is the JSON-serialized tool name, description and
    call arguments; the response is recorded on the span as an event
    (``type="event"``), not a plain attribute.
    """
    serialized_call = safe_json_serialize(
        {
            "name": params.tool.name,
            "description": params.tool.description,
            "parameters": params.args,
        }
    )
    message = {"role": "tool", "content": serialized_call}
    return ExtractorResponse(type="event", content=message)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def tool_gen_ai_tool_input(params: ToolAttributesParams) -> ExtractorResponse:
    """Serialize the tool name, description and call arguments as span input."""
    payload = {
        "name": params.tool.name,
        "description": params.tool.description,
        "parameters": params.args,
    }
    serialized = safe_json_serialize(payload)
    # Fall back to a sentinel when serialization yields a falsy result.
    return ExtractorResponse(content=serialized or "<unknown_tool_input>")
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def tool_gen_ai_tool_name(params: ToolAttributesParams) -> ExtractorResponse:
    """Expose the invoked tool's name, with a sentinel for empty names."""
    name = params.tool.name
    return ExtractorResponse(content=name if name else "<unknown_tool_name>")
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
    """Serialize the first function response of the tool invocation as span output.

    Returns the ``"<unknown_tool_output>"`` sentinel when the event carries no
    function responses (previously this raised ``IndexError`` and could break
    tracing), or when serialization yields a falsy result.
    """
    function_responses = params.function_response_event.get_function_responses()
    if not function_responses:
        # Robustness: an event without function responses must not crash
        # the attribute extraction path of the tracer.
        return ExtractorResponse(content="<unknown_tool_output>")
    # Only the first response is reported, matching previous behavior.
    function_response = function_responses[0].model_dump()
    tool_output = {
        "id": function_response["id"],
        "name": function_response["name"],
        "response": function_response["response"],
    }
    return ExtractorResponse(
        content=safe_json_serialize(tool_output) or "<unknown_tool_output>"
    )
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
# Registry mapping tool-span attribute names to extractor callables.
# TLS and CozeLoop backends each require their own attribute keys; the
# CozeLoop input/output keys reuse the same extractors as the TLS ones.
_TLS_TOOL_ATTRIBUTES = {
    "gen_ai.operation.name": tool_gen_ai_operation_name,
    "gen_ai.tool.name": tool_gen_ai_tool_name,  # TLS required
    "gen_ai.tool.input": tool_gen_ai_tool_input,  # TLS required
    "gen_ai.tool.output": tool_gen_ai_tool_output,  # TLS required
}

_COZELOOP_TOOL_ATTRIBUTES = {
    "cozeloop.input": tool_gen_ai_tool_input,  # CozeLoop required
    "cozeloop.output": tool_gen_ai_tool_output,  # CozeLoop required
}

TOOL_ATTRIBUTES = {**_TLS_TOOL_ATTRIBUTES, **_COZELOOP_TOOL_ATTRIBUTES}
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from typing import Any, Literal
|
|
16
|
+
|
|
17
|
+
from attr import dataclass
|
|
18
|
+
from google.adk.agents.invocation_context import InvocationContext
|
|
19
|
+
from google.adk.events import Event
|
|
20
|
+
from google.adk.models.llm_request import LlmRequest
|
|
21
|
+
from google.adk.models.llm_response import LlmResponse
|
|
22
|
+
from google.adk.tools import BaseTool
|
|
23
|
+
from opentelemetry.sdk.trace import _Span
|
|
24
|
+
from opentelemetry.trace.span import Span
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class ExtractorResponse:
    """Value produced by an attribute extractor, plus how to record it.

    ``type`` selects the recording strategy applied by :meth:`update_span`:

    - ``attribute``: ``span.set_attribute(attr_name, value)``
    - ``event``: ``span.add_event(...)``
    """

    content: list | dict | None | str | int | float

    type: Literal["attribute", "event"] = "attribute"

    @staticmethod
    def update_span(
        span: _Span | Span, attr_name: str, response: "ExtractorResponse"
    ) -> None:
        """Record ``response`` on ``span`` according to its ``type``.

        Responses with an unrecognized ``type`` are silently discarded.
        """
        if response.type == "attribute":
            ExtractorResponse._record_attributes(span, attr_name, response.content)
        elif response.type == "event":
            ExtractorResponse._record_events(span, attr_name, response.content)

    @staticmethod
    def _record_attributes(
        span: _Span | Span,
        attr_name: str,
        value: list | dict | None | str | int | float,
    ) -> None:
        # A list is treated as list[dict]: each dict's items become individual
        # span attributes; non-dict list elements are ignored.
        if isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    for key, val in item.items():
                        span.set_attribute(key, val)
            return
        # Any other value is set directly under the requested name.
        span.set_attribute(attr_name, value)  # type: ignore

    @staticmethod
    def _record_events(
        span: _Span | Span,
        attr_name: str,
        value: list | dict | None | str | int | float,
    ) -> None:
        # A dict is one event; a list is a sequence of events sharing the
        # same name. Other content shapes are not supported as events.
        if isinstance(value, dict):
            span.add_event(attr_name, value)
        elif isinstance(value, list):
            for event in value:
                span.add_event(attr_name, event)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
class LLMAttributesParams:
    """Bundle of inputs handed to every LLM attribute extractor for one model call."""

    # ADK invocation context of the agent run that issued this model call.
    invocation_context: InvocationContext
    # Identifier string for the event this call belongs to — presumably the
    # ADK event id; confirm against the tracer call site.
    event_id: str
    # The outbound request sent to the model.
    llm_request: LlmRequest
    # The response received from the model.
    llm_response: LlmResponse
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
@dataclass
class ToolAttributesParams:
    """Bundle of inputs handed to every tool attribute extractor for one tool call."""

    # The ADK tool that was invoked.
    tool: BaseTool
    # Keyword arguments the tool was called with.
    args: dict[str, Any]
    # ADK event carrying the tool's function response(s), read via
    # `get_function_responses()` by the output extractor.
    function_response_event: Event
|