veadk-python 0.2.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- veadk/__init__.py +37 -0
- veadk/a2a/__init__.py +13 -0
- veadk/a2a/agent_card.py +45 -0
- veadk/a2a/remote_ve_agent.py +390 -0
- veadk/a2a/utils/__init__.py +13 -0
- veadk/a2a/utils/agent_to_a2a.py +170 -0
- veadk/a2a/ve_a2a_server.py +93 -0
- veadk/a2a/ve_agent_executor.py +78 -0
- veadk/a2a/ve_middlewares.py +313 -0
- veadk/a2a/ve_task_store.py +37 -0
- veadk/agent.py +402 -0
- veadk/agent_builder.py +93 -0
- veadk/agents/loop_agent.py +68 -0
- veadk/agents/parallel_agent.py +72 -0
- veadk/agents/sequential_agent.py +64 -0
- veadk/auth/__init__.py +13 -0
- veadk/auth/base_auth.py +22 -0
- veadk/auth/ve_credential_service.py +203 -0
- veadk/auth/veauth/__init__.py +13 -0
- veadk/auth/veauth/apmplus_veauth.py +58 -0
- veadk/auth/veauth/ark_veauth.py +75 -0
- veadk/auth/veauth/base_veauth.py +50 -0
- veadk/auth/veauth/cozeloop_veauth.py +13 -0
- veadk/auth/veauth/opensearch_veauth.py +75 -0
- veadk/auth/veauth/postgresql_veauth.py +75 -0
- veadk/auth/veauth/prompt_pilot_veauth.py +60 -0
- veadk/auth/veauth/speech_veauth.py +54 -0
- veadk/auth/veauth/utils.py +69 -0
- veadk/auth/veauth/vesearch_veauth.py +62 -0
- veadk/auth/veauth/viking_mem0_veauth.py +91 -0
- veadk/cli/__init__.py +13 -0
- veadk/cli/cli.py +58 -0
- veadk/cli/cli_clean.py +87 -0
- veadk/cli/cli_create.py +163 -0
- veadk/cli/cli_deploy.py +233 -0
- veadk/cli/cli_eval.py +215 -0
- veadk/cli/cli_init.py +214 -0
- veadk/cli/cli_kb.py +110 -0
- veadk/cli/cli_pipeline.py +285 -0
- veadk/cli/cli_prompt.py +86 -0
- veadk/cli/cli_update.py +106 -0
- veadk/cli/cli_uploadevalset.py +139 -0
- veadk/cli/cli_web.py +143 -0
- veadk/cloud/__init__.py +13 -0
- veadk/cloud/cloud_agent_engine.py +485 -0
- veadk/cloud/cloud_app.py +475 -0
- veadk/config.py +115 -0
- veadk/configs/__init__.py +13 -0
- veadk/configs/auth_configs.py +133 -0
- veadk/configs/database_configs.py +132 -0
- veadk/configs/model_configs.py +78 -0
- veadk/configs/tool_configs.py +54 -0
- veadk/configs/tracing_configs.py +110 -0
- veadk/consts.py +74 -0
- veadk/evaluation/__init__.py +17 -0
- veadk/evaluation/adk_evaluator/__init__.py +17 -0
- veadk/evaluation/adk_evaluator/adk_evaluator.py +302 -0
- veadk/evaluation/base_evaluator.py +642 -0
- veadk/evaluation/deepeval_evaluator/__init__.py +17 -0
- veadk/evaluation/deepeval_evaluator/deepeval_evaluator.py +339 -0
- veadk/evaluation/eval_set_file_loader.py +48 -0
- veadk/evaluation/eval_set_recorder.py +146 -0
- veadk/evaluation/types.py +65 -0
- veadk/evaluation/utils/prometheus.py +196 -0
- veadk/integrations/__init__.py +13 -0
- veadk/integrations/ve_apig/__init__.py +13 -0
- veadk/integrations/ve_apig/ve_apig.py +349 -0
- veadk/integrations/ve_apig/ve_apig_utils.py +332 -0
- veadk/integrations/ve_code_pipeline/__init__.py +13 -0
- veadk/integrations/ve_code_pipeline/ve_code_pipeline.py +431 -0
- veadk/integrations/ve_cozeloop/__init__.py +13 -0
- veadk/integrations/ve_cozeloop/ve_cozeloop.py +96 -0
- veadk/integrations/ve_cr/__init__.py +13 -0
- veadk/integrations/ve_cr/ve_cr.py +220 -0
- veadk/integrations/ve_faas/__init__.py +13 -0
- veadk/integrations/ve_faas/template/cookiecutter.json +15 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/__init__.py +13 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/clean.py +23 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/config.yaml.example +6 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/deploy.py +106 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__init__.py +13 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/agent.py +25 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/app.py +202 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/requirements.txt +3 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/run.sh +49 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/{{ cookiecutter.app_name }}/__init__.py +14 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/{{ cookiecutter.app_name }}/agent.py +27 -0
- veadk/integrations/ve_faas/ve_faas.py +754 -0
- veadk/integrations/ve_faas/ve_faas_utils.py +408 -0
- veadk/integrations/ve_faas/web_template/cookiecutter.json +20 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/__init__.py +13 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/clean.py +23 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/config.yaml.example +2 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/deploy.py +44 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/Dockerfile +23 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/app.py +123 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/init_db.py +46 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/models.py +36 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/requirements.txt +4 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/run.sh +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/static/css/style.css +368 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/static/js/admin.js +0 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/dashboard.html +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/edit_post.html +24 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/login.html +21 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/admin/posts.html +53 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/base.html +45 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/index.html +29 -0
- veadk/integrations/ve_faas/web_template/{{cookiecutter.local_dir_name}}/src/templates/post.html +14 -0
- veadk/integrations/ve_identity/__init__.py +110 -0
- veadk/integrations/ve_identity/auth_config.py +261 -0
- veadk/integrations/ve_identity/auth_mixins.py +650 -0
- veadk/integrations/ve_identity/auth_processor.py +385 -0
- veadk/integrations/ve_identity/function_tool.py +158 -0
- veadk/integrations/ve_identity/identity_client.py +864 -0
- veadk/integrations/ve_identity/mcp_tool.py +181 -0
- veadk/integrations/ve_identity/mcp_toolset.py +431 -0
- veadk/integrations/ve_identity/models.py +228 -0
- veadk/integrations/ve_identity/token_manager.py +188 -0
- veadk/integrations/ve_identity/utils.py +151 -0
- veadk/integrations/ve_prompt_pilot/__init__.py +13 -0
- veadk/integrations/ve_prompt_pilot/ve_prompt_pilot.py +85 -0
- veadk/integrations/ve_tls/__init__.py +13 -0
- veadk/integrations/ve_tls/utils.py +116 -0
- veadk/integrations/ve_tls/ve_tls.py +212 -0
- veadk/integrations/ve_tos/ve_tos.py +710 -0
- veadk/integrations/ve_viking_db_memory/__init__.py +13 -0
- veadk/integrations/ve_viking_db_memory/ve_viking_db_memory.py +308 -0
- veadk/knowledgebase/__init__.py +17 -0
- veadk/knowledgebase/backends/__init__.py +13 -0
- veadk/knowledgebase/backends/base_backend.py +72 -0
- veadk/knowledgebase/backends/in_memory_backend.py +91 -0
- veadk/knowledgebase/backends/opensearch_backend.py +162 -0
- veadk/knowledgebase/backends/redis_backend.py +172 -0
- veadk/knowledgebase/backends/utils.py +92 -0
- veadk/knowledgebase/backends/vikingdb_knowledge_backend.py +608 -0
- veadk/knowledgebase/entry.py +25 -0
- veadk/knowledgebase/knowledgebase.py +307 -0
- veadk/memory/__init__.py +35 -0
- veadk/memory/long_term_memory.py +365 -0
- veadk/memory/long_term_memory_backends/__init__.py +13 -0
- veadk/memory/long_term_memory_backends/base_backend.py +35 -0
- veadk/memory/long_term_memory_backends/in_memory_backend.py +67 -0
- veadk/memory/long_term_memory_backends/mem0_backend.py +155 -0
- veadk/memory/long_term_memory_backends/opensearch_backend.py +124 -0
- veadk/memory/long_term_memory_backends/redis_backend.py +140 -0
- veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py +189 -0
- veadk/memory/short_term_memory.py +252 -0
- veadk/memory/short_term_memory_backends/__init__.py +13 -0
- veadk/memory/short_term_memory_backends/base_backend.py +31 -0
- veadk/memory/short_term_memory_backends/mysql_backend.py +49 -0
- veadk/memory/short_term_memory_backends/postgresql_backend.py +49 -0
- veadk/memory/short_term_memory_backends/sqlite_backend.py +55 -0
- veadk/memory/short_term_memory_processor.py +100 -0
- veadk/processors/__init__.py +26 -0
- veadk/processors/base_run_processor.py +120 -0
- veadk/prompts/__init__.py +13 -0
- veadk/prompts/agent_default_prompt.py +30 -0
- veadk/prompts/prompt_evaluator.py +20 -0
- veadk/prompts/prompt_memory_processor.py +55 -0
- veadk/prompts/prompt_optimization.py +150 -0
- veadk/runner.py +732 -0
- veadk/tools/__init__.py +13 -0
- veadk/tools/builtin_tools/__init__.py +13 -0
- veadk/tools/builtin_tools/agent_authorization.py +94 -0
- veadk/tools/builtin_tools/generate_image.py +23 -0
- veadk/tools/builtin_tools/image_edit.py +300 -0
- veadk/tools/builtin_tools/image_generate.py +446 -0
- veadk/tools/builtin_tools/lark.py +67 -0
- veadk/tools/builtin_tools/las.py +24 -0
- veadk/tools/builtin_tools/link_reader.py +66 -0
- veadk/tools/builtin_tools/llm_shield.py +381 -0
- veadk/tools/builtin_tools/load_knowledgebase.py +97 -0
- veadk/tools/builtin_tools/mcp_router.py +29 -0
- veadk/tools/builtin_tools/run_code.py +113 -0
- veadk/tools/builtin_tools/tts.py +253 -0
- veadk/tools/builtin_tools/vesearch.py +49 -0
- veadk/tools/builtin_tools/video_generate.py +363 -0
- veadk/tools/builtin_tools/web_scraper.py +76 -0
- veadk/tools/builtin_tools/web_search.py +83 -0
- veadk/tools/demo_tools.py +58 -0
- veadk/tools/load_knowledgebase_tool.py +149 -0
- veadk/tools/sandbox/__init__.py +13 -0
- veadk/tools/sandbox/browser_sandbox.py +37 -0
- veadk/tools/sandbox/code_sandbox.py +40 -0
- veadk/tools/sandbox/computer_sandbox.py +34 -0
- veadk/tracing/__init__.py +13 -0
- veadk/tracing/base_tracer.py +58 -0
- veadk/tracing/telemetry/__init__.py +13 -0
- veadk/tracing/telemetry/attributes/attributes.py +29 -0
- veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py +180 -0
- veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +858 -0
- veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py +152 -0
- veadk/tracing/telemetry/attributes/extractors/types.py +164 -0
- veadk/tracing/telemetry/exporters/__init__.py +13 -0
- veadk/tracing/telemetry/exporters/apmplus_exporter.py +558 -0
- veadk/tracing/telemetry/exporters/base_exporter.py +39 -0
- veadk/tracing/telemetry/exporters/cozeloop_exporter.py +129 -0
- veadk/tracing/telemetry/exporters/inmemory_exporter.py +248 -0
- veadk/tracing/telemetry/exporters/tls_exporter.py +139 -0
- veadk/tracing/telemetry/opentelemetry_tracer.py +320 -0
- veadk/tracing/telemetry/telemetry.py +411 -0
- veadk/types.py +47 -0
- veadk/utils/__init__.py +13 -0
- veadk/utils/audio_manager.py +95 -0
- veadk/utils/auth.py +294 -0
- veadk/utils/logger.py +59 -0
- veadk/utils/mcp_utils.py +44 -0
- veadk/utils/misc.py +184 -0
- veadk/utils/patches.py +101 -0
- veadk/utils/volcengine_sign.py +205 -0
- veadk/version.py +15 -0
- veadk_python-0.2.27.dist-info/METADATA +373 -0
- veadk_python-0.2.27.dist-info/RECORD +218 -0
- veadk_python-0.2.27.dist-info/WHEEL +5 -0
- veadk_python-0.2.27.dist-info/entry_points.txt +2 -0
- veadk_python-0.2.27.dist-info/licenses/LICENSE +201 -0
- veadk_python-0.2.27.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,858 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
|
|
17
|
+
from veadk.tracing.telemetry.attributes.extractors.types import (
|
|
18
|
+
ExtractorResponse,
|
|
19
|
+
LLMAttributesParams,
|
|
20
|
+
)
|
|
21
|
+
from veadk.utils.misc import safe_json_serialize
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def llm_gen_ai_request_model(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the model name specified in the LLM request.

    Used to track model usage patterns across traced LLM calls.

    Args:
        params: LLM execution parameters containing request details.

    Returns:
        ExtractorResponse: The requested model name, or a placeholder when
        the request does not carry one.
    """
    model_name = params.llm_request.model
    if not model_name:
        model_name = "<unknown_model_name>"
    return ExtractorResponse(content=model_name)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def llm_gen_ai_request_type(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the LLM request type.

    All traced interactions are conversational, so this extractor always
    reports "chat".

    Args:
        params: LLM execution parameters (unused in this extractor).

    Returns:
        ExtractorResponse: Response containing "chat" as the request type.
    """
    # The original expression was `"chat" or "<unknown_type>"`; since the
    # left operand is a non-empty string the fallback was dead code.
    return ExtractorResponse(content="chat")
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def llm_gen_ai_response_model(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the model name attributed to the LLM response.

    NOTE(review): this reads the model from the *request* — presumably the
    response object exposes no model field, so the requested model is echoed
    back; confirm against LLMAttributesParams.

    Args:
        params: LLM execution parameters containing request details.

    Returns:
        ExtractorResponse: The model name, or a placeholder when absent.
    """
    model_name = params.llm_request.model
    if not model_name:
        model_name = "<unknown_model_name>"
    return ExtractorResponse(content=model_name)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def llm_gen_ai_request_max_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the configured maximum number of output tokens.

    Useful for cost prediction and for diagnosing truncated responses.

    Args:
        params: LLM execution parameters containing request configuration.

    Returns:
        ExtractorResponse: The `max_output_tokens` value from the request
        config (may be None when not configured).
    """
    max_tokens = params.llm_request.config.max_output_tokens
    return ExtractorResponse(content=max_tokens)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def llm_gen_ai_request_temperature(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the sampling temperature configured for the request.

    Temperature controls randomness of the model output, trading off
    creativity against consistency.

    Args:
        params: LLM execution parameters containing request configuration.

    Returns:
        ExtractorResponse: The `temperature` value from the request config
        (may be None when not configured).
    """
    temperature = params.llm_request.config.temperature
    return ExtractorResponse(content=temperature)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def llm_gen_ai_request_top_p(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the nucleus-sampling (top-p) value configured for the request.

    Top-p limits token sampling to the smallest set of tokens whose
    cumulative probability exceeds p, controlling output diversity.

    Args:
        params: LLM execution parameters containing request configuration.

    Returns:
        ExtractorResponse: The `top_p` value from the request config
        (may be None when not configured).
    """
    top_p = params.llm_request.config.top_p
    return ExtractorResponse(content=top_p)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def llm_gen_ai_response_stop_reason(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the stop reason of the LLM response.

    Not yet wired to real response data: the underlying response object is
    not inspected here, so a fixed placeholder is reported.

    Args:
        params: LLM execution parameters (unused in this extractor).

    Returns:
        ExtractorResponse: A placeholder stop reason.
    """
    placeholder = "<no_stop_reason_provided>"
    return ExtractorResponse(content=placeholder)
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def llm_gen_ai_response_finish_reason(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the finish reason of the LLM response.

    The finish reason distinguishes natural completion from token-limit
    truncation or stop sequences. Extraction is not implemented yet, so a
    fixed placeholder is reported.

    Args:
        params: LLM execution parameters (unused in this extractor).

    Returns:
        ExtractorResponse: A placeholder finish reason.

    Note:
        Critical for understanding response quality and completeness.
    """
    # TODO: update to google-adk v1.12.0
    placeholder = "<no_finish_reason_provided>"
    return ExtractorResponse(content=placeholder)
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def llm_gen_ai_usage_input_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the number of prompt (input) tokens consumed.

    Essential for cost tracking and prompt-size analysis.

    Args:
        params: LLM execution parameters containing response metadata.

    Returns:
        ExtractorResponse: The prompt token count, or None when the
        response carries no usage metadata.
    """
    usage = params.llm_response.usage_metadata
    token_count = usage.prompt_token_count if usage else None
    return ExtractorResponse(content=token_count)
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def llm_gen_ai_usage_output_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the number of generated (output) tokens.

    Essential for cost tracking and response-length analysis.

    Args:
        params: LLM execution parameters containing response metadata.

    Returns:
        ExtractorResponse: The candidates token count, or None when the
        response carries no usage metadata.
    """
    usage = params.llm_response.usage_metadata
    token_count = usage.candidates_token_count if usage else None
    return ExtractorResponse(content=token_count)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def llm_gen_ai_usage_total_tokens(params: LLMAttributesParams) -> ExtractorResponse:
    """Return the total number of tokens consumed (input + output).

    Used for overall cost tracking of a model interaction.

    Args:
        params: LLM execution parameters containing response metadata.

    Returns:
        ExtractorResponse: The total token count, or None when the
        response carries no usage metadata.
    """
    usage = params.llm_response.usage_metadata
    token_count = usage.total_token_count if usage else None
    return ExtractorResponse(content=token_count)
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
# FIXME
def llm_gen_ai_usage_cache_creation_input_tokens(
    params: LLMAttributesParams,
) -> ExtractorResponse:
    """Return the number of tokens used for cache creation.

    NOTE(review): this reads `cached_content_token_count`, which presumably
    counts cache *reads* rather than cache creation — hence the FIXME;
    confirm against the usage-metadata schema.

    Args:
        params: LLM execution parameters containing response metadata.

    Returns:
        ExtractorResponse: The cached-content token count, or None when the
        response carries no usage metadata.
    """
    usage = params.llm_response.usage_metadata
    token_count = usage.cached_content_token_count if usage else None
    return ExtractorResponse(content=token_count)
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
# FIXME
def llm_gen_ai_usage_cache_read_input_tokens(
    params: LLMAttributesParams,
) -> ExtractorResponse:
    """Return the number of input tokens served from cache.

    Affects cost calculation for caching-enabled models.

    Args:
        params: LLM execution parameters containing response metadata.

    Returns:
        ExtractorResponse: The cached-content token count, or None when the
        response carries no usage metadata.
    """
    usage = params.llm_response.usage_metadata
    token_count = usage.cached_content_token_count if usage else None
    return ExtractorResponse(content=token_count)
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract structured prompt data for span attributes.

    Walks the conversation history in the LLM request and flattens every
    content part into an indexed `gen_ai.prompt.{idx}.*` attribute dict
    covering text, function responses, function calls and inline images.

    Args:
        params: LLM execution parameters containing request content.

    Returns:
        ExtractorResponse: Response containing a list of structured prompt
        message dicts; parts that match no known kind are skipped and do
        not consume an index.
    """
    # a part is a message
    messages: list[dict] = []
    idx = 0

    for content in params.llm_request.contents:
        if content.parts:
            for part in content.parts:
                message = {}
                # text part
                if part.text:
                    message[f"gen_ai.prompt.{idx}.role"] = content.role
                    message[f"gen_ai.prompt.{idx}.content"] = part.text
                # function response (the inner `if part.function_response`
                # re-check from the original was always true and is removed)
                if part.function_response:
                    message[f"gen_ai.prompt.{idx}.role"] = content.role
                    message[f"gen_ai.prompt.{idx}.content"] = str(
                        part.function_response.response
                    )
                # function call
                if part.function_call:
                    # fixed typo: was "<unkown_function_call_id>"
                    message[f"gen_ai.prompt.{idx}.tool_calls.0.id"] = (
                        part.function_call.id
                        if part.function_call.id
                        else "<unknown_function_call_id>"
                    )
                    message[f"gen_ai.prompt.{idx}.tool_calls.0.type"] = "function"
                    message[f"gen_ai.prompt.{idx}.tool_calls.0.function.name"] = (
                        part.function_call.name
                        if part.function_call.name
                        else "<unknown_function_name>"
                    )
                    message[f"gen_ai.prompt.{idx}.tool_calls.0.function.arguments"] = (
                        safe_json_serialize(part.function_call.args)
                        if part.function_call.args
                        else json.dumps({})
                    )
                # image
                if part.inline_data:
                    message[f"gen_ai.prompt.{idx}.type"] = "image_url"
                    # NOTE(review): display_name is used as the URL as well —
                    # presumably it holds a TOS/object path; confirm upstream.
                    message[f"gen_ai.prompt.{idx}.image_url.name"] = (
                        part.inline_data.display_name.split("/")[-1]
                        if part.inline_data.display_name
                        else "<unknown_image_name>"
                    )
                    message[f"gen_ai.prompt.{idx}.image_url.url"] = (
                        part.inline_data.display_name
                    )

                if message:
                    messages.append(message)
                    idx += 1

    return ExtractorResponse(content=messages)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract structured completion data for span attributes.

    Flattens the model's response parts into indexed
    `gen_ai.completion.{idx}.*` attribute dicts covering plain text and
    function (tool) calls.

    Args:
        params: LLM execution parameters containing response content.

    Returns:
        ExtractorResponse: Response containing a list of structured
        completion message dicts; empty parts are skipped.
    """
    messages = []

    content = params.llm_response.content
    if content and content.parts:
        for idx, part in enumerate(content.parts):
            message = {}
            if part.text:
                message[f"gen_ai.completion.{idx}.role"] = content.role
                message[f"gen_ai.completion.{idx}.content"] = part.text
            elif part.function_call:
                message[f"gen_ai.completion.{idx}.role"] = content.role
                # fixed typo: was "<unkown_function_call_id>"
                message[f"gen_ai.completion.{idx}.tool_calls.0.id"] = (
                    part.function_call.id
                    if part.function_call.id
                    else "<unknown_function_call_id>"
                )
                message[f"gen_ai.completion.{idx}.tool_calls.0.type"] = "function"
                message[f"gen_ai.completion.{idx}.tool_calls.0.function.name"] = (
                    part.function_call.name
                    if part.function_call.name
                    else "<unknown_function_name>"
                )
                message[f"gen_ai.completion.{idx}.tool_calls.0.function.arguments"] = (
                    safe_json_serialize(part.function_call.args)
                    if part.function_call.args
                    else json.dumps({})
                )

            if message:
                messages.append(message)
    return ExtractorResponse(content=messages)
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
def llm_gen_ai_messages(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract complete conversation messages as structured events.

    Processes the entire conversation context including system instructions,
    user messages, tool messages, and assistant responses into structured
    events for comprehensive conversation flow analysis.

    Args:
        params: LLM execution parameters containing request content

    Returns:
        ExtractorResponse: Event list response containing structured conversation events
    """
    events = []

    # system message — appended unconditionally; when system_instruction is
    # None this emits the literal string "None" as content.
    events.append(
        {
            "gen_ai.system.message": {
                "role": "system",
                "content": str(params.llm_request.config.system_instruction),
            }
        }
    )

    # user, tool, and assistant message
    if params.llm_request and params.llm_request.contents:
        for content in params.llm_request.contents:
            if content and content.parts:
                # content.role == "user"
                # part.function_response -> gen_ai.tool.message
                # not part.function_response -> gen_ai.user.message
                # content.role == "model" -> gen_ai.assistant.message
                if content.role == "user":
                    user_event = {}
                    user_event["gen_ai.user.message"] = {"role": content.role}
                    for idx, part in enumerate(content.parts):
                        if part.function_response:
                            # function responses become standalone tool events,
                            # emitted immediately (they precede the user event
                            # if one is appended afterwards)
                            events.append(
                                {
                                    "gen_ai.tool.message": {
                                        "role": "tool",
                                        "id": part.function_response.id,
                                        "content": safe_json_serialize(
                                            part.function_response.response
                                        ),
                                    }
                                }
                            )
                        else:
                            if part.text:
                                if len(content.parts) == 1:
                                    # single-part message: flat "content" key
                                    user_event["gen_ai.user.message"].update(
                                        {"content": part.text}
                                    )
                                else:
                                    # multi-part message: indexed parts.{idx}.* keys
                                    user_event["gen_ai.user.message"].update(
                                        {
                                            f"parts.{idx}.type": "text",
                                            f"parts.{idx}.text": part.text,
                                        },
                                    )
                            if part.inline_data:
                                if len(content.parts) == 1:
                                    # re-binding part here is a no-op: with one
                                    # part, content.parts[0] is already `part`
                                    part = content.parts[0]
                                    user_event["gen_ai.user.message"].update(
                                        {
                                            "parts.0.type": "image_url",
                                            # NOTE(review): display_name doubles
                                            # as the image URL — presumably an
                                            # object-store path; confirm upstream
                                            "parts.0.image_url.name": (
                                                part.inline_data.display_name.split(
                                                    "/"
                                                )[-1]
                                                if part.inline_data
                                                and part.inline_data.display_name
                                                else "<unknown_image_name>"
                                            ),
                                            "parts.0.image_url.url": (
                                                part.inline_data.display_name
                                                if part.inline_data
                                                and part.inline_data.display_name
                                                else "<unknown_image_url>"
                                            ),
                                        }
                                    )
                                else:
                                    user_event["gen_ai.user.message"].update(
                                        {
                                            f"parts.{idx}.type": "image_url",
                                            f"parts.{idx}.image_url.name": (
                                                part.inline_data.display_name.split(
                                                    "/"
                                                )[-1]
                                                if part.inline_data.display_name
                                                else "<unknown_image_name>"
                                            ),
                                            f"parts.{idx}.image_url.url": (
                                                part.inline_data.display_name
                                                if part.inline_data.display_name
                                                else "<unknown_image_url>"
                                            ),
                                        }
                                    )
                    # in case of only function response: the dict then holds
                    # just the "role" key, so skip emitting an empty user event
                    if len(user_event["gen_ai.user.message"].items()) > 1:
                        events.append(user_event)
                elif content.role == "model":
                    event = {}
                    event["gen_ai.assistant.message"] = {"role": content.role}
                    for idx, part in enumerate(content.parts):
                        if part.text:
                            event["gen_ai.assistant.message"].update(
                                {
                                    f"parts.{idx}.type": "text",
                                    f"parts.{idx}.text": part.text,
                                }
                            )
                        if part.function_call:
                            # tool_calls keys are hard-coded at index 0, so a
                            # later function_call part overwrites an earlier one
                            event["gen_ai.assistant.message"].update(
                                {
                                    "tool_calls.0.id": str(part.function_call.id),
                                    "tool_calls.0.type": "function",
                                    "tool_calls.0.function.name": part.function_call.name
                                    if part.function_call.name
                                    else "<unknown_function_name>",
                                    "tool_calls.0.function.arguments": safe_json_serialize(
                                        part.function_call.args
                                    )
                                    if part.function_call.args
                                    else json.dumps({}),
                                }
                            )
                    events.append(event)

    return ExtractorResponse(type="event_list", content=events)
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
    """Report whether the LLM request ran in streaming mode.

    Streaming detection is not wired up yet; this extractor is a
    placeholder for the ``gen_ai.is_streaming`` attribute and always
    yields an empty value.

    Args:
        params: LLM execution parameters (currently ignored).

    Returns:
        ExtractorResponse: Response whose content is ``None``.
    """
    # TODO: surface `params.llm_request.stream` once it is available here.
    return ExtractorResponse(content=None)
|
|
517
|
+
|
|
518
|
+
|
|
519
|
+
def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
    """Provide the standardized operation name for LLM spans.

    Every model interaction is categorized under the same operation
    name so spans can be grouped consistently across providers.

    Args:
        params: LLM execution parameters (ignored; the name is fixed).

    Returns:
        ExtractorResponse: Response whose content is the constant "chat".
    """
    operation_name = "chat"
    return ExtractorResponse(content=operation_name)
|
|
532
|
+
|
|
533
|
+
|
|
534
|
+
def llm_gen_ai_span_kind(params: LLMAttributesParams) -> ExtractorResponse:
    """Provide the span-kind classification for LLM spans.

    Follows the OpenTelemetry generative-AI semantic conventions, which
    tag model-call spans with the kind "llm".

    Args:
        params: LLM execution parameters (ignored; the kind is fixed).

    Returns:
        ExtractorResponse: Response whose content is the constant "llm".
    """
    span_kind = "llm"
    return ExtractorResponse(content=span_kind)
|
|
544
|
+
|
|
545
|
+
|
|
546
|
+
# def llm_gen_ai_system_message(params: LLMAttributesParams) -> ExtractorResponse:
|
|
547
|
+
# event_attributes = {
|
|
548
|
+
# "content": str(params.llm_request.config.system_instruction),
|
|
549
|
+
# "role": "system",
|
|
550
|
+
# }
|
|
551
|
+
# return ExtractorResponse(type="event", content=event_attributes)
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
# def llm_gen_ai_user_message(params: LLMAttributesParams) -> ExtractorResponse:
|
|
555
|
+
# # a content is a message
|
|
556
|
+
# messages = []
|
|
557
|
+
|
|
558
|
+
# for content in params.llm_request.contents:
|
|
559
|
+
# if content.role == "user":
|
|
560
|
+
# message_parts = []
|
|
561
|
+
|
|
562
|
+
# if content.parts:
|
|
563
|
+
# if len(content.parts) == 1:
|
|
564
|
+
# if content.parts[0].text:
|
|
565
|
+
# message_parts.append(
|
|
566
|
+
# {
|
|
567
|
+
# "role": content.role,
|
|
568
|
+
# "content": content.parts[0].text,
|
|
569
|
+
# }
|
|
570
|
+
# )
|
|
571
|
+
# elif content.parts[0].function_response:
|
|
572
|
+
# message_parts.append(
|
|
573
|
+
# {
|
|
574
|
+
# "role": content.role,
|
|
575
|
+
# "content": str(
|
|
576
|
+
# content.parts[0].function_response.response
|
|
577
|
+
# ),
|
|
578
|
+
# }
|
|
579
|
+
# )
|
|
580
|
+
# else:
|
|
581
|
+
# message_part = {"role": content.role}
|
|
582
|
+
# for idx, part in enumerate(content.parts):
|
|
583
|
+
# # text part
|
|
584
|
+
# if part.text:
|
|
585
|
+
# message_part[f"parts.{idx}.type"] = "text"
|
|
586
|
+
# message_part[f"parts.{idx}.content"] = part.text
|
|
587
|
+
# # function response
|
|
588
|
+
# if part.function_response:
|
|
589
|
+
# message_part[f"parts.{idx}.type"] = "function"
|
|
590
|
+
# message_part[f"parts.{idx}.content"] = str(
|
|
591
|
+
# part.function_response
|
|
592
|
+
# )
|
|
593
|
+
# if part.inline_data:
|
|
594
|
+
# message_part[f"parts.{idx}.type"] = "image_url"
|
|
595
|
+
# message_part[f"parts.{idx}.image_url.name"] = (
|
|
596
|
+
# part.inline_data.display_name.split("/")[-1]
|
|
597
|
+
# )
|
|
598
|
+
# message_part[f"parts.{idx}.image_url.url"] = (
|
|
599
|
+
# part.inline_data.display_name
|
|
600
|
+
# )
|
|
601
|
+
|
|
602
|
+
# message_parts.append(message_part)
|
|
603
|
+
|
|
604
|
+
# if message_parts:
|
|
605
|
+
# messages.extend(message_parts)
|
|
606
|
+
|
|
607
|
+
# return ExtractorResponse(type="event", content=messages)
|
|
608
|
+
|
|
609
|
+
|
|
610
|
+
# def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorResponse:
|
|
611
|
+
# # a content is a message
|
|
612
|
+
# messages = []
|
|
613
|
+
|
|
614
|
+
# # each part in each content we make it a message
|
|
615
|
+
# # e.g. 2 contents and 3 parts each means 6 messages
|
|
616
|
+
# for content in params.llm_request.contents:
|
|
617
|
+
# if content.role == "model":
|
|
618
|
+
# message_parts = []
|
|
619
|
+
|
|
620
|
+
# # each part we make it a message
|
|
621
|
+
# if content.parts:
|
|
622
|
+
# # only one part
|
|
623
|
+
# if len(content.parts) == 1:
|
|
624
|
+
# if content.parts[0].text:
|
|
625
|
+
# message_parts.append(
|
|
626
|
+
# {
|
|
627
|
+
# "role": content.role,
|
|
628
|
+
# "content": content.parts[0].text,
|
|
629
|
+
# }
|
|
630
|
+
# )
|
|
631
|
+
# elif content.parts[0].function_call:
|
|
632
|
+
# pass
|
|
633
|
+
# # multiple parts
|
|
634
|
+
# else:
|
|
635
|
+
# message_part = {"role": content.role}
|
|
636
|
+
|
|
637
|
+
# for idx, part in enumerate(content.parts):
|
|
638
|
+
# # parse content
|
|
639
|
+
# if part.text:
|
|
640
|
+
# message_part[f"parts.{idx}.type"] = "text"
|
|
641
|
+
# message_part[f"parts.{idx}.content"] = part.text
|
|
642
|
+
# # parse tool_calls
|
|
643
|
+
# if part.function_call:
|
|
644
|
+
# message_part["tool_calls.0.id"] = (
|
|
645
|
+
# part.function_call.id
|
|
646
|
+
# if part.function_call.id
|
|
647
|
+
# else "<unkown_function_call_id>"
|
|
648
|
+
# )
|
|
649
|
+
# message_part["tool_calls.0.type"] = "function"
|
|
650
|
+
# message_part["tool_calls.0.function.name"] = (
|
|
651
|
+
# part.function_call.name
|
|
652
|
+
# if part.function_call.name
|
|
653
|
+
# else "<unknown_function_name>"
|
|
654
|
+
# )
|
|
655
|
+
# message_part["tool_calls.0.function.arguments"] = (
|
|
656
|
+
# safe_json_serialize(part.function_call.args)
|
|
657
|
+
# if part.function_call.args
|
|
658
|
+
# else json.dumps({})
|
|
659
|
+
# )
|
|
660
|
+
# message_parts.append(message_part)
|
|
661
|
+
|
|
662
|
+
# if message_parts:
|
|
663
|
+
# messages.extend(message_parts)
|
|
664
|
+
|
|
665
|
+
# return ExtractorResponse(type="event", content=messages)
|
|
666
|
+
|
|
667
|
+
|
|
668
|
+
def _choice_tool_call_attrs(function_call, slot: int) -> dict:
    """Build the `message.tool_calls.{slot}.*` attributes for one function call."""
    return {
        f"message.tool_calls.{slot}.id": (
            function_call.id
            if function_call.id
            # Fixed typo: was "<unkown_function_call_id>".
            else "<unknown_function_call_id>"
        ),
        f"message.tool_calls.{slot}.type": "function",
        f"message.tool_calls.{slot}.function.name": (
            function_call.name
            if function_call.name
            else "<unknown_function_name>"
        ),
        f"message.tool_calls.{slot}.function.arguments": (
            safe_json_serialize(function_call.args)
            if function_call.args
            else json.dumps({})
        ),
    }


def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract model choice data as span events.

    Processes the model's response content and creates choice events
    containing response metadata, content, and tool calls for
    detailed response analysis.

    Args:
        params: LLM execution parameters containing response content

    Returns:
        ExtractorResponse: Event response containing structured choice data
    """
    message = {}

    # parse content to build a message
    content = params.llm_response.content
    if content and content.parts:
        message = {"message.role": content.role}

        if len(content.parts) == 1:
            part = content.parts[0]
            if part.text:
                message["message.content"] = part.text
            elif part.function_call:
                message.update(_choice_tool_call_attrs(part.function_call, 0))
        else:
            # Bug fix: the original wrote every function call to
            # `message.tool_calls.0.*`, so with multiple tool calls in one
            # response only the last one survived. Track a separate slot
            # counter so each call gets its own index.
            tool_call_slot = 0
            for idx, part in enumerate(content.parts):
                # parse content
                if part.text:
                    message[f"message.parts.{idx}.type"] = "text"
                    message[f"message.parts.{idx}.text"] = part.text

                # parse tool_calls
                if part.function_call:
                    message.update(
                        _choice_tool_call_attrs(part.function_call, tool_call_slot)
                    )
                    tool_call_slot += 1

    return ExtractorResponse(type="event", content=message)
|
|
736
|
+
|
|
737
|
+
|
|
738
|
+
def llm_input_value(params: LLMAttributesParams) -> ExtractorResponse:
    """Serialize the complete LLM request for debugging.

    Dumps the whole request object (with ``None`` fields stripped) to a
    string so it can be attached to a span for inspection.

    Args:
        params: LLM execution parameters containing request details

    Returns:
        ExtractorResponse: Response containing the serialized request data
    """
    dumped_request = params.llm_request.model_dump(exclude_none=True)
    return ExtractorResponse(content=str(dumped_request))
|
|
753
|
+
|
|
754
|
+
|
|
755
|
+
def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
    """Serialize the complete LLM response for debugging.

    Dumps the whole response object (with ``None`` fields stripped) to a
    string so it can be attached to a span for inspection.

    Args:
        params: LLM execution parameters containing response details

    Returns:
        ExtractorResponse: Response containing the serialized response data
    """
    dumped_response = params.llm_response.model_dump(exclude_none=True)
    return ExtractorResponse(content=str(dumped_response))
|
|
770
|
+
|
|
771
|
+
|
|
772
|
+
def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract available functions/tools from the LLM request.

    Processes the tools dictionary from the LLM request and extracts
    function metadata including names, descriptions, and parameters
    for tool usage analysis and debugging.

    Args:
        params: LLM execution parameters containing request tools

    Returns:
        ExtractorResponse: Response containing list of function metadata
    """
    functions = []

    # Iterate values directly: the dict key was never used. Hoist the
    # declaration lookup — the original invoked `_get_declaration()` up to
    # three times per tool.
    for idx, tool_instance in enumerate(params.llm_request.tools_dict.values()):
        declaration = tool_instance._get_declaration()
        parameters = declaration.parameters if declaration else None  # type: ignore
        functions.append(
            {
                f"gen_ai.request.functions.{idx}.name": tool_instance.name,
                f"gen_ai.request.functions.{idx}.description": tool_instance.description,
                # Falls back to "{}" (str of an empty dict) when the tool
                # declares no parameters, matching the original behavior.
                f"gen_ai.request.functions.{idx}.parameters": str(
                    parameters.model_dump_json(exclude_none=True)
                    if parameters
                    else {}
                ),
            }
        )

    return ExtractorResponse(content=functions)
|
|
806
|
+
|
|
807
|
+
|
|
808
|
+
# Registry mapping LLM span attribute/event names to their extractor
# callables; consumed by the tracing layer when building LLM spans.
LLM_ATTRIBUTES = {
    # -> 1. attributes
    # -> 1.1. request
    "gen_ai.request.model": llm_gen_ai_request_model,
    "gen_ai.request.type": llm_gen_ai_request_type,
    "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
    "gen_ai.request.temperature": llm_gen_ai_request_temperature,
    "gen_ai.request.top_p": llm_gen_ai_request_top_p,
    # CozeLoop required
    "gen_ai.request.functions": llm_gen_ai_request_functions,
    # -> 1.2. response
    "gen_ai.response.model": llm_gen_ai_response_model,
    "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
    "gen_ai.response.finish_reason": llm_gen_ai_response_finish_reason,
    # -> 1.3. streaming
    "gen_ai.is_streaming": llm_gen_ai_is_streaming,
    # -> 1.4. span kind
    "gen_ai.operation.name": llm_gen_ai_operation_name,
    "gen_ai.span.kind": llm_gen_ai_span_kind,  # apmplus required
    # -> 1.5. inputs
    "gen_ai.prompt": llm_gen_ai_prompt,
    # -> 1.6. outputs
    "gen_ai.completion": llm_gen_ai_completion,
    # -> 1.7. usage
    "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
    "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,
    "gen_ai.usage.total_tokens": llm_gen_ai_usage_total_tokens,
    "gen_ai.usage.cache_creation_input_tokens": llm_gen_ai_usage_cache_creation_input_tokens,
    "gen_ai.usage.cache_read_input_tokens": llm_gen_ai_usage_cache_read_input_tokens,
    # -> 2. events
    # -> 2.1. inputs
    # To adapt both OpenTelemetry and CozeLoop rendering, and to avoid a
    # wrong ordering of tool-call and tool-response events, we use
    # `llm_gen_ai_messages` to upload the system message, user message,
    # and assistant message together.
    # Correct sequence: system message, user message, tool message,
    # and assistant message.
    "gen_ai.messages": llm_gen_ai_messages,
    # [deprecated]
    # "gen_ai.system.message": llm_gen_ai_system_message,
    # [deprecated]
    # "gen_ai.user.message": llm_gen_ai_user_message,
    # [deprecated]
    # "gen_ai.assistant.message": llm_gen_ai_assistant_message,
    # -> 2.2. outputs
    "gen_ai.choice": llm_gen_ai_choice,
    # [debugging]
    # "input.value": llm_input_value,
    # [debugging]
    # "output.value": llm_output_value,
}
|