veadk-python 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of veadk-python might be problematic. Click here for more details.
- veadk/agent.py +31 -21
- veadk/agents/loop_agent.py +55 -0
- veadk/agents/parallel_agent.py +60 -0
- veadk/agents/sequential_agent.py +55 -0
- veadk/cli/cli_deploy.py +14 -1
- veadk/cli/cli_web.py +27 -0
- veadk/cloud/cloud_app.py +21 -6
- veadk/consts.py +14 -1
- veadk/database/viking/viking_database.py +3 -3
- veadk/evaluation/adk_evaluator/__init__.py +4 -0
- veadk/evaluation/adk_evaluator/adk_evaluator.py +170 -217
- veadk/evaluation/base_evaluator.py +26 -20
- veadk/evaluation/deepeval_evaluator/deepeval_evaluator.py +8 -5
- veadk/{tracing/telemetry/metrics/__init__.py → integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/clean.py} +10 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/app.py +40 -7
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/run.sh +11 -5
- veadk/integrations/ve_faas/ve_faas.py +5 -1
- veadk/integrations/ve_tos/ve_tos.py +176 -0
- veadk/runner.py +162 -39
- veadk/tools/builtin_tools/image_edit.py +236 -0
- veadk/tools/builtin_tools/image_generate.py +236 -0
- veadk/tools/builtin_tools/video_generate.py +326 -0
- veadk/tools/sandbox/browser_sandbox.py +19 -9
- veadk/tools/sandbox/code_sandbox.py +21 -11
- veadk/tools/sandbox/computer_sandbox.py +16 -9
- veadk/tracing/base_tracer.py +6 -200
- veadk/tracing/telemetry/attributes/attributes.py +29 -0
- veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py +71 -0
- veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +451 -0
- veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py +76 -0
- veadk/tracing/telemetry/attributes/extractors/types.py +75 -0
- veadk/tracing/telemetry/exporters/apmplus_exporter.py +97 -38
- veadk/tracing/telemetry/exporters/base_exporter.py +10 -10
- veadk/tracing/telemetry/exporters/cozeloop_exporter.py +20 -13
- veadk/tracing/telemetry/exporters/inmemory_exporter.py +49 -32
- veadk/tracing/telemetry/exporters/tls_exporter.py +18 -12
- veadk/tracing/telemetry/opentelemetry_tracer.py +105 -102
- veadk/tracing/telemetry/telemetry.py +238 -0
- veadk/types.py +6 -1
- veadk/utils/misc.py +41 -1
- veadk/utils/patches.py +25 -0
- veadk/version.py +1 -1
- veadk_python-0.2.5.dist-info/METADATA +345 -0
- veadk_python-0.2.5.dist-info/RECORD +127 -0
- veadk/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/__pycache__/agent.cpython-310.pyc +0 -0
- veadk/__pycache__/config.cpython-310.pyc +0 -0
- veadk/__pycache__/consts.cpython-310.pyc +0 -0
- veadk/__pycache__/runner.cpython-310.pyc +0 -0
- veadk/__pycache__/types.cpython-310.pyc +0 -0
- veadk/__pycache__/version.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/agent_card.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/remote_ve_agent.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/ve_a2a_server.cpython-310.pyc +0 -0
- veadk/a2a/__pycache__/ve_agent_executor.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_deploy.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_init.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_prompt.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_studio.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/cli_web.cpython-310.pyc +0 -0
- veadk/cli/__pycache__/main.cpython-310.pyc +0 -0
- veadk/cloud/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/cloud/__pycache__/cloud_agent_engine.cpython-310.pyc +0 -0
- veadk/cloud/__pycache__/cloud_app.cpython-310.pyc +0 -0
- veadk/database/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/__pycache__/base_database.cpython-310.pyc +0 -0
- veadk/database/__pycache__/database_adapter.cpython-310.pyc +0 -0
- veadk/database/__pycache__/database_factory.cpython-310.pyc +0 -0
- veadk/database/__pycache__/local_database.cpython-310.pyc +0 -0
- veadk/database/kv/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/relational/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/vector/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/database/vector/__pycache__/opensearch_vector_database.cpython-310.pyc +0 -0
- veadk/database/vector/__pycache__/type.cpython-310.pyc +0 -0
- veadk/database/viking/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/base_evaluator.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/eval_set_file_loader.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/eval_set_recorder.cpython-310.pyc +0 -0
- veadk/evaluation/__pycache__/types.cpython-310.pyc +0 -0
- veadk/evaluation/adk_evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/deepeval_evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/evaluation/deepeval_evaluator/__pycache__/deepeval_evaluator.cpython-310.pyc +0 -0
- veadk/evaluation/utils/__pycache__/prometheus.cpython-310.pyc +0 -0
- veadk/integrations/ve_apig/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_apig/__pycache__/apig.cpython-310.pyc +0 -0
- veadk/integrations/ve_apig/__pycache__/ve_apig.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/types.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/ve_faas.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/ve_faas_utils.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/vefaas.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/__pycache__/vefaas_utils.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__pycache__/agent.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__pycache__/app.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/__pycache__/studio_app.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/{{ cookiecutter.app_name|replace('-', '_') }}/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/{{ cookiecutter.app_name|replace('-', '_') }}/__pycache__/agent.cpython-310.pyc +0 -0
- veadk/integrations/ve_prompt_pilot/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/integrations/ve_prompt_pilot/__pycache__/agentpilot.cpython-310.pyc +0 -0
- veadk/knowledgebase/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/knowledgebase/__pycache__/knowledgebase.cpython-310.pyc +0 -0
- veadk/knowledgebase/__pycache__/knowledgebase_database_adapter.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/long_term_memory.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/memory_database_adapter.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/short_term_memory.cpython-310.pyc +0 -0
- veadk/memory/__pycache__/short_term_memory_processor.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/agent_default_prompt.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/prompt_memory_processor.cpython-310.pyc +0 -0
- veadk/prompts/__pycache__/prompt_optimization.cpython-310.pyc +0 -0
- veadk/tools/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tools/__pycache__/demo_tools.cpython-310.pyc +0 -0
- veadk/tools/__pycache__/load_knowledgebase_tool.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/lark.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/vesearch.cpython-310.pyc +0 -0
- veadk/tools/builtin_tools/__pycache__/web_search.cpython-310.pyc +0 -0
- veadk/tools/sandbox/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/__pycache__/base_tracer.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/__pycache__/opentelemetry_tracer.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/apiserver_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/apmplus_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/base_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/cozeloop_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/inmemory_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/exporters/__pycache__/tls_exporter.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/metrics/__pycache__/opentelemetry_metrics.cpython-310.pyc +0 -0
- veadk/tracing/telemetry/metrics/opentelemetry_metrics.py +0 -73
- veadk/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/logger.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/mcp_utils.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/misc.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/patches.cpython-310.pyc +0 -0
- veadk/utils/__pycache__/volcengine_sign.cpython-310.pyc +0 -0
- veadk_python-0.2.2.dist-info/METADATA +0 -144
- veadk_python-0.2.2.dist-info/RECORD +0 -213
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/WHEEL +0 -0
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/entry_points.txt +0 -0
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {veadk_python-0.2.2.dist-info → veadk_python-0.2.5.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,326 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from typing import Dict
|
|
16
|
+
from google.adk.tools import ToolContext
|
|
17
|
+
from volcenginesdkarkruntime import Ark
|
|
18
|
+
from veadk.config import getenv
|
|
19
|
+
import time
|
|
20
|
+
import traceback
|
|
21
|
+
import json
|
|
22
|
+
from veadk.version import VERSION
|
|
23
|
+
from opentelemetry import trace
|
|
24
|
+
from opentelemetry.trace import Span
|
|
25
|
+
|
|
26
|
+
from veadk.utils.logger import get_logger
|
|
27
|
+
|
|
28
|
+
logger = get_logger(__name__)
|
|
29
|
+
|
|
30
|
+
# Module-level Ark client for the video-generation service; the API key and
# endpoint are read from the environment-backed config at import time.
client = Ark(
    api_key=getenv("MODEL_VIDEO_API_KEY"),
    base_url=getenv("MODEL_VIDEO_API_BASE"),
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
async def generate(prompt, first_frame_image=None, last_frame_image=None):
    """Create one video-generation task on the Ark service.

    Args:
        prompt: Text description of the video to generate.
        first_frame_image: Optional URL or data-URL used as the first frame.
        last_frame_image: Optional URL or data-URL used as the last frame.
            Only honored when ``first_frame_image`` is also provided (matches
            the original branch order: no first frame means text-only).

    Returns:
        The task-creation response from the Ark SDK (carries ``.id``).

    Raises:
        Exception: any SDK/network error is printed and re-raised.
    """
    # Build the request content once instead of duplicating the create()
    # call across three nearly-identical branches.
    content = [{"type": "text", "text": prompt}]
    if first_frame_image is None:
        logger.debug("text generation")
    elif last_frame_image is None:
        logger.debug("first frame generation")
        content.append(
            {
                "type": "image_url",
                "image_url": {"url": first_frame_image},
            }
        )
    else:
        logger.debug("last frame generation")
        content.append(
            {
                "type": "image_url",
                "image_url": {"url": first_frame_image},
                "role": "first_frame",
            }
        )
        content.append(
            {
                "type": "image_url",
                "image_url": {"url": last_frame_image},
                "role": "last_frame",
            }
        )

    try:
        response = client.content_generation.tasks.create(
            model=getenv("MODEL_VIDEO_NAME"),
            content=content,
        )
    except Exception:  # was a bare `except:` — keep Ctrl-C/SystemExit working
        traceback.print_exc()
        raise
    return response
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
async def video_generate(params: list, tool_context: ToolContext) -> Dict:
    """Generate videos in **batch** from text prompts, optionally guided by a
    first/last frame, and fine-tuned via model text commands appended to the
    prompt.

    Each item in ``params`` describes one video. Tasks are submitted in
    batches of 10 and polled until every task reaches a terminal state.

    Args:
        params (list[dict]): Video generation requests. Per item:
            Required:
                - video_name (str): Name/identifier of the output video file.
                - prompt (str): Text describing the video (zh/EN). Model text
                  commands may be appended, e.g.
                  ``... --rs 720p --rt 16:9 --dur 5 --fps 24 --wm true --seed 11 --cf false``
            Optional:
                - first_frame (str | None): URL or Base64 data URL for the
                  first frame.
                - last_frame (str | None): URL or Base64 data URL for the
                  last frame (used only together with ``first_frame``).

            Image constraints (first/last frame): jpeg/png/webp/bmp/tiff/gif;
            aspect ratio (width:height) 0.4-2.5; side length 300-6000 px;
            size < 30 MB; data URL form ``data:image/png;base64,<BASE64>``.

            Model text commands (model-dependent; unsupported keys may be
            ignored or rejected):
                --rs / --resolution   480p | 720p | 1080p
                --rt / --ratio        16:9 (default), 9:16, 4:3, 3:4, 1:1,
                                      2:1, 21:9, keep_ratio, adaptive
                --dur / --duration    Clip length in seconds (Seedance 3-12 s;
                                      Wan2.1 only supports 5 s)
                --fps                 Frame rate, commonly 16 or 24
                --wm / --watermark    true|false, default false
                --seed                int in [-1, 2^32-1], -1 = auto
                --cf / --camerafixed  true|false, default false
        tool_context (ToolContext): ADK tool context; per-video URLs are
            stored into ``tool_context.state`` as ``"<video_name>_video_url"``.

    Returns:
        Dict: ``{"status": "success"|"error", "success_list":
        [{video_name: video_url}, ...], "error_list": [video_name, ...]}``.
        ``status`` is ``"error"`` only when no video succeeded.

    Tips:
        - Keep prompts concise (recommended <= 500 characters).
        - Match first/last frame aspect ratio with ``--rt`` to avoid cropping.
        - Pass an explicit ``--seed`` for reproducibility.
    """
    import asyncio  # local import: only needed for non-blocking polling

    batch_size = 10
    success_list = []
    error_list = []
    # Accumulated across ALL batches (previously reset per batch, so the
    # span only reported the last batch's token usage).
    total_tokens = 0

    tracer = trace.get_tracer("gcp.vertex.agent")
    with tracer.start_as_current_span("call_llm") as span:
        input_part = {"role": "user"}
        output_part = {"message.role": "model"}

        for idx, item in enumerate(params):
            input_part[f"parts.{idx}.type"] = "text"
            input_part[f"parts.{idx}.text"] = json.dumps(item, ensure_ascii=False)

        for start_idx in range(0, len(params), batch_size):
            batch = params[start_idx : start_idx + batch_size]
            task_dict = {}  # task_id -> video_name for this batch
            for item in batch:
                video_name = item["video_name"]
                prompt = item["prompt"]
                first_frame = item.get("first_frame", None)
                last_frame = item.get("last_frame", None)
                try:
                    if not first_frame:
                        response = await generate(prompt)
                    elif not last_frame:
                        response = await generate(prompt, first_frame)
                    else:
                        response = await generate(prompt, first_frame, last_frame)
                    task_dict[response.id] = video_name
                except Exception as e:
                    logger.error(f"Error: {e}")
                    error_list.append(video_name)

            # Poll until every task in this batch reaches a terminal state.
            while task_dict:
                for task_id in list(task_dict.keys()):
                    result = client.content_generation.tasks.get(task_id=task_id)
                    status = result.status
                    if status == "succeeded":
                        logger.debug("----- task succeeded -----")
                        tool_context.state[f"{task_dict[task_id]}_video_url"] = (
                            result.content.video_url
                        )
                        total_tokens += result.usage.completion_tokens
                        # Index output parts by overall success count so that
                        # entries from different polling rounds/batches never
                        # overwrite each other (the old code restarted the
                        # index at 0 on every polling round).
                        part_idx = len(success_list)
                        output_part[f"message.parts.{part_idx}.type"] = "text"
                        output_part[f"message.parts.{part_idx}.text"] = (
                            f"{task_dict[task_id]}: {result.content.video_url}"
                        )
                        success_list.append(
                            {task_dict[task_id]: result.content.video_url}
                        )
                        task_dict.pop(task_id, None)
                    elif status == "failed":
                        logger.error("----- task failed -----")
                        logger.error(f"Error: {result.error}")
                        error_list.append(task_dict[task_id])
                        task_dict.pop(task_id, None)
                    else:
                        logger.debug(
                            f"Current status: {status}, Retrying after 10 seconds..."
                        )
                        # await instead of time.sleep(): this coroutine must
                        # not block the event loop while waiting.
                        await asyncio.sleep(10)

        add_span_attributes(
            span,
            tool_context,
            input_part=input_part,
            output_part=output_part,
            output_tokens=total_tokens,
            total_tokens=total_tokens,
            request_model=getenv("MODEL_VIDEO_NAME"),
            response_model=getenv("MODEL_VIDEO_NAME"),
        )

    return {
        "status": "success" if success_list else "error",
        "success_list": success_list,
        "error_list": error_list,
    }
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
def add_span_attributes(
    span: Span,
    tool_context: ToolContext,
    input_part: dict = None,
    output_part: dict = None,
    input_tokens: int = None,
    output_tokens: int = None,
    total_tokens: int = None,
    request_model: str = None,
    response_model: str = None,
):
    """Attach common and LLM telemetry attributes (plus message events) to *span*.

    Best-effort by design: any failure is printed and swallowed so that
    telemetry can never break the tool call itself.
    """
    try:
        invocation = tool_context._invocation_context
        app_name = invocation.app_name
        user_id = invocation.user_id
        agent_name = tool_context.agent_name
        session_id = invocation.session.id

        # Unconditional attributes, applied in a fixed (insertion) order.
        always_set = {
            "gen_ai.agent.name": agent_name,
            "openinference.instrumentation.veadk": VERSION,
            "gen_ai.app.name": app_name,
            "gen_ai.user.id": user_id,
            "gen_ai.session.id": session_id,
            "agent_name": agent_name,
            "agent.name": agent_name,
            "app_name": app_name,
            "app.name": app_name,
            "user.id": user_id,
            "session.id": session_id,
            "cozeloop.report.source": "veadk",
            "gen_ai.system": "openai",
            "gen_ai.operation.name": "chat",
        }
        for key, value in always_set.items():
            span.set_attribute(key, value)

        # Attributes only set when a truthy value was supplied.
        conditional = {
            "gen_ai.request.model": request_model,
            "gen_ai.response.model": response_model,
            "gen_ai.usage.total_tokens": total_tokens,
            "gen_ai.usage.output_tokens": output_tokens,
            "gen_ai.usage.input_tokens": input_tokens,
        }
        for key, value in conditional.items():
            if value:
                span.set_attribute(key, value)

        if input_part:
            span.add_event("gen_ai.user.message", input_part)
        if output_part:
            span.add_event("gen_ai.choice", output_part)
    except Exception:
        traceback.print_exc()
|
|
@@ -12,16 +12,26 @@
|
|
|
12
12
|
# See the License for the specific language governing permissions and
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
|
|
15
|
-
|
|
15
|
+
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
|
|
16
16
|
|
|
17
|
+
from veadk.config import getenv
|
|
18
|
+
from veadk.utils.mcp_utils import get_mcp_params
|
|
17
19
|
|
|
18
|
-
|
|
19
|
-
"""Using the remote browser sandbox to according to the prompt.
|
|
20
|
+
url = getenv("TOOL_BROWSER_SANDBOX_URL")
|
|
20
21
|
|
|
21
|
-
Args:
|
|
22
|
-
prompt (str): The prompt to be used.
|
|
23
22
|
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
23
|
+
browser_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
|
|
24
|
+
|
|
25
|
+
# browser_sandbox = ...
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# def browser_use(prompt: str) -> str:
|
|
29
|
+
# """Using the remote browser sandbox to according to the prompt.
|
|
30
|
+
|
|
31
|
+
# Args:
|
|
32
|
+
# prompt (str): The prompt to be used.
|
|
33
|
+
|
|
34
|
+
# Returns:
|
|
35
|
+
# str: The response from the sandbox.
|
|
36
|
+
# """
|
|
37
|
+
# ...
|
|
@@ -12,19 +12,29 @@
|
|
|
12
12
|
# See the License for the specific language governing permissions and
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
|
|
15
|
-
|
|
15
|
+
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
|
|
16
16
|
|
|
17
|
+
from veadk.config import getenv
|
|
18
|
+
from veadk.utils.mcp_utils import get_mcp_params
|
|
17
19
|
|
|
18
|
-
|
|
19
|
-
"""Execute code in sandbox.
|
|
20
|
+
url = getenv("TOOL_CODE_SANDBOX_URL")
|
|
20
21
|
|
|
21
|
-
Args:
|
|
22
|
-
code (str): The code to be executed.
|
|
23
|
-
language (str): The language of the code.
|
|
24
22
|
|
|
25
|
-
|
|
26
|
-
str: The response from the sandbox.
|
|
27
|
-
"""
|
|
23
|
+
code_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
|
|
28
24
|
|
|
29
|
-
|
|
30
|
-
|
|
25
|
+
# code_sandbox = ...
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# def code_execution(code: str, language: str) -> str:
|
|
29
|
+
# """Execute code in sandbox.
|
|
30
|
+
|
|
31
|
+
# Args:
|
|
32
|
+
# code (str): The code to be executed.
|
|
33
|
+
# language (str): The language of the code.
|
|
34
|
+
|
|
35
|
+
# Returns:
|
|
36
|
+
# str: The response from the sandbox.
|
|
37
|
+
# """
|
|
38
|
+
|
|
39
|
+
# res = code_sandbox(code, language)
|
|
40
|
+
# return res
|
|
@@ -12,16 +12,23 @@
|
|
|
12
12
|
# See the License for the specific language governing permissions and
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
|
|
15
|
-
|
|
15
|
+
from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
|
|
16
16
|
|
|
17
|
+
from veadk.config import getenv
|
|
18
|
+
from veadk.utils.mcp_utils import get_mcp_params
|
|
17
19
|
|
|
18
|
-
|
|
19
|
-
"""Using the remote computer sandbox to according to the prompt.
|
|
20
|
+
url = getenv("TOOL_COMPUTER_SANDBOX_URL")
|
|
20
21
|
|
|
21
|
-
Args:
|
|
22
|
-
prompt (str): The prompt to be used.
|
|
23
22
|
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
23
|
+
computer_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
|
|
24
|
+
|
|
25
|
+
# def computer_use(prompt: str) -> str:
|
|
26
|
+
# """Using the remote computer sandbox to according to the prompt.
|
|
27
|
+
|
|
28
|
+
# Args:
|
|
29
|
+
# prompt (str): The prompt to be used.
|
|
30
|
+
|
|
31
|
+
# Returns:
|
|
32
|
+
# str: The response from the sandbox.
|
|
33
|
+
# """
|
|
34
|
+
# ...
|
veadk/tracing/base_tracer.py
CHANGED
|
@@ -12,15 +12,7 @@
|
|
|
12
12
|
# See the License for the specific language governing permissions and
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
|
|
15
|
-
import json
|
|
16
15
|
from abc import ABC, abstractmethod
|
|
17
|
-
from typing import Any, Optional
|
|
18
|
-
|
|
19
|
-
from google.adk.agents.callback_context import CallbackContext
|
|
20
|
-
from google.adk.models.llm_request import LlmRequest
|
|
21
|
-
from google.adk.models.llm_response import LlmResponse
|
|
22
|
-
from google.adk.tools import BaseTool, ToolContext
|
|
23
|
-
from opentelemetry import trace
|
|
24
16
|
|
|
25
17
|
from veadk.utils.logger import get_logger
|
|
26
18
|
|
|
@@ -29,197 +21,11 @@ logger = get_logger(__name__)
|
|
|
29
21
|
|
|
30
22
|
class BaseTracer(ABC):
|
|
31
23
|
def __init__(self, name: str):
|
|
32
|
-
self.
|
|
33
|
-
|
|
24
|
+
self.name = name
|
|
25
|
+
self._trace_id = "<unknown_trace_id>"
|
|
26
|
+
self._trace_file_path = "<unknown_trace_file_path>"
|
|
34
27
|
|
|
35
28
|
@abstractmethod
|
|
36
|
-
def dump(self, user_id: str, session_id: str, path: str = "/tmp") -> str:
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
self, callback_context: CallbackContext, llm_request: LlmRequest
|
|
40
|
-
) -> Optional[LlmResponse]:
|
|
41
|
-
"""agent run stage"""
|
|
42
|
-
trace.get_tracer("gcp.vertex.agent")
|
|
43
|
-
span = trace.get_current_span()
|
|
44
|
-
# logger.debug(f"llm_request: {llm_request}")
|
|
45
|
-
|
|
46
|
-
req = llm_request.model_dump()
|
|
47
|
-
|
|
48
|
-
app_name = getattr(self, "app_name", "veadk_app")
|
|
49
|
-
agent_name = callback_context.agent_name
|
|
50
|
-
model_name = req.get("model", "unknown")
|
|
51
|
-
max_tokens = (
|
|
52
|
-
None
|
|
53
|
-
if not req.get("live_connect_config")
|
|
54
|
-
else req["live_connect_config"].get("max_output_tokens", None)
|
|
55
|
-
)
|
|
56
|
-
temperature = (
|
|
57
|
-
None
|
|
58
|
-
if not req.get("live_connect_config")
|
|
59
|
-
else req["live_connect_config"].get("temperature", None)
|
|
60
|
-
)
|
|
61
|
-
top_p = (
|
|
62
|
-
None
|
|
63
|
-
if not req.get("live_connect_config")
|
|
64
|
-
else req["live_connect_config"].get("top_p", None)
|
|
65
|
-
)
|
|
66
|
-
|
|
67
|
-
attributes = {}
|
|
68
|
-
attributes["agent.name"] = agent_name
|
|
69
|
-
attributes["app.name"] = app_name
|
|
70
|
-
attributes["gen_ai.system"] = "veadk"
|
|
71
|
-
if model_name:
|
|
72
|
-
attributes["gen_ai.request.model"] = model_name
|
|
73
|
-
attributes["gen_ai.response.model"] = (
|
|
74
|
-
model_name # The req model and the resp model should be consistent.
|
|
75
|
-
)
|
|
76
|
-
attributes["gen_ai.request.type"] = "completion"
|
|
77
|
-
if max_tokens:
|
|
78
|
-
attributes["gen_ai.request.max_tokens"] = max_tokens
|
|
79
|
-
if temperature:
|
|
80
|
-
attributes["gen_ai.request.temperature"] = temperature
|
|
81
|
-
if top_p:
|
|
82
|
-
attributes["gen_ai.request.top_p"] = top_p
|
|
83
|
-
|
|
84
|
-
# Print attributes for debugging
|
|
85
|
-
# print("Tracing attributes:", attributes)
|
|
86
|
-
|
|
87
|
-
# Set all attributes at once if possible, else fallback to individual
|
|
88
|
-
if hasattr(span, "set_attributes"):
|
|
89
|
-
span.set_attributes(attributes)
|
|
90
|
-
else:
|
|
91
|
-
# Fallback for OpenTelemetry versions without set_attributes
|
|
92
|
-
for k, v in attributes.items():
|
|
93
|
-
span.set_attribute(k, v)
|
|
94
|
-
|
|
95
|
-
def tracer_hook_after_model(
|
|
96
|
-
self, callback_context: CallbackContext, llm_response: LlmResponse
|
|
97
|
-
) -> Optional[LlmResponse]:
|
|
98
|
-
"""call llm stage"""
|
|
99
|
-
trace.get_tracer("gcp.vertex.agent")
|
|
100
|
-
span = trace.get_current_span()
|
|
101
|
-
# logger.debug(f"llm_response: {llm_response}")
|
|
102
|
-
# logger.debug(f"callback_context: {callback_context}")
|
|
103
|
-
|
|
104
|
-
# Refined: collect all attributes, use set_attributes, print for debugging
|
|
105
|
-
attributes = {}
|
|
106
|
-
|
|
107
|
-
app_name = getattr(self, "app_name", "veadk_app")
|
|
108
|
-
agent_name = callback_context.agent_name
|
|
109
|
-
attributes["agent.name"] = agent_name
|
|
110
|
-
attributes["app.name"] = app_name
|
|
111
|
-
|
|
112
|
-
# prompt
|
|
113
|
-
user_content = callback_context.user_content
|
|
114
|
-
role = None
|
|
115
|
-
content = None
|
|
116
|
-
if getattr(user_content, "role", None):
|
|
117
|
-
role = getattr(user_content, "role", None)
|
|
118
|
-
|
|
119
|
-
if user_content and getattr(user_content, "parts", None):
|
|
120
|
-
content = user_content.model_dump(exclude_none=True).get("parts", None)
|
|
121
|
-
content = json.dumps(content) if content else None
|
|
122
|
-
|
|
123
|
-
if role and content:
|
|
124
|
-
attributes["gen_ai.prompt.0.role"] = role
|
|
125
|
-
attributes["gen_ai.prompt.0.content"] = content
|
|
126
|
-
|
|
127
|
-
# completion
|
|
128
|
-
completion_content = getattr(llm_response, "content").model_dump(
|
|
129
|
-
exclude_none=True
|
|
130
|
-
)
|
|
131
|
-
if completion_content:
|
|
132
|
-
content = json.dumps(
|
|
133
|
-
getattr(llm_response, "content").model_dump(exclude_none=True)["parts"]
|
|
134
|
-
)
|
|
135
|
-
role = getattr(llm_response, "content").model_dump(exclude_none=True)[
|
|
136
|
-
"role"
|
|
137
|
-
]
|
|
138
|
-
if role and content:
|
|
139
|
-
attributes["gen_ai.completion.0.role"] = role
|
|
140
|
-
attributes["gen_ai.completion.0.content"] = content
|
|
141
|
-
|
|
142
|
-
if not llm_response.usage_metadata:
|
|
143
|
-
return
|
|
144
|
-
|
|
145
|
-
# tokens
|
|
146
|
-
metadata = llm_response.usage_metadata.model_dump()
|
|
147
|
-
if metadata:
|
|
148
|
-
prompt_tokens = metadata.get("prompt_token_count", None)
|
|
149
|
-
completion_tokens = metadata.get("candidates_token_count", None)
|
|
150
|
-
total_tokens = metadata.get("total_token_count", None)
|
|
151
|
-
cache_read_input_tokens = (
|
|
152
|
-
metadata.get("cache_read_input_tokens") or 0
|
|
153
|
-
) # Might change, once openai introduces their equivalent.
|
|
154
|
-
cache_create_input_tokens = (
|
|
155
|
-
metadata.get("cache_create_input_tokens") or 0
|
|
156
|
-
) # Might change, once openai introduces their equivalent.
|
|
157
|
-
if prompt_tokens:
|
|
158
|
-
attributes["gen_ai.usage.prompt_tokens"] = prompt_tokens
|
|
159
|
-
if completion_tokens:
|
|
160
|
-
attributes["gen_ai.usage.completion_tokens"] = completion_tokens
|
|
161
|
-
if total_tokens:
|
|
162
|
-
attributes["gen_ai.usage.total_tokens"] = total_tokens
|
|
163
|
-
if cache_read_input_tokens is not None:
|
|
164
|
-
attributes["gen_ai.usage.cache_read_input_tokens"] = (
|
|
165
|
-
cache_read_input_tokens
|
|
166
|
-
)
|
|
167
|
-
if cache_create_input_tokens is not None:
|
|
168
|
-
attributes["gen_ai.usage.cache_create_input_tokens"] = (
|
|
169
|
-
cache_create_input_tokens
|
|
170
|
-
)
|
|
171
|
-
|
|
172
|
-
# Print attributes for debugging
|
|
173
|
-
# print("Tracing attributes:", attributes)
|
|
174
|
-
|
|
175
|
-
# Set all attributes at once if possible, else fallback to individual
|
|
176
|
-
if hasattr(span, "set_attributes"):
|
|
177
|
-
span.set_attributes(attributes)
|
|
178
|
-
else:
|
|
179
|
-
# Fallback for OpenTelemetry versions without set_attributes
|
|
180
|
-
for k, v in attributes.items():
|
|
181
|
-
span.set_attribute(k, v)
|
|
182
|
-
|
|
183
|
-
def tracer_hook_after_tool(
|
|
184
|
-
self,
|
|
185
|
-
tool: BaseTool,
|
|
186
|
-
args: dict[str, Any],
|
|
187
|
-
tool_context: ToolContext,
|
|
188
|
-
tool_response: dict,
|
|
189
|
-
):
|
|
190
|
-
trace.get_tracer("gcp.vertex.agent")
|
|
191
|
-
span = trace.get_current_span()
|
|
192
|
-
agent_name = tool_context.agent_name
|
|
193
|
-
tool_name = tool.name
|
|
194
|
-
app_name = getattr(self, "app_name", "veadk_app")
|
|
195
|
-
attributes = {
|
|
196
|
-
"agent.name": agent_name,
|
|
197
|
-
"app.name": app_name,
|
|
198
|
-
"tool.name": tool_name,
|
|
199
|
-
}
|
|
200
|
-
|
|
201
|
-
# Set all attributes at once if possible, else fallback to individual
|
|
202
|
-
if hasattr(span, "set_attributes"):
|
|
203
|
-
span.set_attributes(attributes)
|
|
204
|
-
else:
|
|
205
|
-
# Fallback for OpenTelemetry versions without set_attributes
|
|
206
|
-
for k, v in attributes.items():
|
|
207
|
-
span.set_attribute(k, v)
|
|
208
|
-
|
|
209
|
-
def set_app_name(self, app_name):
|
|
210
|
-
self.app_name = app_name
|
|
211
|
-
|
|
212
|
-
def do_hooks(self, agent) -> None:
|
|
213
|
-
if not getattr(agent, "before_model_callback", None):
|
|
214
|
-
agent.before_model_callback = []
|
|
215
|
-
if not getattr(agent, "after_model_callback", None):
|
|
216
|
-
agent.after_model_callback = []
|
|
217
|
-
if not getattr(agent, "after_tool_callback", None):
|
|
218
|
-
agent.after_tool_callback = []
|
|
219
|
-
|
|
220
|
-
if self.tracer_hook_before_model not in agent.before_model_callback:
|
|
221
|
-
agent.before_model_callback.append(self.tracer_hook_before_model)
|
|
222
|
-
if self.tracer_hook_after_model not in agent.after_model_callback:
|
|
223
|
-
agent.after_model_callback.append(self.tracer_hook_after_model)
|
|
224
|
-
if self.tracer_hook_after_tool not in agent.after_tool_callback:
|
|
225
|
-
agent.after_tool_callback.append(self.tracer_hook_after_tool)
|
|
29
|
+
def dump(self, user_id: str, session_id: str, path: str = "/tmp") -> str:
|
|
30
|
+
"""Dump the trace data to a local file."""
|
|
31
|
+
...
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from veadk.tracing.telemetry.attributes.extractors.common_attributes_extractors import (
|
|
16
|
+
COMMON_ATTRIBUTES,
|
|
17
|
+
)
|
|
18
|
+
from veadk.tracing.telemetry.attributes.extractors.llm_attributes_extractors import (
|
|
19
|
+
LLM_ATTRIBUTES,
|
|
20
|
+
)
|
|
21
|
+
from veadk.tracing.telemetry.attributes.extractors.tool_attributes_extractors import (
|
|
22
|
+
TOOL_ATTRIBUTES,
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
ATTRIBUTES = {
|
|
26
|
+
"common": COMMON_ATTRIBUTES,
|
|
27
|
+
"llm": LLM_ATTRIBUTES,
|
|
28
|
+
"tool": TOOL_ATTRIBUTES,
|
|
29
|
+
}
|