veadk-python 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of veadk-python might be problematic.

Files changed (30)
  1. veadk/agent.py +28 -8
  2. veadk/cli/cli_deploy.py +3 -1
  3. veadk/cloud/cloud_app.py +21 -6
  4. veadk/consts.py +14 -1
  5. veadk/database/viking/viking_database.py +3 -3
  6. veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/clean.py +23 -0
  7. veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/app.py +4 -1
  8. veadk/integrations/ve_faas/template/{{cookiecutter.local_dir_name}}/src/run.sh +11 -1
  9. veadk/integrations/ve_tos/ve_tos.py +176 -0
  10. veadk/runner.py +107 -34
  11. veadk/tools/builtin_tools/image_edit.py +236 -0
  12. veadk/tools/builtin_tools/image_generate.py +236 -0
  13. veadk/tools/builtin_tools/video_generate.py +326 -0
  14. veadk/tools/sandbox/browser_sandbox.py +19 -9
  15. veadk/tools/sandbox/code_sandbox.py +21 -11
  16. veadk/tools/sandbox/computer_sandbox.py +16 -9
  17. veadk/tracing/base_tracer.py +0 -19
  18. veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +65 -6
  19. veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py +20 -14
  20. veadk/tracing/telemetry/exporters/inmemory_exporter.py +3 -0
  21. veadk/tracing/telemetry/opentelemetry_tracer.py +4 -1
  22. veadk/tracing/telemetry/telemetry.py +113 -24
  23. veadk/utils/misc.py +40 -0
  24. veadk/version.py +1 -1
  25. {veadk_python-0.2.4.dist-info → veadk_python-0.2.5.dist-info}/METADATA +1 -1
  26. {veadk_python-0.2.4.dist-info → veadk_python-0.2.5.dist-info}/RECORD +30 -25
  27. {veadk_python-0.2.4.dist-info → veadk_python-0.2.5.dist-info}/WHEEL +0 -0
  28. {veadk_python-0.2.4.dist-info → veadk_python-0.2.5.dist-info}/entry_points.txt +0 -0
  29. {veadk_python-0.2.4.dist-info → veadk_python-0.2.5.dist-info}/licenses/LICENSE +0 -0
  30. {veadk_python-0.2.4.dist-info → veadk_python-0.2.5.dist-info}/top_level.txt +0 -0
veadk/tools/builtin_tools/video_generate.py
@@ -0,0 +1,326 @@
+ # Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Dict
+ from google.adk.tools import ToolContext
+ from volcenginesdkarkruntime import Ark
+ from veadk.config import getenv
+ import time
+ import traceback
+ import json
+ from veadk.version import VERSION
+ from opentelemetry import trace
+ from opentelemetry.trace import Span
+
+ from veadk.utils.logger import get_logger
+
+ logger = get_logger(__name__)
+
+ client = Ark(
+     api_key=getenv("MODEL_VIDEO_API_KEY"),
+     base_url=getenv("MODEL_VIDEO_API_BASE"),
+ )
+
+
+ async def generate(prompt, first_frame_image=None, last_frame_image=None):
+     try:
+         if first_frame_image is None:
+             logger.debug("text generation")
+             response = client.content_generation.tasks.create(
+                 model=getenv("MODEL_VIDEO_NAME"),
+                 content=[
+                     {"type": "text", "text": prompt},
+                 ],
+             )
+         elif last_frame_image is None:
+             logger.debug("first frame generation")
+             response = client.content_generation.tasks.create(
+                 model=getenv("MODEL_VIDEO_NAME"),
+                 content=[
+                     {"type": "text", "text": prompt},
+                     {
+                         "type": "image_url",
+                         "image_url": {"url": first_frame_image},
+                     },
+                 ],
+             )
+         else:
+             logger.debug("last frame generation")
+             response = client.content_generation.tasks.create(
+                 model=getenv("MODEL_VIDEO_NAME"),
+                 content=[
+                     {"type": "text", "text": prompt},
+                     {
+                         "type": "image_url",
+                         "image_url": {"url": first_frame_image},
+                         "role": "first_frame",
+                     },
+                     {
+                         "type": "image_url",
+                         "image_url": {"url": last_frame_image},
+                         "role": "last_frame",
+                     },
+                 ],
+             )
+     except:
+         traceback.print_exc()
+         raise
+     return response
+
+
+ async def video_generate(params: list, tool_context: ToolContext) -> Dict:
+     """
+     Generate videos in **batch** from text prompts, optionally guided by a first/last frame,
+     and fine-tuned via *model text commands* (a.k.a. `parameters` appended to the prompt).
+
+     This API creates video-generation tasks. Each item in `params` describes a single video.
+     The function submits all items in one call and returns task metadata for tracking.
+
+     Args:
+         params (list[dict]):
+             A list of video generation requests. Each item supports the fields below.
+
+             Required per item:
+                 - video_name (str):
+                     Name/identifier of the output video file.
+
+                 - prompt (str):
+                     Text describing the video to generate. Supports zh/EN.
+                     You may append **model text commands** after the prompt to control resolution,
+                     aspect ratio, duration, fps, watermark, seed, camera lock, etc.
+                     Format: `... --rs <resolution> --rt <ratio> --dur <seconds> --fps <fps> --wm <bool> --seed <int> --cf <bool>`
+                     Example:
+                         "小猫骑着滑板穿过公园。 --rs 720p --rt 16:9 --dur 5 --fps 24 --wm true --seed 11 --cf false"
+
+             Optional per item:
+                 - first_frame (str | None):
+                     URL or Base64 string (data URL) for the **first frame** (role = `first_frame`).
+                     Use when you want the clip to start from a specific image.
+
+                 - last_frame (str | None):
+                     URL or Base64 string (data URL) for the **last frame** (role = `last_frame`).
+                     Use when you want the clip to end on a specific image.
+
+             Notes on first/last frame:
+                 * When both frames are provided, **match width/height** to avoid cropping; if they differ,
+                   the tail frame may be auto-cropped to fit.
+                 * If you only need one guided frame, provide either `first_frame` or `last_frame` (not both).
+
+             Image input constraints (for first/last frame):
+                 - Formats: jpeg, png, webp, bmp, tiff, gif
+                 - Aspect ratio (width:height): 0.4–2.5
+                 - Width/Height (px): 300–6000
+                 - Size: < 30 MB
+                 - Base64 data URL example: `data:image/png;base64,<BASE64>`
+
+     Model text commands (append after the prompt; unsupported keys are ignored by some models):
+         --rs / --resolution <value>       Video resolution. Common values: 480p, 720p, 1080p.
+                                           Default depends on the model (e.g., doubao-seedance-1-0-pro: 1080p;
+                                           some others default to 720p).
+
+         --rt / --ratio <value>            Aspect ratio. Typical: 16:9 (default), 9:16, 4:3, 3:4, 1:1, 2:1, 21:9.
+                                           Some models support `keep_ratio` (keep the source image ratio) or
+                                           `adaptive` (auto-choose a suitable ratio).
+
+         --dur / --duration <seconds>      Clip length in seconds. Seedance supports **3–12 s**;
+                                           Wan2.1 only supports 5 s. Default varies by model.
+
+         --fps / --framespersecond <int>   Frame rate. Common: 16 or 24 (model-dependent; e.g., seaweed=24, wan2.1=16).
+
+         --wm / --watermark <true|false>   Whether to add a watermark. Default: **false** (per doc).
+
+         --seed <int>                      Random seed in [-1, 2^32-1]. Default **-1** = auto seed.
+                                           The same seed may yield similar (not guaranteed identical) results across runs.
+
+         --cf / --camerafixed <true|false> Lock camera movement. Some models support this flag.
+                                           true: try to keep the camera fixed; false: allow movement. Default: **false**.
+
+     Returns:
+         Dict:
+             API response containing task creation results for each input item. A typical shape is:
+             {
+                 "status": "success",
+                 "success_list": [{"video_name": "video_url"}],
+                 "error_list": []
+             }
+
+     Constraints & Tips:
+         - Keep the prompt concise and focused (≤ 500 characters recommended); too many details may distract the model.
+         - If using first/last frames, ensure their **aspect ratio matches** your chosen `--rt` to minimize cropping.
+         - If you must reproduce results, specify an explicit `--seed`.
+         - Unsupported parameters are ignored silently or may cause validation errors (model-specific).
+
+     Minimal examples:
+         1) Text-only batch of two 5-second clips at 720p, 16:9, 24 fps:
+             params = [
+                 {
+                     "video_name": "cat_park.mp4",
+                     "prompt": "小猫骑着滑板穿过公园。 --rs 720p --rt 16:9 --dur 5 --fps 24 --wm false"
+                 },
+                 {
+                     "video_name": "city_night.mp4",
+                     "prompt": "霓虹灯下的城市延时摄影风。 --rs 720p --rt 16:9 --dur 5 --fps 24 --seed 7"
+                 },
+             ]
+
+         2) With a guided first/last frame (square, 6 s, camera fixed):
+             params = [
+                 {
+                     "video_name": "logo_reveal.mp4",
+                     "first_frame": "https://cdn.example.com/brand/logo_start.png",
+                     "last_frame": "https://cdn.example.com/brand/logo_end.png",
+                     "prompt": "品牌 Logo 从线稿到上色的变化。 --rs 1080p --rt 1:1 --dur 6 --fps 24 --cf true"
+                 }
+             ]
+     """
+     batch_size = 10
+     success_list = []
+     error_list = []
+     tracer = trace.get_tracer("gcp.vertex.agent")
+     with tracer.start_as_current_span("call_llm") as span:
+         input_part = {"role": "user"}
+         output_part = {"message.role": "model"}
+
+         for idx, item in enumerate(params):
+             input_part[f"parts.{idx}.type"] = "text"
+             input_part[f"parts.{idx}.text"] = json.dumps(item, ensure_ascii=False)
+
+         for start_idx in range(0, len(params), batch_size):
+             batch = params[start_idx : start_idx + batch_size]
+             task_dict = {}
+             for idx, item in enumerate(batch):
+                 video_name = item["video_name"]
+                 prompt = item["prompt"]
+                 first_frame = item.get("first_frame", None)
+                 last_frame = item.get("last_frame", None)
+                 try:
+                     if not first_frame:
+                         response = await generate(prompt)
+                     elif not last_frame:
+                         response = await generate(prompt, first_frame)
+                     else:
+                         response = await generate(prompt, first_frame, last_frame)
+                     task_dict[response.id] = video_name
+                 except Exception as e:
+                     logger.error(f"Error: {e}")
+                     error_list.append(video_name)
+
+             total_tokens = 0
+             while True:
+                 task_list = list(task_dict.keys())
+                 if len(task_list) == 0:
+                     break
+                 for idx, task_id in enumerate(task_list):
+                     result = client.content_generation.tasks.get(task_id=task_id)
+                     status = result.status
+                     if status == "succeeded":
+                         logger.debug("----- task succeeded -----")
+                         tool_context.state[f"{task_dict[task_id]}_video_url"] = (
+                             result.content.video_url
+                         )
+                         total_tokens += result.usage.completion_tokens
+                         output_part[f"message.parts.{idx}.type"] = "text"
+                         output_part[f"message.parts.{idx}.text"] = (
+                             f"{task_dict[task_id]}: {result.content.video_url}"
+                         )
+                         success_list.append(
+                             {task_dict[task_id]: result.content.video_url}
+                         )
+                         task_dict.pop(task_id, None)
+                     elif status == "failed":
+                         logger.error("----- task failed -----")
+                         logger.error(f"Error: {result.error}")
+                         error_list.append(task_dict[task_id])
+                         task_dict.pop(task_id, None)
+                     else:
+                         logger.debug(
+                             f"Current status: {status}, Retrying after 10 seconds..."
+                         )
+                         time.sleep(10)
+
+         add_span_attributes(
+             span,
+             tool_context,
+             input_part=input_part,
+             output_part=output_part,
+             output_tokens=total_tokens,
+             total_tokens=total_tokens,
+             request_model=getenv("MODEL_VIDEO_NAME"),
+             response_model=getenv("MODEL_VIDEO_NAME"),
+         )
+
+     if len(success_list) == 0:
+         return {
+             "status": "error",
+             "success_list": success_list,
+             "error_list": error_list,
+         }
+     else:
+         return {
+             "status": "success",
+             "success_list": success_list,
+             "error_list": error_list,
+         }
+
+
+ def add_span_attributes(
+     span: Span,
+     tool_context: ToolContext,
+     input_part: dict = None,
+     output_part: dict = None,
+     input_tokens: int = None,
+     output_tokens: int = None,
+     total_tokens: int = None,
+     request_model: str = None,
+     response_model: str = None,
+ ):
+     try:
+         # common attributes
+         app_name = tool_context._invocation_context.app_name
+         user_id = tool_context._invocation_context.user_id
+         agent_name = tool_context.agent_name
+         session_id = tool_context._invocation_context.session.id
+         span.set_attribute("gen_ai.agent.name", agent_name)
+         span.set_attribute("openinference.instrumentation.veadk", VERSION)
+         span.set_attribute("gen_ai.app.name", app_name)
+         span.set_attribute("gen_ai.user.id", user_id)
+         span.set_attribute("gen_ai.session.id", session_id)
+         span.set_attribute("agent_name", agent_name)
+         span.set_attribute("agent.name", agent_name)
+         span.set_attribute("app_name", app_name)
+         span.set_attribute("app.name", app_name)
+         span.set_attribute("user.id", user_id)
+         span.set_attribute("session.id", session_id)
+         span.set_attribute("cozeloop.report.source", "veadk")
+
+         # llm attributes
+         span.set_attribute("gen_ai.system", "openai")
+         span.set_attribute("gen_ai.operation.name", "chat")
+         if request_model:
+             span.set_attribute("gen_ai.request.model", request_model)
+         if response_model:
+             span.set_attribute("gen_ai.response.model", response_model)
+         if total_tokens:
+             span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
+         if output_tokens:
+             span.set_attribute("gen_ai.usage.output_tokens", output_tokens)
+         if input_tokens:
+             span.set_attribute("gen_ai.usage.input_tokens", input_tokens)
+         if input_part:
+             span.add_event("gen_ai.user.message", input_part)
+         if output_part:
+             span.add_event("gen_ai.choice", output_part)
+
+     except Exception:
+         traceback.print_exc()
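
Note: the polling loop above calls time.sleep(10) inside an async tool, which blocks the event loop for the whole wait. A minimal non-blocking sketch of the same pattern, assuming the module-level Ark client and task lifecycle shown above (illustrative, not the shipped code):

    import asyncio

    async def poll_video_tasks(task_dict: dict) -> dict:
        # Hypothetical variant of the loop above: await asyncio.sleep()
        # yields control to the event loop instead of blocking it the
        # way time.sleep(10) does inside an async function.
        urls = {}
        while task_dict:
            for task_id in list(task_dict):
                result = client.content_generation.tasks.get(task_id=task_id)
                if result.status == "succeeded":
                    urls[task_dict.pop(task_id)] = result.content.video_url
                elif result.status == "failed":
                    task_dict.pop(task_id)
            if task_dict:
                await asyncio.sleep(10)  # non-blocking wait between polls
        return urls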
veadk/tools/sandbox/browser_sandbox.py
@@ -12,16 +12,26 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- browser_sandbox = ...
+ from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset

+ from veadk.config import getenv
+ from veadk.utils.mcp_utils import get_mcp_params

- def browser_use(prompt: str) -> str:
-     """Using the remote browser sandbox according to the prompt.
+ url = getenv("TOOL_BROWSER_SANDBOX_URL")

-     Args:
-         prompt (str): The prompt to be used.

-     Returns:
-         str: The response from the sandbox.
-     """
-     ...
+ browser_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
+
+ # browser_sandbox = ...
+
+
+ # def browser_use(prompt: str) -> str:
+ #     """Using the remote browser sandbox according to the prompt.
+
+ #     Args:
+ #         prompt (str): The prompt to be used.
+
+ #     Returns:
+ #         str: The response from the sandbox.
+ #     """
+ #     ...
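
An MCPToolset behaves like any other ADK toolset, so the new module-level browser_sandbox can presumably be handed straight to an agent's tool list. A hedged sketch (the Agent import path and constructor keywords are assumptions, not shown in this diff):

    from veadk.agent import Agent  # assumed import path
    from veadk.tools.sandbox.browser_sandbox import browser_sandbox

    # Hypothetical wiring: the MCP-backed toolset joins the agent's
    # tools, replacing the old browser_use(prompt) stub removed above.
    agent = Agent(
        name="browser_agent",     # illustrative name
        tools=[browser_sandbox],  # remote browser sandbox over MCP
    )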
veadk/tools/sandbox/code_sandbox.py
@@ -12,19 +12,29 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- code_sandbox = ...
+ from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset

+ from veadk.config import getenv
+ from veadk.utils.mcp_utils import get_mcp_params

- def code_execution(code: str, language: str) -> str:
-     """Execute code in sandbox.
+ url = getenv("TOOL_CODE_SANDBOX_URL")

-     Args:
-         code (str): The code to be executed.
-         language (str): The language of the code.

-     Returns:
-         str: The response from the sandbox.
-     """
+ code_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))

-     res = code_sandbox(code, language)
-     return res
+ # code_sandbox = ...
+
+
+ # def code_execution(code: str, language: str) -> str:
+ #     """Execute code in sandbox.
+
+ #     Args:
+ #         code (str): The code to be executed.
+ #         language (str): The language of the code.
+
+ #     Returns:
+ #         str: The response from the sandbox.
+ #     """
+
+ #     res = code_sandbox(code, language)
+ #     return res
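
The helper get_mcp_params is not part of this diff; judging by its call sites, it plausibly wraps the sandbox endpoint in MCP connection parameters. A sketch under that assumption (SseServerParams is ADK's SSE parameter class; the real veadk/utils/mcp_utils.py may differ):

    from google.adk.tools.mcp_tool.mcp_toolset import SseServerParams

    def get_mcp_params(url: str) -> SseServerParams:
        # Assumed behavior: point MCPToolset at a remote MCP server
        # reachable over Server-Sent Events at the given URL.
        return SseServerParams(url=url)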
veadk/tools/sandbox/computer_sandbox.py
@@ -12,16 +12,23 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- computer_sandbox = ...
+ from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset

+ from veadk.config import getenv
+ from veadk.utils.mcp_utils import get_mcp_params

- def computer_use(prompt: str) -> str:
-     """Using the remote computer sandbox according to the prompt.
+ url = getenv("TOOL_COMPUTER_SANDBOX_URL")

-     Args:
-         prompt (str): The prompt to be used.

-     Returns:
-         str: The response from the sandbox.
-     """
-     ...
+ computer_sandbox = MCPToolset(connection_params=get_mcp_params(url=url))
+
+ # def computer_use(prompt: str) -> str:
+ #     """Using the remote computer sandbox according to the prompt.
+
+ #     Args:
+ #         prompt (str): The prompt to be used.
+
+ #     Returns:
+ #         str: The response from the sandbox.
+ #     """
+ #     ...
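
Because all three sandbox modules now build their MCPToolset at import time, importing one with its TOOL_*_SANDBOX_URL unset either raises or yields a toolset with an empty endpoint, depending on how veadk.config.getenv handles missing keys. A defensive variant one could use instead (illustrative only; the shipped modules construct the toolset unconditionally):

    import os

    from google.adk.tools.mcp_tool.mcp_toolset import MCPToolset
    from veadk.utils.mcp_utils import get_mcp_params

    # Only build the toolset when the endpoint is configured, so a bare
    # import of the module cannot fail on a missing environment variable.
    url = os.environ.get("TOOL_COMPUTER_SANDBOX_URL")
    computer_sandbox = (
        MCPToolset(connection_params=get_mcp_params(url=url)) if url else None
    )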
veadk/tracing/base_tracer.py
@@ -19,25 +19,6 @@ from veadk.utils.logger import get_logger
  logger = get_logger(__name__)


- def replace_bytes_with_empty(data):
-     """
-     Recursively traverse the data structure and replace all bytes values with a placeholder string.
-     Supports handling any nested structure of lists and dictionaries.
-     """
-     if isinstance(data, dict):
-         # Handle dictionary: recursively process each value
-         return {k: replace_bytes_with_empty(v) for k, v in data.items()}
-     elif isinstance(data, list):
-         # Handle list: recursively process each element
-         return [replace_bytes_with_empty(item) for item in data]
-     elif isinstance(data, bytes):
-         # When encountering the bytes type, replace it with a placeholder
-         return "<image data>"
-     else:
-         # Keep other types unchanged
-         return data
-
-
  class BaseTracer(ABC):
      def __init__(self, name: str):
          self.name = name
veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py
@@ -18,6 +18,7 @@ from veadk.tracing.telemetry.attributes.extractors.types import (
      ExtractorResponse,
      LLMAttributesParams,
  )
+ from veadk.utils.misc import safe_json_serialize


  def llm_gen_ai_request_model(params: LLMAttributesParams) -> ExtractorResponse:
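
safe_json_serialize comes from veadk/utils/misc.py (+40 lines in this release); its body is not shown in this diff, but the json.dumps replacements below suggest a serializer that tolerates values the standard encoder rejects, such as the bytes payloads of inline images. A minimal sketch under that assumption:

    import json

    def safe_json_serialize(obj) -> str:
        # Assumed behavior: like json.dumps, but fall back to str() for
        # values the encoder cannot handle (bytes, custom objects)
        # instead of raising TypeError mid-trace.
        try:
            return json.dumps(obj, ensure_ascii=False, default=str)
        except (TypeError, ValueError):
            return str(obj)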
@@ -102,10 +103,11 @@ def llm_gen_ai_usage_cache_read_input_tokens(
  def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
      # a part is a message
      messages: list[dict] = []
+     idx = 0

      for content in params.llm_request.contents:
          if content.parts:
-             for idx, part in enumerate(content.parts):
+             for part in content.parts:
                  message = {}
                  # text part
                  if part.text:
@@ -133,13 +135,23 @@ def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
                      else "<unknown_function_name>"
                  )
                  message[f"gen_ai.prompt.{idx}.tool_calls.0.function.arguments"] = (
-                     json.dumps(part.function_call.args)
+                     safe_json_serialize(part.function_call.args)
                      if part.function_call.args
                      else json.dumps({})
                  )
+                 # image
+                 if part.inline_data:
+                     message[f"gen_ai.prompt.{idx}.type"] = "image_url"
+                     message[f"gen_ai.prompt.{idx}.image_url.name"] = (
+                         part.inline_data.display_name.split("/")[-1]
+                     )
+                     message[f"gen_ai.prompt.{idx}.image_url.url"] = (
+                         part.inline_data.display_name
+                     )

                  if message:
                      messages.append(message)
+                     idx += 1

      return ExtractorResponse(content=messages)

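The idx change above fixes a key collision: enumerate() restarted the counter for every content, so parts from later messages reused gen_ai.prompt.0.* and overwrote earlier entries in the flat attribute map. A simplified illustration with hypothetical data:

    contents = [["hello"], ["world"]]  # two messages, one text part each
    messages, idx = [], 0
    for parts in contents:
        for text in parts:
            # With a per-message enumerate(), both parts would be numbered
            # 0; a single running counter keeps the flattened keys unique.
            messages.append({f"gen_ai.prompt.{idx}.content": text})
            idx += 1
    # -> gen_ai.prompt.0.content and gen_ai.prompt.1.content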
@@ -168,7 +180,7 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
                      else "<unknown_function_name>"
                  )
                  message[f"gen_ai.completion.{idx}.tool_calls.0.function.arguments"] = (
-                     json.dumps(part.function_call.args)
+                     safe_json_serialize(part.function_call.args)
                      if part.function_call.args
                      else json.dumps({})
                  )
@@ -234,6 +246,14 @@ def llm_gen_ai_user_message(params: LLMAttributesParams) -> ExtractorResponse:
                      message_part[f"parts.{idx}.content"] = str(
                          part.function_response
                      )
+                 if part.inline_data:
+                     message_part[f"parts.{idx}.type"] = "image_url"
+                     message_part[f"parts.{idx}.image_url.name"] = (
+                         part.inline_data.display_name.split("/")[-1]
+                     )
+                     message_part[f"parts.{idx}.image_url.url"] = (
+                         part.inline_data.display_name
+                     )

              message_parts.append(message_part)

@@ -289,7 +309,7 @@ def llm_gen_ai_assistant_message(params: LLMAttributesParams) -> ExtractorRespon
                      else "<unknown_function_name>"
                  )
                  message_part["tool_calls.0.function.arguments"] = (
-                     json.dumps(part.function_call.args)
+                     safe_json_serialize(part.function_call.args)
                      if part.function_call.args
                      else json.dumps({})
                  )
@@ -326,7 +346,7 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
                      else "<unknown_function_name>"
                  )
                  message["message.tool_calls.0.function.arguments"] = (
-                     json.dumps(part.function_call.args)
+                     safe_json_serialize(part.function_call.args)
                      if part.function_call.args
                      else json.dumps({})
                  )
@@ -351,7 +371,7 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
                      else "<unknown_function_name>"
                  )
                  message["message.tool_calls.0.function.arguments"] = (
-                     json.dumps(part.function_call.args)
+                     safe_json_serialize(part.function_call.args)
                      if part.function_call.args
                      else json.dumps({})
                  )
@@ -359,6 +379,42 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
      return ExtractorResponse(type="event", content=message)


+ def llm_input_value(params: LLMAttributesParams) -> ExtractorResponse:
+     return ExtractorResponse(
+         content=str(params.llm_request.model_dump(exclude_none=True))
+     )
+
+
+ def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
+     return ExtractorResponse(
+         content=str(params.llm_response.model_dump(exclude_none=True))
+     )
+
+
+ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
+     functions = []
+
+     for idx, (tool_name, tool_instance) in enumerate(
+         params.llm_request.tools_dict.items()
+     ):
+         functions.append(
+             {
+                 f"gen_ai.request.functions.{idx}.name": tool_instance.name,
+                 f"gen_ai.request.functions.{idx}.description": tool_instance.description,
+                 f"gen_ai.request.functions.{idx}.parameters": str(
+                     tool_instance._get_declaration().parameters.model_dump(  # type: ignore
+                         exclude_none=True
+                     )
+                     if tool_instance._get_declaration()
+                     and tool_instance._get_declaration().parameters  # type: ignore
+                     else {}
+                 ),
+             }
+         )
+
+     return ExtractorResponse(content=functions)
+
+
  LLM_ATTRIBUTES = {
      # ===== request attributes =====
      "gen_ai.request.model": llm_gen_ai_request_model,
@@ -366,6 +422,7 @@ LLM_ATTRIBUTES = {
      "gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
      "gen_ai.request.temperature": llm_gen_ai_request_temperature,
      "gen_ai.request.top_p": llm_gen_ai_request_top_p,
+     "gen_ai.request.functions": llm_gen_ai_request_functions,
      # ===== response attributes =====
      "gen_ai.response.model": llm_gen_ai_response_model,
      "gen_ai.response.stop_reason": llm_gen_ai_response_stop_reason,
@@ -383,6 +440,8 @@ LLM_ATTRIBUTES = {
      # attributes
      "gen_ai.prompt": llm_gen_ai_prompt,
      "gen_ai.completion": llm_gen_ai_completion,
+     # "input.value": llm_input_value,
+     # "output.value": llm_output_value,
      # ===== usage =====
      "gen_ai.usage.input_tokens": llm_gen_ai_usage_input_tokens,
      "gen_ai.usage.output_tokens": llm_gen_ai_usage_output_tokens,