veadk-python 0.2.12__py3-none-any.whl → 0.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of veadk-python might be problematic.

@@ -12,22 +12,30 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

+ import asyncio
+ import base64
+ import concurrent.futures
+ import contextvars
+ import json
+ import mimetypes
+ import traceback
  from typing import Dict

  from google.adk.tools import ToolContext
+ from google.genai.types import Blob, Part
+ from opentelemetry import trace
+ from opentelemetry.trace import Span
+ from volcenginesdkarkruntime import Ark
+ from volcenginesdkarkruntime.types.images.images import SequentialImageGenerationOptions
+
  from veadk.config import getenv, settings
  from veadk.consts import (
- DEFAULT_TEXT_TO_IMAGE_MODEL_NAME,
- DEFAULT_TEXT_TO_IMAGE_MODEL_API_BASE,
+ DEFAULT_IMAGE_GENERATE_MODEL_API_BASE,
+ DEFAULT_IMAGE_GENERATE_MODEL_NAME,
  )
- import base64
- from volcenginesdkarkruntime import Ark
- from opentelemetry import trace
- import traceback
- import json
- from veadk.version import VERSION
- from opentelemetry.trace import Span
  from veadk.utils.logger import get_logger
+ from veadk.utils.misc import formatted_timestamp, read_file_to_bytes
+ from veadk.version import VERSION

  logger = get_logger(__name__)

@@ -35,188 +43,329 @@ client = Ark(
  api_key=getenv(
  "MODEL_IMAGE_API_KEY", getenv("MODEL_AGENT_API_KEY", settings.model.api_key)
  ),
- base_url=getenv("MODEL_IMAGE_API_BASE", DEFAULT_TEXT_TO_IMAGE_MODEL_API_BASE),
+ base_url=getenv("MODEL_IMAGE_API_BASE", DEFAULT_IMAGE_GENERATE_MODEL_API_BASE),
  )

+ executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)
+ tracer = trace.get_tracer("veadk")
+
+
+ def _build_input_parts(item: dict, task_type: str, image_field):
+ input_part = {"role": "user"}
+ input_part["parts.0.type"] = "text"
+ input_part["parts.0.text"] = json.dumps(item, ensure_ascii=False)
+
+ if image_field:
+ if task_type.startswith("single"):
+ assert isinstance(image_field, str), (
+ f"single_* task_type image must be str, got {type(image_field)}"
+ )
+ input_part["parts.1.type"] = "image_url"
+ input_part["parts.1.image_url.name"] = "origin_image"
+ input_part["parts.1.image_url.url"] = image_field
+ elif task_type.startswith("multi"):
+ assert isinstance(image_field, list), (
+ f"multi_* task_type image must be list, got {type(image_field)}"
+ )
+ assert len(image_field) <= 10, (
+ f"multi_* task_type image list length must be <= 10, got {len(image_field)}"
+ )
+ for i, image_url in enumerate(image_field):
+ idx = i + 1
+ input_part[f"parts.{idx}.type"] = "image_url"
+ input_part[f"parts.{idx}.image_url.name"] = f"origin_image_{i}"
+ input_part[f"parts.{idx}.image_url.url"] = image_url
+
+ return input_part
+
+
+ def handle_single_task_sync(
+ idx: int, item: dict, tool_context
+ ) -> tuple[list[dict], list[str]]:
+ logger.debug(f"handle_single_task_sync item {idx}: {item}")
+ success_list: list[dict] = []
+ error_list: list[str] = []
+ total_tokens = 0
+ output_tokens = 0
+ output_part = {"message.role": "model"}
+
+ task_type = item.get("task_type", "text_to_single")
+ prompt = item.get("prompt", "")
+ response_format = item.get("response_format", None)
+ size = item.get("size", None)
+ watermark = item.get("watermark", None)
+ image_field = item.get("image", None)
+ sequential_image_generation = item.get("sequential_image_generation", None)
+ max_images = item.get("max_images", None)
+
+ input_part = _build_input_parts(item, task_type, image_field)
+
+ inputs = {"prompt": prompt}
+ if size:
+ inputs["size"] = size
+ if response_format:
+ inputs["response_format"] = response_format
+ if watermark is not None:
+ inputs["watermark"] = watermark
+ if sequential_image_generation:
+ inputs["sequential_image_generation"] = sequential_image_generation
+
+ with tracer.start_as_current_span(f"call_llm_task_{idx}") as span:
+ try:
+ if (
+ sequential_image_generation
+ and sequential_image_generation == "auto"
+ and max_images
+ ):
+ response = client.images.generate(
+ model=getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME),
+ **inputs,
+ sequential_image_generation_options=SequentialImageGenerationOptions(
+ max_images=max_images
+ ),
+ )
+ else:
+ response = client.images.generate(
+ model=getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME),
+ **inputs,
+ )

- async def image_generate(
- params: list,
- tool_context: ToolContext,
- ) -> Dict:
- """
- Generate images in batch according to prompts and optional settings.
+ if not response.error:
+ logger.debug(f"task {idx} Image generate response: {response}")
+
+ total_tokens += getattr(response.usage, "total_tokens", 0) or 0
+ output_tokens += getattr(response.usage, "output_tokens", 0) or 0
+
+ for i, image_data in enumerate(response.data):
+ image_name = f"task_{idx}_image_{i}"
+ if "error" in image_data:
+ logger.error(f"Image {image_name} error: {image_data.error}")
+ error_list.append(image_name)
+ continue
+
+ if getattr(image_data, "url", None):
+ image_url = image_data.url
+ else:
+ b64 = getattr(image_data, "b64_json", None)
+ if not b64:
+ logger.error(
+ f"Image {image_name} missing data (no url/b64)"
+ )
+ error_list.append(image_name)
+ continue
+ image_bytes = base64.b64decode(b64)
+ image_url = _upload_image_to_tos(
+ image_bytes=image_bytes, object_key=f"{image_name}.png"
+ )
+ if not image_url:
+ logger.error(f"Upload image to TOS failed: {image_name}")
+ error_list.append(image_name)
+ continue
+ logger.debug(f"Image saved as ADK artifact: {image_name}")
+
+ tool_context.state[f"{image_name}_url"] = image_url
+ output_part[f"message.parts.{i}.type"] = "image_url"
+ output_part[f"message.parts.{i}.image_url.name"] = image_name
+ output_part[f"message.parts.{i}.image_url.url"] = image_url
+ logger.debug(
+ f"Image {image_name} generated successfully: {image_url}"
+ )
+ success_list.append({image_name: image_url})
+ else:
+ logger.error(
+ f"Task {idx} No images returned by model: {response.error}"
+ )
+ error_list.append(f"task_{idx}")

- Each item in `params` describes a single image-generation request.
+ except Exception as e:
+ logger.error(f"Error in task {idx}: {e}")
+ traceback.print_exc()
+ error_list.append(f"task_{idx}")
+
+ finally:
+ add_span_attributes(
+ span,
+ tool_context,
+ input_part=input_part,
+ output_part=output_part,
+ output_tokens=output_tokens,
+ total_tokens=total_tokens,
+ request_model=getenv(
+ "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
+ ),
+ response_model=getenv(
+ "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
+ ),
+ )
+ logger.debug(
+ f"task {idx} Image generate success_list: {success_list}\nerror_list: {error_list}"
+ )
+ return success_list, error_list
+
+
+ async def image_generate(tasks: list[dict], tool_context) -> Dict:
+ """Generate images with Seedream 4.0.
+
+ Commit batch image generation requests via tasks.

  Args:
- params (list[dict]):
- A list of image generation requests. Each item supports:
-
- Required:
- - prompt (str):
- The textual description of the desired image.
- Supports English and Chinese.
-
- Optional:
- - image_name (str):
- Name/identifier for the generated image.
-
- - response_format (str):
- Format of the returned image.
- * "url": JPEG link (default)
- * "b64_json": Base64 string in JSON
-
- - size (str):
- Resolution of the generated image.
- Default: "1024x1024".
- Must be within [512x512, 2048x2048].
- Common options: 1024x1024, 864x1152, 1280x720, etc.
-
- - guidance_scale (float):
- How strongly the prompt affects the result.
- Range: [1.0, 10.0], default 2.5.
-
- - watermark (bool):
- Whether to add watermark.
- Default: True.
-
- - seed (int):
- Random seed for reproducibility.
- Range: [-1, 2^31-1], default -1 (random).
-
- Returns:
- Dict: API response containing generated image metadata.
+ tasks (list[dict]):
+ A list of image-generation tasks. Each task is a dict.
+ Per-task schema
+ ---------------
+ Required:
+ - task_type (str):
+ One of:
+ * "multi_image_to_group" # multiple images to a group of images
+ * "single_image_to_group" # single image to a group of images
+ * "text_to_group" # text to a group of images
+ * "multi_image_to_single" # multiple images to a single image
+ * "single_image_to_single" # single image to a single image
+ * "text_to_single" # text to a single image
+ - prompt (str)
+ Text description of the desired image(s). Both Chinese and English are supported.
+ To request a specific number of images, state it in the prompt, e.g. "generate N images" where N is a concrete number.
+ Optional:
+ - size (str)
+ Size of the generated image(s). Two forms are supported (choose one; do not mix them):
+ Option 1: resolution level
+ Allowed values: "1K", "2K", "4K"
+ The model infers a suitable aspect ratio and dimensions from the prompt.
+ Option 2: explicit width and height
+ Format: "<width>x<height>", e.g. "2048x2048", "2384x1728"
+ Constraints:
+ * total pixel count range: [1024x1024, 4096x4096]
+ * aspect ratio range: [1/16, 16]
+ Recommended values:
+ - 1:1 → 2048x2048
+ - 4:3 → 2384x1728
+ - 3:4 → 1728x2304
+ - 16:9 → 2560x1440
+ - 9:16 → 1440x2560
+ - 3:2 → 2496x1664
+ - 2:3 → 1664x2496
+ - 21:9 → 3024x1296
+ Default: "2048x2048"
+ - response_format (str)
+ Return format: "url" (default; the URL expires after 24 hours) | "b64_json".
+ - watermark (bool)
+ Add watermark. Default: true.
+ - image (str | list[str]) # only for tasks with reference images; do not provide it for text-to-image tasks
+ Reference image(s) as URL or Base64.
+ * Tasks that generate a single image: pass a string (exactly 1 image).
+ * Tasks that generate a group of images: pass an array (2-10 images).
+ - sequential_image_generation (str)
+ Controls whether a group of images is generated. Default: "disabled".
+ * Must be set to "auto" to generate a group of images.
+ - max_images (int)
+ Only effective when generating a group of images. Caps how many images the model may generate, range [1, 15]; defaults to 15 when unset.
+ Note that this is an upper bound, not the exact number of images that will be generated.
+ At most 14 in the single-image-to-group case; in the multi-image-to-group case, (len(images) + max_images) must be <= 15.
+ Model behavior (how the mode is inferred from the parameters; S = sequential_image_generation)
+ ---------------------------------
+ 1) Text to single image: no image and (S unset or S="disabled") → 1 image.
+ 2) Text to group: no image and S="auto" → a group of images, count bounded by max_images.
+ 3) Single image to single image: image=string and (S unset or S="disabled") → 1 image.
+ 4) Single image to group: image=string and S="auto" → a group of images, at most 14.
+ 5) Multiple images to single image: image=array (2-10) and (S unset or S="disabled") → 1 image.
+ 6) Multiple images to group: image=array (2-10) and S="auto" → a group of images, total must stay <= 15.
+ Returns
+ --------
+ Dict with generation summary.
  Example:
  {
  "status": "success",
- "success_list": [{"image_name": ""}],
- "error_list": [{}]
+ "success_list": [
+ {"image_name": "url"}
+ ],
+ "error_list": ["image_name"]
  }
-
  Notes:
- - Best suited for creating original images from text.
- - Use a fixed `seed` for reproducibility.
- - Choose appropriate `size` for desired aspect ratio.
+ - Group-image tasks must set sequential_image_generation="auto".
+ - To control how many images a group contains, state the count in the prompt, e.g. "generate 3 images".
+ - For size, prefer 2048x2048 or one of the recommended ratios above to ensure output quality.
  """
- logger.debug(
- f"Using model: {getenv('MODEL_IMAGE_NAME', DEFAULT_TEXT_TO_IMAGE_MODEL_NAME)}"
- )
- success_list = []
- error_list = []
- logger.debug(f"image_generate params: {params}")
- for idx, item in enumerate(params):
- logger.debug(f"image_generate item {idx}: {item}")
- prompt = item.get("prompt", "")
- image_name = item.get("image_name", f"generated_image_{idx}")
- response_format = item.get("response_format", "url")
- size = item.get("size", "1024x1024")
- guidance_scale = item.get("guidance_scale", 2.5)
- watermark = item.get("watermark", True)
- seed = item.get("seed", -1)
+ model = getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME)

- try:
- tracer = trace.get_tracer("gcp.vertex.agent")
- with tracer.start_as_current_span("call_llm") as span:
- inputs = {
- "prompt": prompt,
- "response_format": response_format,
- "size": size,
- "guidance_scale": guidance_scale,
- "watermark": watermark,
- "seed": seed,
- }
- input_part = {
- "role": "user",
- "content": json.dumps(inputs, ensure_ascii=False),
- }
- response = client.images.generate(
- model=getenv("MODEL_IMAGE_NAME", DEFAULT_TEXT_TO_IMAGE_MODEL_NAME),
- **inputs,
- )
- output_part = None
- if response.data and len(response.data) > 0:
- logger.debug(f"task {idx} Image generate response: {response}")
- for item in response.data:
- if response_format == "url":
- image = item.url
- tool_context.state[f"{image_name}_url"] = image
- output_part = {
- "message.role": "model",
- "message.parts.0.type": "image_url",
- "message.parts.0.image_url.name": image_name,
- "message.parts.0.image_url.url": image,
- }
- elif response_format == "b64_json":
- image = item.b64_json
- image_bytes = base64.b64decode(image)
-
- tos_url = _upload_image_to_tos(
- image_bytes=image_bytes, object_key=f"{image_name}.png"
- )
- if tos_url:
- tool_context.state[f"{image_name}_url"] = tos_url
- image = tos_url
- output_part = {
- "message.role": "model",
- "message.parts.0.type": "image_url",
- "message.parts.0.image_url.name": image_name,
- "message.parts.0.image_url.url": image,
- }
- else:
- logger.error(
- f"Upload image to TOS failed: {image_name}"
- )
- error_list.append(image_name)
- continue
-
- logger.debug(f"Image saved as ADK artifact: {image_name}")
- logger.debug(
- f"Image {image_name} generated successfully: {image}"
- )
- success_list.append({image_name: image})
- else:
- error_details = f"No images returned by Doubao model: {response}"
- logger.error(error_details)
- error_list.append(image_name)
-
- add_span_attributes(
- span,
- tool_context,
- input_part=input_part,
- output_part=output_part,
- output_tokens=response.usage.output_tokens,
- total_tokens=response.usage.total_tokens,
- request_model=getenv(
- "MODEL_IMAGE_NAME", DEFAULT_TEXT_TO_IMAGE_MODEL_NAME
- ),
- response_model=getenv(
- "MODEL_IMAGE_NAME", DEFAULT_TEXT_TO_IMAGE_MODEL_NAME
- ),
- )
-
- except Exception as e:
- error_details = f"No images returned by Doubao model: {e}"
- logger.error(error_details)
- error_list.append(image_name)
-
- if len(success_list) == 0:
- logger.debug(
- f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+ if model.startswith("doubao-seedream-3-0"):
+ logger.error(
+ f"Image generation by Doubao Seedream 3.0 ({model}) is deprecated. Please use Doubao Seedream 4.0 (e.g., doubao-seedream-4-0-250828) instead."
  )
  return {
- "status": "error",
- "success_list": success_list,
- "error_list": error_list,
+ "status": "failed",
+ "success_list": [],
+ "error_list": [
+ f"Image generation by Doubao Seedream 3.0 ({model}) is deprecated. Please use Doubao Seedream 4.0 (e.g., doubao-seedream-4-0-250828) instead."
+ ],
  }
- else:
+
+ logger.debug(f"Using model to generate image: {model}")
+
+ success_list: list[dict] = []
+ error_list: list[str] = []
+
+ logger.debug(f"image_generate tasks: {tasks}")
+
+ with tracer.start_as_current_span("image_generate"):
+ base_ctx = contextvars.copy_context()
+
+ def make_task(idx, item):
+ ctx = base_ctx.copy()
+ return lambda: ctx.run(handle_single_task_sync, idx, item, tool_context)
+
+ loop = asyncio.get_event_loop()
+ futures = [
+ loop.run_in_executor(executor, make_task(idx, item))
+ for idx, item in enumerate(tasks)
+ ]
+
+ results = await asyncio.gather(*futures, return_exceptions=True)
+
+ for res in results:
+ if isinstance(res, Exception):
+ logger.error(f"Task raised exception: {res}")
+ error_list.append("unknown_task_exception")
+ continue
+ s, e = res
+ success_list.extend(s)
+ error_list.extend(e)
+
+ if not success_list:
  logger.debug(
  f"image_generate success_list: {success_list}\nerror_list: {error_list}"
  )
  return {
- "status": "success",
+ "status": "error",
  "success_list": success_list,
  "error_list": error_list,
  }
+ app_name = tool_context._invocation_context.app_name
+ user_id = tool_context._invocation_context.user_id
+ session_id = tool_context._invocation_context.session.id
+ artifact_service = tool_context._invocation_context.artifact_service
+
+ if artifact_service:
+ for image in success_list:
+ for _, image_tos_url in image.items():
+ filename = f"artifact_{formatted_timestamp()}"
+ await artifact_service.save_artifact(
+ app_name=app_name,
+ user_id=user_id,
+ session_id=session_id,
+ filename=filename,
+ artifact=Part(
+ inline_data=Blob(
+ display_name=filename,
+ data=read_file_to_bytes(image_tos_url),
+ mime_type=mimetypes.guess_type(image_tos_url)[0],
+ )
+ ),
+ )
+
+ logger.debug(
+ f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+ )
+ return {"status": "success", "success_list": success_list, "error_list": error_list}


  def add_span_attributes(
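For illustration only (not part of the package diff): a `tasks` payload matching the docstring of the new task-based `image_generate` might look like the sketch below. The prompts, URL, and values are placeholders, and `tool_context` is supplied by the ADK runtime when the function is registered as an agent tool.

# Hypothetical payloads; field names follow the image_generate docstring.
tasks = [
    {
        # text to a single image
        "task_type": "text_to_single",
        "prompt": "A watercolor lighthouse at dawn",
        "size": "2048x2048",
        "watermark": False,
    },
    {
        # one reference image expanded into a group of images
        "task_type": "single_image_to_group",
        "prompt": "Generate 3 images of the same scene in different seasons",
        "image": "https://example.com/origin.png",  # placeholder URL
        "sequential_image_generation": "auto",
        "max_images": 3,
    },
]

# Inside an agent the tool is awaited with the ADK-provided ToolContext:
#     result = await image_generate(tasks, tool_context)
#     # result -> {"status": "success", "success_list": [...], "error_list": [...]}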
@@ -273,10 +422,11 @@ def add_span_attributes(

  def _upload_image_to_tos(image_bytes: bytes, object_key: str) -> None:
  try:
- from veadk.integrations.ve_tos.ve_tos import VeTOS
  import os
  from datetime import datetime

+ from veadk.integrations.ve_tos.ve_tos import VeTOS
+
  timestamp: str = datetime.now().strftime("%Y%m%d%H%M%S%f")[:-3]
  object_key = f"{timestamp}-{object_key}"
  bucket_name = os.getenv("DATABASE_TOS_BUCKET")
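A side note on the dispatch pattern used by `image_generate` above: the coroutine snapshots the current `contextvars` context once, wraps each task in a fresh copy, runs the blocking workers on the module-level `ThreadPoolExecutor`, and gathers the results with `return_exceptions=True`. A minimal standalone sketch of that pattern follows, with a placeholder `do_work` standing in for `handle_single_task_sync`; nothing here is imported from veadk.

import asyncio
import concurrent.futures
import contextvars

executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)


def do_work(idx: int, item: dict) -> dict:
    # Placeholder for a blocking call such as handle_single_task_sync.
    return {"idx": idx, "echo": item}


async def fan_out(items: list[dict]) -> list:
    # Snapshot the caller's contextvars so worker threads observe the same
    # tracing/request context that was active when fan_out was entered.
    base_ctx = contextvars.copy_context()

    def make_task(idx, item):
        # Each task needs its own copy: a Context may only be active
        # in one thread at a time.
        ctx = base_ctx.copy()
        return lambda: ctx.run(do_work, idx, item)

    loop = asyncio.get_event_loop()
    futures = [
        loop.run_in_executor(executor, make_task(idx, item))
        for idx, item in enumerate(items)
    ]
    # return_exceptions=True keeps one failing task from discarding the rest.
    return await asyncio.gather(*futures, return_exceptions=True)


if __name__ == "__main__":
    print(asyncio.run(fan_out([{"prompt": "a"}, {"prompt": "b"}])))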
@@ -0,0 +1,81 @@
+ # Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+
+ from google.adk.tools import ToolContext
+
+ from veadk.config import getenv
+ from veadk.utils.logger import get_logger
+ from veadk.utils.volcengine_sign import ve_request
+
+ logger = get_logger(__name__)
+
+
+ def run_code(code: str, language: str, tool_context: ToolContext) -> str:
+ """Run code in a code sandbox and return the output.
+
+ Args:
+ code (str): The code to run.
+ language (str): The programming language of the code. Language must be one of the supported languages: python3.
+
+ Returns:
+ str: The output of the code execution.
+ """
+
+ tool_id = getenv("AGENTKIT_TOOL_ID")
+ host = getenv("AGENTKIT_TOOL_HOST") # temporary host for code run tool
+ service = getenv(
+ "AGENTKIT_TOOL_SERVICE_CODE"
+ ) # temporary service for code run tool
+ region = getenv("AGENTKIT_TOOL_REGION", "cn-beijing")
+
+ session_id = tool_context._invocation_context.session.id
+
+ logger.debug(
+ f"Running code in language: {language}, session_id={session_id}, code={code}, tool_id={tool_id}, host={host}, service={service}, region={region}"
+ )
+
+ access_key = getenv("VOLCENGINE_ACCESS_KEY")
+ secret_key = getenv("VOLCENGINE_SECRET_KEY")
+
+ res = ve_request(
+ request_body={
+ "ToolId": tool_id,
+ "UserSessionId": session_id,
+ "OperationType": "RunCode",
+ "OperationPayload": json.dumps(
+ {
+ "code": code,
+ "timeout": 30,
+ "kernel_name": language,
+ }
+ ),
+ },
+ action="InvokeTool",
+ ak=access_key,
+ sk=secret_key,
+ service=service,
+ version="2025-10-30",
+ region=region,
+ host=host,
+ )
+
+ logger.debug(f"Invoke run code response: {res}")
+
+ try:
+ return res["Result"]["Result"]
+ except KeyError as e:
+ logger.error(f"Error occurred while running code: {e}, response is {res}")
+ return res
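For orientation (illustrative, not part of the diff): `run_code` wraps the AgentKit `InvokeTool` action, so the request body it builds looks roughly like the sketch below. The placeholder strings stand in for values read from the environment and from the ADK session.

import json

# Dry-run sketch of the body that run_code passes to ve_request(); the real
# call signs it with VOLCENGINE_ACCESS_KEY / VOLCENGINE_SECRET_KEY and posts
# it to the host configured in AGENTKIT_TOOL_HOST.
request_body = {
    "ToolId": "<AGENTKIT_TOOL_ID>",       # placeholder for getenv("AGENTKIT_TOOL_ID")
    "UserSessionId": "<adk-session-id>",  # tool_context._invocation_context.session.id
    "OperationType": "RunCode",
    "OperationPayload": json.dumps(
        {
            "code": "print(1 + 1)",
            "timeout": 30,
            "kernel_name": "python3",  # python3 is the documented supported language
        }
    ),
}

print(json.dumps(request_body, indent=2))
# The sandbox output is read back from res["Result"]["Result"].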
veadk/version.py CHANGED
@@ -12,4 +12,4 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- VERSION = "0.2.12"
+ VERSION = "0.2.14"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: veadk-python
- Version: 0.2.12
+ Version: 0.2.14
  Summary: Volcengine agent development kit, integrations with Volcengine cloud services.
  Author-email: Yaozheng Fang <fangyozheng@gmail.com>, Guodong Li <cu.eric.lee@gmail.com>, Zhi Han <sliverydayday@gmail.com>, Meng Wang <mengwangwm@gmail.com>
  License: Apache License