veadk-python 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl
This diff shows the changes between package versions as published to their public registry and is provided for informational purposes only.
Note: this release of veadk-python has been flagged as potentially problematic.
- veadk/agent.py +7 -3
- veadk/auth/veauth/ark_veauth.py +43 -51
- veadk/auth/veauth/utils.py +57 -0
- veadk/cli/cli.py +2 -0
- veadk/cli/cli_uploadevalset.py +125 -0
- veadk/cli/cli_web.py +15 -2
- veadk/configs/model_configs.py +3 -3
- veadk/consts.py +9 -0
- veadk/knowledgebase/knowledgebase.py +19 -32
- veadk/memory/long_term_memory.py +39 -92
- veadk/memory/long_term_memory_backends/base_backend.py +4 -2
- veadk/memory/long_term_memory_backends/in_memory_backend.py +8 -6
- veadk/memory/long_term_memory_backends/mem0_backend.py +8 -8
- veadk/memory/long_term_memory_backends/opensearch_backend.py +40 -36
- veadk/memory/long_term_memory_backends/redis_backend.py +59 -46
- veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py +54 -29
- veadk/memory/short_term_memory.py +9 -11
- veadk/runner.py +19 -11
- veadk/tools/builtin_tools/generate_image.py +230 -189
- veadk/tools/builtin_tools/image_edit.py +24 -5
- veadk/tools/builtin_tools/image_generate.py +24 -5
- veadk/tools/builtin_tools/load_knowledgebase.py +97 -0
- veadk/tools/builtin_tools/video_generate.py +38 -11
- veadk/utils/misc.py +6 -10
- veadk/utils/volcengine_sign.py +2 -0
- veadk/version.py +1 -1
- {veadk_python-0.2.10.dist-info → veadk_python-0.2.12.dist-info}/METADATA +2 -1
- {veadk_python-0.2.10.dist-info → veadk_python-0.2.12.dist-info}/RECORD +32 -29
- {veadk_python-0.2.10.dist-info → veadk_python-0.2.12.dist-info}/WHEEL +0 -0
- {veadk_python-0.2.10.dist-info → veadk_python-0.2.12.dist-info}/entry_points.txt +0 -0
- {veadk_python-0.2.10.dist-info → veadk_python-0.2.12.dist-info}/licenses/LICENSE +0 -0
- {veadk_python-0.2.10.dist-info → veadk_python-0.2.12.dist-info}/top_level.txt +0 -0

veadk/tools/builtin_tools/generate_image.py

@@ -25,24 +25,186 @@ from opentelemetry.trace import Span
 from volcenginesdkarkruntime import Ark
 from volcenginesdkarkruntime.types.images.images import SequentialImageGenerationOptions
 
-from veadk.config import getenv
-from veadk.consts import
+from veadk.config import getenv, settings
+from veadk.consts import (
+    DEFAULT_IMAGE_GENERATE_MODEL_NAME,
+    DEFAULT_IMAGE_GENERATE_MODEL_API_BASE,
+)
 from veadk.utils.logger import get_logger
-from veadk.utils.misc import formatted_timestamp,
+from veadk.utils.misc import formatted_timestamp, read_file_to_bytes
 from veadk.version import VERSION
+import asyncio
+import concurrent.futures
+import contextvars
+
 
 logger = get_logger(__name__)
 
 client = Ark(
-    api_key=getenv(
-
+    api_key=getenv(
+        "MODEL_IMAGE_API_KEY", getenv("MODEL_AGENT_API_KEY", settings.model.api_key)
+    ),
+    base_url=getenv("MODEL_IMAGE_API_BASE", DEFAULT_IMAGE_GENERATE_MODEL_API_BASE),
 )
 
+executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)
+tracer = trace.get_tracer("gcp.vertex.agent")
 
-
-
-
-
+
+def _build_input_parts(item: dict, task_type: str, image_field):
+    input_part = {"role": "user"}
+    input_part["parts.0.type"] = "text"
+    input_part["parts.0.text"] = json.dumps(item, ensure_ascii=False)
+
+    if image_field:
+        if task_type.startswith("single"):
+            assert isinstance(image_field, str), (
+                f"single_* task_type image must be str, got {type(image_field)}"
+            )
+            input_part["parts.1.type"] = "image_url"
+            input_part["parts.1.image_url.name"] = "origin_image"
+            input_part["parts.1.image_url.url"] = image_field
+        elif task_type.startswith("multi"):
+            assert isinstance(image_field, list), (
+                f"multi_* task_type image must be list, got {type(image_field)}"
+            )
+            assert len(image_field) <= 10, (
+                f"multi_* task_type image list length must be <= 10, got {len(image_field)}"
+            )
+            for i, image_url in enumerate(image_field):
+                idx = i + 1
+                input_part[f"parts.{idx}.type"] = "image_url"
+                input_part[f"parts.{idx}.image_url.name"] = f"origin_image_{i}"
+                input_part[f"parts.{idx}.image_url.url"] = image_url
+
+    return input_part
+
+
+def handle_single_task_sync(
+    idx: int, item: dict, tool_context
+) -> tuple[list[dict], list[str]]:
+    logger.debug(f"handle_single_task_sync item {idx}: {item}")
+    success_list: list[dict] = []
+    error_list: list[str] = []
+    total_tokens = 0
+    output_tokens = 0
+    output_part = {"message.role": "model"}
+
+    task_type = item.get("task_type", "text_to_single")
+    prompt = item.get("prompt", "")
+    response_format = item.get("response_format", None)
+    size = item.get("size", None)
+    watermark = item.get("watermark", None)
+    image_field = item.get("image", None)
+    sequential_image_generation = item.get("sequential_image_generation", None)
+    max_images = item.get("max_images", None)
+
+    input_part = _build_input_parts(item, task_type, image_field)
+
+    inputs = {"prompt": prompt}
+    if size:
+        inputs["size"] = size
+    if response_format:
+        inputs["response_format"] = response_format
+    if watermark is not None:
+        inputs["watermark"] = watermark
+    if sequential_image_generation:
+        inputs["sequential_image_generation"] = sequential_image_generation
+
+    with tracer.start_as_current_span(f"call_llm_task_{idx}") as span:
+        try:
+            if (
+                sequential_image_generation
+                and sequential_image_generation == "auto"
+                and max_images
+            ):
+                response = client.images.generate(
+                    model=getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME),
+                    **inputs,
+                    sequential_image_generation_options=SequentialImageGenerationOptions(
+                        max_images=max_images
+                    ),
+                )
+            else:
+                response = client.images.generate(
+                    model=getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME),
+                    **inputs,
+                )
+
+            if not response.error:
+                logger.debug(f"task {idx} Image generate response: {response}")
+
+                total_tokens += getattr(response.usage, "total_tokens", 0) or 0
+                output_tokens += getattr(response.usage, "output_tokens", 0) or 0
+
+                for i, image_data in enumerate(response.data):
+                    image_name = f"task_{idx}_image_{i}"
+                    if "error" in image_data:
+                        logger.error(f"Image {image_name} error: {image_data.error}")
+                        error_list.append(image_name)
+                        continue
+
+                    if getattr(image_data, "url", None):
+                        image_url = image_data.url
+                    else:
+                        b64 = getattr(image_data, "b64_json", None)
+                        if not b64:
+                            logger.error(
+                                f"Image {image_name} missing data (no url/b64)"
+                            )
+                            error_list.append(image_name)
+                            continue
+                        image_bytes = base64.b64decode(b64)
+                        image_url = _upload_image_to_tos(
+                            image_bytes=image_bytes, object_key=f"{image_name}.png"
+                        )
+                        if not image_url:
+                            logger.error(f"Upload image to TOS failed: {image_name}")
+                            error_list.append(image_name)
+                            continue
+                        logger.debug(f"Image saved as ADK artifact: {image_name}")
+
+                    tool_context.state[f"{image_name}_url"] = image_url
+                    output_part[f"message.parts.{i}.type"] = "image_url"
+                    output_part[f"message.parts.{i}.image_url.name"] = image_name
+                    output_part[f"message.parts.{i}.image_url.url"] = image_url
+                    logger.debug(
+                        f"Image {image_name} generated successfully: {image_url}"
+                    )
+                    success_list.append({image_name: image_url})
+            else:
+                logger.error(
+                    f"Task {idx} No images returned by model: {response.error}"
+                )
+                error_list.append(f"task_{idx}")
+
+        except Exception as e:
+            logger.error(f"Error in task {idx}: {e}")
+            traceback.print_exc()
+            error_list.append(f"task_{idx}")
+
+        finally:
+            add_span_attributes(
+                span,
+                tool_context,
+                input_part=input_part,
+                output_part=output_part,
+                output_tokens=output_tokens,
+                total_tokens=total_tokens,
+                request_model=getenv(
+                    "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
+                ),
+                response_model=getenv(
+                    "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
+                ),
+            )
+            logger.debug(
+                f"task {idx} Image generate success_list: {success_list}\nerror_list: {error_list}"
+            )
+            return success_list, error_list
+
+
+async def image_generate(tasks: list[dict], tool_context) -> Dict:
     """
     Seedream 4.0: batch image generation via tasks.
     Args:

@@ -122,193 +284,72 @@ async def image_generate(
     - 如果想要指定生成组图的数量,请在prompt里添加数量说明,例如:"生成3张图片"。
     - size 推荐使用 2048x2048 或表格里的标准比例,确保生成质量。
     """
-
+    logger.debug(
+        f"Using model: {getenv('MODEL_IMAGE_NAME', DEFAULT_IMAGE_GENERATE_MODEL_NAME)}"
+    )
     success_list: list[dict] = []
-    error_list = []
-    [29 removed lines (old 128-156) are not captured in this diff view]
-        if image:
-            if task_type.startswith("single"):
-                assert isinstance(image, str), (
-                    f"single_* task_type image must be str, got {type(image)}"
-                )
-                input_part["parts.1.type"] = "image_url"
-                input_part["parts.1.image_url.name"] = "origin_image"
-                input_part["parts.1.image_url.url"] = image
-            elif task_type.startswith("multi"):
-                assert isinstance(image, list), (
-                    f"multi_* task_type image must be list, got {type(image)}"
-                )
-                assert len(image) <= 10, (
-                    f"multi_* task_type image list length must be <= 10, got {len(image)}"
-                )
-                for i, image_url in enumerate(image):
-                    input_part[f"parts.{i + 1}.type"] = "image_url"
-                    input_part[f"parts.{i + 1}.image_url.name"] = (
-                        f"origin_image_{i}"
-                    )
-                    input_part[f"parts.{i + 1}.image_url.url"] = image_url
-
-        if sequential_image_generation:
-            inputs["sequential_image_generation"] = sequential_image_generation
-
-        try:
-            if (
-                sequential_image_generation
-                and sequential_image_generation == "auto"
-                and max_images
-            ):
-                response = client.images.generate(
-                    model=getenv(
-                        "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-                    ),
-                    **inputs,
-                    sequential_image_generation_options=SequentialImageGenerationOptions(
-                        max_images=max_images
-                    ),
-                )
-            else:
-                response = client.images.generate(
-                    model=getenv(
-                        "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-                    ),
-                    **inputs,
-                )
-            if not response.error:
-                for i, image_data in enumerate(response.data):
-                    image_name = f"task_{idx}_image_{i}"
-                    if "error" in image_data:
-                        error_details = (
-                            f"Image {image_name} error: {image_data.error}"
-                        )
-                        logger.error(error_details)
-                        error_list.append(image_name)
-                        continue
-                    if image_data.url:
-                        image = image_data.url
-                        tool_context.state[f"{image_name}_url"] = image
-
-                        output_part[f"message.parts.{i}.type"] = "image_url"
-                        output_part[f"message.parts.{i}.image_url.name"] = (
-                            image_name
-                        )
-                        output_part[f"message.parts.{i}.image_url.url"] = image
-
-                    else:
-                        image = image_data.b64_json
-                        image_bytes = base64.b64decode(image)
-
-                        tos_url = _upload_image_to_tos(
-                            image_bytes=image_bytes, object_key=f"{image_name}.png"
-                        )
-                        if tos_url:
-                            tool_context.state[f"{image_name}_url"] = tos_url
-                            image = tos_url
-                            output_part[f"message.parts.{i}.type"] = "image_url"
-                            output_part[f"message.parts.{i}.image_url.name"] = (
-                                image_name
-                            )
-                            output_part[f"message.parts.{i}.image_url.url"] = image
-                        else:
-                            logger.error(
-                                f"Upload image to TOS failed: {image_name}"
-                            )
-                            error_list.append(image_name)
-                            continue
-
-                    logger.debug(f"Image saved as ADK artifact: {image_name}")
-
-                    total_tokens += response.usage.total_tokens
-                    output_tokens += response.usage.output_tokens
-                    success_list.append({image_name: image})
-            else:
-                error_details = (
-                    f"No images returned by Doubao model: {response.error}"
-                )
-                logger.error(error_details)
-                error_list.append(f"task_{idx}")
-
-        except Exception as e:
-            error_details = f"Error: {e}"
-            logger.error(error_details)
-            traceback.print_exc()
-            error_list.append(f"task_{idx}")
-
-        add_span_attributes(
-            span,
-            tool_context,
-            input_part=input_part,
-            output_part=output_part,
-            output_tokens=output_tokens,
-            total_tokens=total_tokens,
-            request_model=getenv(
-                "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-            ),
-            response_model=getenv(
-                "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-            ),
-        )
-    if len(success_list) == 0:
+    error_list: list[str] = []
+    logger.debug(f"image_generate tasks: {tasks}")
+    with tracer.start_as_current_span("image_generate"):
+        base_ctx = contextvars.copy_context()
+
+        def make_task(idx, item):
+            ctx = base_ctx.copy()
+            return lambda: ctx.run(handle_single_task_sync, idx, item, tool_context)
+
+        loop = asyncio.get_event_loop()
+        futures = [
+            loop.run_in_executor(executor, make_task(idx, item))
+            for idx, item in enumerate(tasks)
+        ]
+
+        results = await asyncio.gather(*futures, return_exceptions=True)
+
+        for res in results:
+            if isinstance(res, Exception):
+                logger.error(f"Task raised exception: {res}")
+                error_list.append("unknown_task_exception")
+                continue
+            s, e = res
+            success_list.extend(s)
+            error_list.extend(e)
+
+    if not success_list:
+        logger.debug(
+            f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
-
-
-
-
+    app_name = tool_context._invocation_context.app_name
+    user_id = tool_context._invocation_context.user_id
+    session_id = tool_context._invocation_context.session.id
+    artifact_service = tool_context._invocation_context.artifact_service
+
+    if artifact_service:
+        for image in success_list:
+            for _, image_tos_url in image.items():
+                filename = f"artifact_{formatted_timestamp()}"
+                await artifact_service.save_artifact(
+                    app_name=app_name,
+                    user_id=user_id,
+                    session_id=session_id,
+                    filename=filename,
+                    artifact=Part(
+                        inline_data=Blob(
+                            display_name=filename,
+                            data=read_file_to_bytes(image_tos_url),
+                            mime_type=mimetypes.guess_type(image_tos_url)[0],
+                        )
+                    ),
+                )
 
-
-
-
-
-            filename = f"artifact_{formatted_timestamp()}"
-            await artifact_service.save_artifact(
-                app_name=app_name,
-                user_id=user_id,
-                session_id=session_id,
-                filename=filename,
-                artifact=Part(
-                    inline_data=Blob(
-                        display_name=filename,
-                        data=read_png_to_bytes(image_tos_url),
-                        mime_type=mimetypes.guess_type(image_tos_url)[0],
-                    )
-                ),
-            )
-    return {
-        "status": "success",
-        "success_list": success_list,
-        "error_list": error_list,
-    }
+    logger.debug(
+        f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+    )
+    return {"status": "success", "success_list": success_list, "error_list": error_list}
 
 
 def add_span_attributes(
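
For readers skimming the diff above: the rewritten `image_generate` in generate_image.py no longer loops over tasks inline. It submits each task's blocking Ark call to a module-level thread pool, copies the current `contextvars` context so the active trace span follows the work onto the worker thread, and awaits everything with `asyncio.gather(..., return_exceptions=True)` so one failing task cannot cancel its siblings. A minimal, self-contained sketch of that pattern, with a hypothetical `do_task` standing in for `handle_single_task_sync` (the real code also threads `tool_context` through):

    import asyncio
    import concurrent.futures
    import contextvars

    executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)

    def do_task(idx: int, item: dict) -> tuple[list[dict], list[str]]:
        # Stand-in for handle_single_task_sync: blocking work, returns (successes, errors).
        return ([{f"task_{idx}": item.get("prompt", "")}], [])

    async def run_all(tasks: list[dict]) -> tuple[list[dict], list[str]]:
        base_ctx = contextvars.copy_context()  # snapshot tracing/session context once

        def make_task(idx: int, item: dict):
            ctx = base_ctx.copy()  # each worker thread runs under its own copy
            return lambda: ctx.run(do_task, idx, item)

        loop = asyncio.get_event_loop()
        futures = [
            loop.run_in_executor(executor, make_task(idx, item))
            for idx, item in enumerate(tasks)
        ]
        results = await asyncio.gather(*futures, return_exceptions=True)

        success_list: list[dict] = []
        error_list: list[str] = []
        for res in results:
            if isinstance(res, Exception):  # a raising task is recorded, not fatal
                error_list.append(repr(res))
                continue
            s, e = res
            success_list.extend(s)
            error_list.extend(e)
        return success_list, error_list

    # asyncio.run(run_all([{"prompt": "a red fox"}, {"prompt": "a blue heron"}]))

Note that `handle_single_task_sync` returns from inside its `finally` block, so per-task span attributes and the success/error lists are recorded even when the Ark call raises.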

veadk/tools/builtin_tools/image_edit.py

@@ -15,8 +15,11 @@
 from typing import Dict
 from google.adk.tools import ToolContext
 from volcenginesdkarkruntime import Ark
-from veadk.config import getenv
-from veadk.consts import
+from veadk.config import getenv, settings
+from veadk.consts import (
+    DEFAULT_IMAGE_EDIT_MODEL_API_BASE,
+    DEFAULT_IMAGE_EDIT_MODEL_NAME,
+)
 import base64
 from opentelemetry import trace
 import traceback

@@ -28,8 +31,10 @@ from veadk.utils.logger import get_logger
 logger = get_logger(__name__)
 
 client = Ark(
-    api_key=getenv(
-
+    api_key=getenv(
+        "MODEL_EDIT_API_KEY", getenv("MODEL_AGENT_API_KEY", settings.model.api_key)
+    ),
+    base_url=getenv("MODEL_EDIT_API_BASE", DEFAULT_IMAGE_EDIT_MODEL_API_BASE),
 )
 
 

@@ -92,9 +97,14 @@ async def image_edit(
     - Provide the same `seed` for consistent outputs across runs.
     - A high `guidance_scale` enforces stricter adherence to text prompt.
     """
+    logger.debug(
+        f"Using model: {getenv('MODEL_EDIT_NAME', DEFAULT_IMAGE_EDIT_MODEL_NAME)}"
+    )
     success_list = []
     error_list = []
+    logger.debug(f"image_edit params: {params}")
     for idx, item in enumerate(params):
+        logger.debug(f"image_edit item {idx}: {item}")
         image_name = item.get("image_name", f"generated_image_{idx}")
         prompt = item.get("prompt")
         origin_image = item.get("origin_image")

@@ -128,6 +138,7 @@ async def image_edit(
         )
         output_part = None
         if response.data and len(response.data) > 0:
+            logger.debug(f"task {idx} Image edit response: {response}")
             for item in response.data:
                 if response_format == "url":
                     image = item.url

@@ -162,7 +173,9 @@ async def image_edit(
                     continue
 
                 logger.debug(f"Image saved as ADK artifact: {image_name}")
-
+                logger.debug(
+                    f"Image {image_name} generated successfully: {image}"
+                )
                 success_list.append({image_name: image})
             else:
                 error_details = f"No images returned by Doubao model: {response}"

@@ -191,12 +204,18 @@ async def image_edit(
             error_list.append(image_name)
 
     if len(success_list) == 0:
+        logger.debug(
+            f"image_edit success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
     else:
+        logger.debug(
+            f"image_edit success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "success",
             "success_list": success_list,

veadk/tools/builtin_tools/image_generate.py

@@ -15,8 +15,11 @@
 from typing import Dict
 
 from google.adk.tools import ToolContext
-from veadk.config import getenv
-from veadk.consts import
+from veadk.config import getenv, settings
+from veadk.consts import (
+    DEFAULT_TEXT_TO_IMAGE_MODEL_NAME,
+    DEFAULT_TEXT_TO_IMAGE_MODEL_API_BASE,
+)
 import base64
 from volcenginesdkarkruntime import Ark
 from opentelemetry import trace

@@ -29,8 +32,10 @@ from veadk.utils.logger import get_logger
 logger = get_logger(__name__)
 
 client = Ark(
-    api_key=getenv(
-
+    api_key=getenv(
+        "MODEL_IMAGE_API_KEY", getenv("MODEL_AGENT_API_KEY", settings.model.api_key)
+    ),
+    base_url=getenv("MODEL_IMAGE_API_BASE", DEFAULT_TEXT_TO_IMAGE_MODEL_API_BASE),
 )
 
 

@@ -93,9 +98,14 @@ async def image_generate(
     - Use a fixed `seed` for reproducibility.
     - Choose appropriate `size` for desired aspect ratio.
     """
+    logger.debug(
+        f"Using model: {getenv('MODEL_IMAGE_NAME', DEFAULT_TEXT_TO_IMAGE_MODEL_NAME)}"
+    )
     success_list = []
     error_list = []
+    logger.debug(f"image_generate params: {params}")
     for idx, item in enumerate(params):
+        logger.debug(f"image_generate item {idx}: {item}")
         prompt = item.get("prompt", "")
         image_name = item.get("image_name", f"generated_image_{idx}")
         response_format = item.get("response_format", "url")

@@ -125,6 +135,7 @@ async def image_generate(
         )
         output_part = None
         if response.data and len(response.data) > 0:
+            logger.debug(f"task {idx} Image generate response: {response}")
             for item in response.data:
                 if response_format == "url":
                     image = item.url

@@ -159,7 +170,9 @@ async def image_generate(
                     continue
 
                 logger.debug(f"Image saved as ADK artifact: {image_name}")
-
+                logger.debug(
+                    f"Image {image_name} generated successfully: {image}"
+                )
                 success_list.append({image_name: image})
             else:
                 error_details = f"No images returned by Doubao model: {response}"

@@ -187,12 +200,18 @@ async def image_generate(
             error_list.append(image_name)
 
     if len(success_list) == 0:
+        logger.debug(
+            f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
     else:
+        logger.debug(
+            f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "success",
             "success_list": success_list,