veadk-python 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of veadk-python might be problematic.
- veadk/cli/cli.py +2 -0
- veadk/cli/cli_uploadevalset.py +125 -0
- veadk/cli/cli_web.py +15 -2
- veadk/integrations/ve_tos/ve_tos.py +5 -0
- veadk/knowledgebase/backends/utils.py +2 -1
- veadk/knowledgebase/backends/vikingdb_knowledge_backend.py +47 -19
- veadk/memory/long_term_memory.py +9 -0
- veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py +24 -5
- veadk/tools/builtin_tools/generate_image.py +220 -184
- veadk/tools/builtin_tools/image_edit.py +15 -1
- veadk/tools/builtin_tools/image_generate.py +15 -1
- veadk/tools/builtin_tools/run_code.py +81 -0
- veadk/tools/builtin_tools/video_generate.py +32 -7
- veadk/version.py +1 -1
- {veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/METADATA +1 -1
- {veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/RECORD +20 -18
- {veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/WHEEL +0 -0
- {veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/entry_points.txt +0 -0
- {veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/licenses/LICENSE +0 -0
- {veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/top_level.txt +0 -0
veadk/cli/cli.py
CHANGED
@@ -22,6 +22,7 @@ from veadk.cli.cli_kb import kb
 from veadk.cli.cli_pipeline import pipeline
 from veadk.cli.cli_prompt import prompt
 from veadk.cli.cli_web import web
+from veadk.cli.cli_uploadevalset import uploadevalset
 from veadk.version import VERSION


@@ -41,6 +42,7 @@ veadk.add_command(web)
 veadk.add_command(pipeline)
 veadk.add_command(eval)
 veadk.add_command(kb)
+veadk.add_command(uploadevalset)

 if __name__ == "__main__":
     veadk()
veadk/cli/cli_uploadevalset.py
ADDED

@@ -0,0 +1,125 @@
+# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import click
+import json
+import requests
+from veadk.utils.logger import get_logger
+from veadk.config import getenv
+from pathlib import Path
+
+logger = get_logger(__name__)
+
+
+@click.command()
+@click.option("--file", required=True, help="JSON file path containing dataset items")
+@click.option("--cozeloop-workspace-id", default=None, help="CozeLoop workspace ID")
+@click.option("--cozeloop-evalset-id", default=None, help="CozeLoop evaluation set ID")
+@click.option(
+    "--cozeloop-api-key",
+    default=None,
+    help="CozeLoop API key (or set COZELOOP_API_KEY env var)",
+)
+def uploadevalset(
+    file: str,
+    cozeloop_workspace_id: str,
+    cozeloop_evalset_id: str,
+    cozeloop_api_key: str,
+) -> None:
+    """Upload dataset items to CozeLoop evaluation set."""
+
+    if not cozeloop_workspace_id:
+        cozeloop_workspace_id = getenv(
+            "OBSERVABILITY_OPENTELEMETRY_COZELOOP_SERVICE_NAME"
+        )
+    if not cozeloop_evalset_id:
+        cozeloop_evalset_id = getenv("OBSERVABILITY_OPENTELEMETRY_COZELOOP_EVALSET_ID")
+    if not cozeloop_api_key:
+        cozeloop_api_key = getenv("OBSERVABILITY_OPENTELEMETRY_COZELOOP_API_KEY")
+
+    # Read JSON file
+    file_path = Path(file)
+    if not file_path.exists():
+        logger.error(f"File not found: {file}")
+        return
+
+    logger.info(f"Reading dataset from {file}")
+    with open(file_path, "r", encoding="utf-8") as f:
+        data = json.load(f)
+
+    # Prepare items
+    items = []
+    for case in data.get("eval_cases", []):
+        conversation = case.get("conversation", [])
+        for turn in conversation:
+            user_text = (
+                turn.get("user_content", {}).get("parts", [{}])[0].get("text", "")
+            )
+            output_text = (
+                turn.get("final_response", {}).get("parts", [{}])[0].get("text", "")
+            )
+
+            items.append(
+                {
+                    "turns": [
+                        {
+                            "field_datas": [
+                                {
+                                    "name": "input",
+                                    "content": {
+                                        "content_type": "Text",
+                                        "text": user_text,
+                                    },
+                                },
+                                {
+                                    "name": "output",
+                                    "content": {
+                                        "content_type": "Text",
+                                        "text": output_text,
+                                    },
+                                },
+                            ]
+                        }
+                    ]
+                }
+            )
+
+    # Upload to CozeLoop
+    url = f"https://api.coze.cn/v1/loop/evaluation/evaluation_sets/{cozeloop_evalset_id}/items"
+    logger.info(
+        f"Uploading {len(items)} items to workspace_id={cozeloop_workspace_id} evalset_id={cozeloop_evalset_id}"
+    )
+
+    response = requests.post(
+        url=url,
+        headers={
+            "Authorization": f"Bearer {cozeloop_api_key}",
+            "Content-Type": "application/json",
+            "X-TT-ENV": "ppe_eval_openapi",
+            "x-use-ppe": "1",
+        },
+        json={
+            "workspace_id": cozeloop_workspace_id,
+            "is_allow_partial_add": True,
+            "is_skip_invalid_items": True,
+            "items": items,
+        },
+    )
+
+    if response.status_code == 200:
+        logger.info(
+            f"Successfully uploaded dataset to CozeLoop evalset {cozeloop_evalset_id}"
+        )
+    else:
+        logger.error(f"Failed to upload dataset: {response.text}")
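For context, the new command can be exercised through Click's test runner. The option names below come from the diff above; the file path, IDs, and API key are placeholder values, so treat this as a minimal sketch rather than a documented invocation.

# Minimal sketch: invoking the uploadevalset command added in 0.2.13.
from click.testing import CliRunner

from veadk.cli.cli_uploadevalset import uploadevalset

runner = CliRunner()
result = runner.invoke(
    uploadevalset,
    [
        "--file", "evalset.json",             # hypothetical local dataset file
        "--cozeloop-workspace-id", "123456",  # hypothetical workspace ID
        "--cozeloop-evalset-id", "654321",    # hypothetical evalset ID
        "--cozeloop-api-key", "pat_xxx",      # hypothetical API key
    ],
)
print(result.output)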
veadk/cli/cli_web.py
CHANGED
@@ -56,10 +56,14 @@ def _get_ltm_from_env() -> LongTermMemory | None:
     logger = get_logger(__name__)

     long_term_memory_backend = os.getenv("LONG_TERM_MEMORY_BACKEND")
+    app_name = os.getenv("VEADK_WEB_APP_NAME", "")
+    user_id = os.getenv("VEADK_WEB_USER_ID", "")

     if long_term_memory_backend:
         logger.info(f"Long term memory: backend={long_term_memory_backend}")
-        return LongTermMemory(
+        return LongTermMemory(
+            backend=long_term_memory_backend, app_name=app_name, user_id=user_id
+        )  # type: ignore
     else:
         logger.warning("No long term memory backend settings detected.")
         return None
@@ -131,7 +135,13 @@ def patch_adkwebserver_disable_openapi():

 @click.command()
 @click.option("--host", default="127.0.0.1", help="Host to run the web server on")
-def web(host: str) -> None:
+@click.option(
+    "--app_name", default="", help="The `app_name` for initializing long term memory"
+)
+@click.option(
+    "--user_id", default="", help="The `user_id` for initializing long term memory"
+)
+def web(host: str, app_name: str, user_id: str) -> None:
     """Launch web with long term and short term memory."""
     import os
     from typing import Any
@@ -175,6 +185,9 @@ def web(host: str) -> None:
         self.session_service = short_term_memory.session_service
         self.memory_service = long_term_memory

+    os.environ["VEADK_WEB_APP_NAME"] = app_name
+    os.environ["VEADK_WEB_USER_ID"] = user_id
+
     import google.adk.cli.adk_web_server

     google.adk.cli.adk_web_server.AdkWebServer.__init__ = init_for_veadk
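The new options round-trip through environment variables: the patched web command writes them into os.environ, and _get_ltm_from_env reads them back when constructing the LongTermMemory. A small sketch of that handshake, with placeholder values:

import os

# What `web(--app_name ... --user_id ...)` does with its new options:
os.environ["VEADK_WEB_APP_NAME"] = "demo_app"   # placeholder
os.environ["VEADK_WEB_USER_ID"] = "demo_user"   # placeholder

# What `_get_ltm_from_env` reads back before building LongTermMemory:
app_name = os.getenv("VEADK_WEB_APP_NAME", "")
user_id = os.getenv("VEADK_WEB_USER_ID", "")
print(app_name, user_id)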
veadk/integrations/ve_tos/ve_tos.py
CHANGED

@@ -36,11 +36,14 @@ class VeTOS:
         self,
         ak: str = "",
         sk: str = "",
+        session_token: str = "",
         region: str = "cn-beijing",
         bucket_name: str = DEFAULT_TOS_BUCKET_NAME,
     ) -> None:
         self.ak = ak if ak else os.getenv("VOLCENGINE_ACCESS_KEY", "")
         self.sk = sk if sk else os.getenv("VOLCENGINE_SECRET_KEY", "")
+        self.session_token = session_token
+
         # Add empty value validation
         if not self.ak or not self.sk:
             raise ValueError(
@@ -71,6 +74,7 @@ class VeTOS:
         self._client = self._tos_module.TosClientV2(
             ak=self.ak,
             sk=self.sk,
+            security_token=self.session_token,
             endpoint=f"tos-{self.region}.volces.com",
             region=self.region,
         )
@@ -85,6 +89,7 @@ class VeTOS:
         self._client = self._tos_module.TosClientV2(
             self.ak,
             self.sk,
+            security_token=self.session_token,
             endpoint=f"tos-{self.region}.volces.com",
             region=self.region,
         )
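With the new session_token parameter, temporary STS credentials can be passed straight through to the underlying TosClientV2 as security_token. A hedged usage sketch; all credential values below are placeholders:

from veadk.integrations.ve_tos.ve_tos import VeTOS

# Placeholder credentials; in practice these would come from an STS issuance.
client = VeTOS(
    ak="AKLT...",
    sk="c2Vj...",
    session_token="STS...",  # new in 0.2.13, forwarded as security_token
    region="cn-beijing",
)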
veadk/knowledgebase/backends/utils.py
CHANGED

@@ -46,6 +46,7 @@ def build_vikingdb_knowledgebase_request(
     path: str,
     volcengine_access_key: str,
     volcengine_secret_key: str,
+    session_token: str = "",
     method: Literal["GET", "POST", "PUT", "DELETE"] = "POST",
     region: str = "cn-beijing",
     params=None,
@@ -85,7 +86,7 @@ def build_vikingdb_knowledgebase_request(
     r.set_body(json.dumps(data))

     credentials = Credentials(
-        volcengine_access_key, volcengine_secret_key, "air", region
+        volcengine_access_key, volcengine_secret_key, "air", region, session_token
     )
     SignerV4.sign(r, credentials)
     return r

veadk/knowledgebase/backends/vikingdb_knowledge_backend.py
CHANGED

@@ -14,6 +14,7 @@

 import asyncio
 import json
+import os
 import re
 from pathlib import Path
 from typing import Any, Literal
@@ -23,7 +24,7 @@ from pydantic import Field
 from typing_extensions import override

 import veadk.config  # noqa E401
-from veadk.   [removed import, truncated in the source diff view]
+from veadk.auth.veauth.utils import get_credential_from_vefaas_iam
 from veadk.configs.database_configs import NormalTOSConfig, TOSConfig
 from veadk.knowledgebase.backends.base_backend import BaseKnowledgebaseBackend
 from veadk.knowledgebase.backends.utils import build_vikingdb_knowledgebase_request
@@ -58,14 +59,16 @@ def get_files_in_directory(directory: str):


 class VikingDBKnowledgeBackend(BaseKnowledgebaseBackend):
-    volcengine_access_key: str = Field(
-        default_factory=lambda: getenv("VOLCENGINE_ACCESS_KEY")
+    volcengine_access_key: str | None = Field(
+        default_factory=lambda: os.getenv("VOLCENGINE_ACCESS_KEY")
     )

-    volcengine_secret_key: str = Field(
-        default_factory=lambda: getenv("VOLCENGINE_SECRET_KEY")
+    volcengine_secret_key: str | None = Field(
+        default_factory=lambda: os.getenv("VOLCENGINE_SECRET_KEY")
     )

+    session_token: str = ""
+
     volcengine_project: str = "default"
     """VikingDB knowledgebase project in Volcengine console platform. Default by `default`"""

@@ -75,6 +78,15 @@ class VikingDBKnowledgeBackend(BaseKnowledgebaseBackend):
     tos_config: TOSConfig | NormalTOSConfig = Field(default_factory=TOSConfig)
     """TOS config, used to upload files to TOS"""

+    def model_post_init(self, __context: Any) -> None:
+        self.precheck_index_naming()
+
+        # check whether collection exist, if not, create it
+        if not self.collection_status()["existed"]:
+            logger.warning(
+                f"VikingDB knowledgebase collection {self.index} does not exist, please create it first..."
+            )
+
     def precheck_index_naming(self):
         if not (
             isinstance(self.index, str)
@@ -86,18 +98,21 @@ class VikingDBKnowledgeBackend(BaseKnowledgebaseBackend):
             "it must start with an English letter, contain only letters, numbers, and underscores, and have a length of 1-128."
         )

-    def
-        self.   [a further 10 removed lines are not rendered in the source diff view]
+    def _get_tos_client(self) -> VeTOS:
+        volcengine_access_key = self.volcengine_access_key
+        volcengine_secret_key = self.volcengine_secret_key
+        session_token = self.session_token
+
+        if not (volcengine_access_key and volcengine_secret_key):
+            cred = get_credential_from_vefaas_iam()
+            volcengine_access_key = cred.access_key_id
+            volcengine_secret_key = cred.secret_access_key
+            session_token = cred.session_token
+
+        return VeTOS(
+            ak=volcengine_access_key,
+            sk=volcengine_secret_key,
+            session_token=session_token,
             region=self.tos_config.region,
             bucket_name=self.tos_config.bucket,
         )
@@ -404,6 +419,8 @@ class VikingDBKnowledgeBackend(BaseKnowledgebaseBackend):
         metadata: dict | None = None,
     ) -> str:
         # Here, we set the metadata via the TOS object, ref: https://www.volcengine.com/docs/84313/1254624
+        self._tos_client = self._get_tos_client()
+
         self._tos_client.bucket_name = tos_bucket_name
         coro = self._tos_client.upload(
             object_key=object_key,
@@ -504,10 +521,21 @@ class VikingDBKnowledgeBackend(BaseKnowledgebaseBackend):
     ) -> dict:
         VIKINGDB_KNOWLEDGEBASE_BASE_URL = "api-knowledgebase.mlp.cn-beijing.volces.com"

+        volcengine_access_key = self.volcengine_access_key
+        volcengine_secret_key = self.volcengine_secret_key
+        session_token = self.session_token
+
+        if not (volcengine_access_key and volcengine_secret_key):
+            cred = get_credential_from_vefaas_iam()
+            volcengine_access_key = cred.access_key_id
+            volcengine_secret_key = cred.secret_access_key
+            session_token = cred.session_token
+
         request = build_vikingdb_knowledgebase_request(
             path=path,
-            volcengine_access_key=
-            volcengine_secret_key=
+            volcengine_access_key=volcengine_access_key,
+            volcengine_secret_key=volcengine_secret_key,
+            session_token=session_token,
             method=method,
             data=body,
         )
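Both _get_tos_client and the request-building path above follow the same pattern: use explicit keys when configured, otherwise fall back to the VeFaaS IAM credential, which also supplies a session token. A simplified sketch of that fallback; resolve_credentials is a hypothetical helper, while the credential attribute names come from the diff:

from veadk.auth.veauth.utils import get_credential_from_vefaas_iam

def resolve_credentials(access_key, secret_key, session_token=""):
    # Fall back to the VeFaaS IAM credential when explicit keys are missing.
    if not (access_key and secret_key):
        cred = get_credential_from_vefaas_iam()
        access_key = cred.access_key_id
        secret_key = cred.secret_access_key
        session_token = cred.session_token
    return access_key, secret_key, session_token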
veadk/memory/long_term_memory.py
CHANGED
@@ -104,6 +104,15 @@ class LongTermMemory(BaseMemoryService, BaseModel):

         # Once user define backend config, use it directly
         if self.backend_config:
+            if "index" not in self.backend_config:
+                logger.warning(
+                    "Attribute `index` not provided in backend_config, use `index` or `app_name` instead."
+                )
+                self.backend_config["index"] = self.index or self.app_name
+
+            logger.debug(
+                f"Init {self.backend}, Use provided backend config: {self.backend_config}"
+            )
             self._backend = _get_backend_cls(self.backend)(**self.backend_config)
             return

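The effect of the new check is that a user-supplied backend_config without an index key now inherits index (or, failing that, app_name) from the memory object itself. A hedged sketch; the backend identifier and config keys below are illustrative assumptions, not values prescribed by the diff:

from veadk.memory.long_term_memory import LongTermMemory

# Hypothetical configuration: `index` is deliberately omitted from backend_config.
ltm = LongTermMemory(
    backend="vikingdb",  # assumption: use whatever backend string your deployment accepts
    app_name="demo_app",
    backend_config={"region": "cn-beijing"},
)
# In 0.2.13 the missing key is filled in as backend_config["index"] = index or app_name.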
veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py
CHANGED

@@ -49,7 +49,23 @@ class VikingDBLTMBackend(BaseLongTermMemoryBackend):
     region: str = "cn-beijing"
     """VikingDB memory region"""

+    memory_type: list[str] = Field(default_factory=list)
+
     def model_post_init(self, __context: Any) -> None:
+        # We get memory type from:
+        # 1. user input
+        # 2. environment variable
+        # 3. default value
+        if not self.memory_type:
+            env_memory_type = os.getenv("DATABASE_VIKINGMEM_MEMORY_TYPE")
+            if env_memory_type:
+                # "event_1, event_2" -> ["event_1", "event_2"]
+                self.memory_type = [x.strip() for x in env_memory_type.split(",")]
+            else:
+                self.memory_type = ["sys_event_v1", "event_v1"]
+
+        logger.info(f"Using memory type: {self.memory_type}")
+
         # check whether collection exist, if not, create it
         if not self._collection_exist():
             self._create_collection()
@@ -68,17 +84,23 @@ class VikingDBLTMBackend(BaseLongTermMemoryBackend):
         try:
             client = self._get_client()
             client.get_collection(collection_name=self.index)
+            logger.info(f"Collection {self.index} exist.")
             return True
         except Exception:
+            logger.info(f"Collection {self.index} not exist.")
             return False

     def _create_collection(self) -> None:
+        logger.info(
+            f"Create collection with collection_name={self.index}, builtin_event_types={self.memory_type}"
+        )
         client = self._get_client()
         response = client.create_collection(
             collection_name=self.index,
             description="Created by Volcengine Agent Development Kit VeADK",
-            builtin_event_types=
+            builtin_event_types=self.memory_type,
         )
+        logger.debug(f"Create collection with response {response}")
         return response

     def _get_client(self) -> VikingDBMemoryClient:
@@ -135,10 +157,7 @@ class VikingDBLTMBackend(BaseLongTermMemoryBackend):
     def search_memory(
         self, user_id: str, query: str, top_k: int, **kwargs
     ) -> list[str]:
-        filter = {
-            "user_id": user_id,
-            "memory_type": ["sys_event_v1"],
-        }
+        filter = {"user_id": user_id, "memory_type": self.memory_type}

         logger.debug(
             f"Request for search memory in VikingDB: filter={filter}, collection_name={self.index}, query={query}, limit={top_k}"
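The comma-separated environment override is parsed exactly as shown above; for example:

import os

os.environ["DATABASE_VIKINGMEM_MEMORY_TYPE"] = "sys_event_v1, event_v1"

env_memory_type = os.getenv("DATABASE_VIKINGMEM_MEMORY_TYPE")
memory_type = [x.strip() for x in env_memory_type.split(",")]
print(memory_type)  # ['sys_event_v1', 'event_v1'] -- the same values as the built-in default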
veadk/tools/builtin_tools/generate_image.py
CHANGED

@@ -33,6 +33,10 @@ from veadk.consts import (
 from veadk.utils.logger import get_logger
 from veadk.utils.misc import formatted_timestamp, read_file_to_bytes
 from veadk.version import VERSION
+import asyncio
+import concurrent.futures
+import contextvars
+

 logger = get_logger(__name__)

@@ -43,11 +47,164 @@ client = Ark(
     base_url=getenv("MODEL_IMAGE_API_BASE", DEFAULT_IMAGE_GENERATE_MODEL_API_BASE),
 )

+executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)
+tracer = trace.get_tracer("gcp.vertex.agent")

-   [4 removed lines are not rendered in the source diff view]
+
+def _build_input_parts(item: dict, task_type: str, image_field):
+    input_part = {"role": "user"}
+    input_part["parts.0.type"] = "text"
+    input_part["parts.0.text"] = json.dumps(item, ensure_ascii=False)
+
+    if image_field:
+        if task_type.startswith("single"):
+            assert isinstance(image_field, str), (
+                f"single_* task_type image must be str, got {type(image_field)}"
+            )
+            input_part["parts.1.type"] = "image_url"
+            input_part["parts.1.image_url.name"] = "origin_image"
+            input_part["parts.1.image_url.url"] = image_field
+        elif task_type.startswith("multi"):
+            assert isinstance(image_field, list), (
+                f"multi_* task_type image must be list, got {type(image_field)}"
+            )
+            assert len(image_field) <= 10, (
+                f"multi_* task_type image list length must be <= 10, got {len(image_field)}"
+            )
+            for i, image_url in enumerate(image_field):
+                idx = i + 1
+                input_part[f"parts.{idx}.type"] = "image_url"
+                input_part[f"parts.{idx}.image_url.name"] = f"origin_image_{i}"
+                input_part[f"parts.{idx}.image_url.url"] = image_url
+
+    return input_part
+
+
+def handle_single_task_sync(
+    idx: int, item: dict, tool_context
+) -> tuple[list[dict], list[str]]:
+    logger.debug(f"handle_single_task_sync item {idx}: {item}")
+    success_list: list[dict] = []
+    error_list: list[str] = []
+    total_tokens = 0
+    output_tokens = 0
+    output_part = {"message.role": "model"}
+
+    task_type = item.get("task_type", "text_to_single")
+    prompt = item.get("prompt", "")
+    response_format = item.get("response_format", None)
+    size = item.get("size", None)
+    watermark = item.get("watermark", None)
+    image_field = item.get("image", None)
+    sequential_image_generation = item.get("sequential_image_generation", None)
+    max_images = item.get("max_images", None)
+
+    input_part = _build_input_parts(item, task_type, image_field)
+
+    inputs = {"prompt": prompt}
+    if size:
+        inputs["size"] = size
+    if response_format:
+        inputs["response_format"] = response_format
+    if watermark is not None:
+        inputs["watermark"] = watermark
+    if sequential_image_generation:
+        inputs["sequential_image_generation"] = sequential_image_generation
+
+    with tracer.start_as_current_span(f"call_llm_task_{idx}") as span:
+        try:
+            if (
+                sequential_image_generation
+                and sequential_image_generation == "auto"
+                and max_images
+            ):
+                response = client.images.generate(
+                    model=getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME),
+                    **inputs,
+                    sequential_image_generation_options=SequentialImageGenerationOptions(
+                        max_images=max_images
+                    ),
+                )
+            else:
+                response = client.images.generate(
+                    model=getenv("MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME),
+                    **inputs,
+                )
+
+            if not response.error:
+                logger.debug(f"task {idx} Image generate response: {response}")
+
+                total_tokens += getattr(response.usage, "total_tokens", 0) or 0
+                output_tokens += getattr(response.usage, "output_tokens", 0) or 0
+
+                for i, image_data in enumerate(response.data):
+                    image_name = f"task_{idx}_image_{i}"
+                    if "error" in image_data:
+                        logger.error(f"Image {image_name} error: {image_data.error}")
+                        error_list.append(image_name)
+                        continue
+
+                    if getattr(image_data, "url", None):
+                        image_url = image_data.url
+                    else:
+                        b64 = getattr(image_data, "b64_json", None)
+                        if not b64:
+                            logger.error(
+                                f"Image {image_name} missing data (no url/b64)"
+                            )
+                            error_list.append(image_name)
+                            continue
+                        image_bytes = base64.b64decode(b64)
+                        image_url = _upload_image_to_tos(
+                            image_bytes=image_bytes, object_key=f"{image_name}.png"
+                        )
+                        if not image_url:
+                            logger.error(f"Upload image to TOS failed: {image_name}")
+                            error_list.append(image_name)
+                            continue
+                        logger.debug(f"Image saved as ADK artifact: {image_name}")
+
+                    tool_context.state[f"{image_name}_url"] = image_url
+                    output_part[f"message.parts.{i}.type"] = "image_url"
+                    output_part[f"message.parts.{i}.image_url.name"] = image_name
+                    output_part[f"message.parts.{i}.image_url.url"] = image_url
+                    logger.debug(
+                        f"Image {image_name} generated successfully: {image_url}"
+                    )
+                    success_list.append({image_name: image_url})
+            else:
+                logger.error(
+                    f"Task {idx} No images returned by model: {response.error}"
+                )
+                error_list.append(f"task_{idx}")
+
+        except Exception as e:
+            logger.error(f"Error in task {idx}: {e}")
+            traceback.print_exc()
+            error_list.append(f"task_{idx}")
+
+        finally:
+            add_span_attributes(
+                span,
+                tool_context,
+                input_part=input_part,
+                output_part=output_part,
+                output_tokens=output_tokens,
+                total_tokens=total_tokens,
+                request_model=getenv(
+                    "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
+                ),
+                response_model=getenv(
+                    "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
+                ),
+            )
+    logger.debug(
+        f"task {idx} Image generate success_list: {success_list}\nerror_list: {error_list}"
+    )
+    return success_list, error_list
+
+
+async def image_generate(tasks: list[dict], tool_context) -> Dict:
     """
     Seedream 4.0: batch image generation via tasks.
     Args:
@@ -127,193 +284,72 @@ async def image_generate(
     - 如果想要指定生成组图的数量，请在prompt里添加数量说明，例如："生成3张图片"。
     - size 推荐使用 2048x2048 或表格里的标准比例，确保生成质量。
     """
-
+    logger.debug(
+        f"Using model: {getenv('MODEL_IMAGE_NAME', DEFAULT_IMAGE_GENERATE_MODEL_NAME)}"
+    )
     success_list: list[dict] = []
-    error_list = []
-   [29 removed lines are not rendered in the source diff view]
-    if image:
-        if task_type.startswith("single"):
-            assert isinstance(image, str), (
-                f"single_* task_type image must be str, got {type(image)}"
-            )
-            input_part["parts.1.type"] = "image_url"
-            input_part["parts.1.image_url.name"] = "origin_image"
-            input_part["parts.1.image_url.url"] = image
-        elif task_type.startswith("multi"):
-            assert isinstance(image, list), (
-                f"multi_* task_type image must be list, got {type(image)}"
-            )
-            assert len(image) <= 10, (
-                f"multi_* task_type image list length must be <= 10, got {len(image)}"
-            )
-            for i, image_url in enumerate(image):
-                input_part[f"parts.{i + 1}.type"] = "image_url"
-                input_part[f"parts.{i + 1}.image_url.name"] = (
-                    f"origin_image_{i}"
-                )
-                input_part[f"parts.{i + 1}.image_url.url"] = image_url
-
-    if sequential_image_generation:
-        inputs["sequential_image_generation"] = sequential_image_generation
-
-    try:
-        if (
-            sequential_image_generation
-            and sequential_image_generation == "auto"
-            and max_images
-        ):
-            response = client.images.generate(
-                model=getenv(
-                    "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-                ),
-                **inputs,
-                sequential_image_generation_options=SequentialImageGenerationOptions(
-                    max_images=max_images
-                ),
-            )
-        else:
-            response = client.images.generate(
-                model=getenv(
-                    "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-                ),
-                **inputs,
-            )
-        if not response.error:
-            for i, image_data in enumerate(response.data):
-                image_name = f"task_{idx}_image_{i}"
-                if "error" in image_data:
-                    error_details = (
-                        f"Image {image_name} error: {image_data.error}"
-                    )
-                    logger.error(error_details)
-                    error_list.append(image_name)
-                    continue
-                if image_data.url:
-                    image = image_data.url
-                    tool_context.state[f"{image_name}_url"] = image
-
-                    output_part[f"message.parts.{i}.type"] = "image_url"
-                    output_part[f"message.parts.{i}.image_url.name"] = (
-                        image_name
-                    )
-                    output_part[f"message.parts.{i}.image_url.url"] = image
-
-                else:
-                    image = image_data.b64_json
-                    image_bytes = base64.b64decode(image)
-
-                    tos_url = _upload_image_to_tos(
-                        image_bytes=image_bytes, object_key=f"{image_name}.png"
-                    )
-                    if tos_url:
-                        tool_context.state[f"{image_name}_url"] = tos_url
-                        image = tos_url
-                        output_part[f"message.parts.{i}.type"] = "image_url"
-                        output_part[f"message.parts.{i}.image_url.name"] = (
-                            image_name
-                        )
-                        output_part[f"message.parts.{i}.image_url.url"] = image
-                    else:
-                        logger.error(
-                            f"Upload image to TOS failed: {image_name}"
-                        )
-                        error_list.append(image_name)
-                        continue
-
-                logger.debug(f"Image saved as ADK artifact: {image_name}")
-
-                total_tokens += response.usage.total_tokens
-                output_tokens += response.usage.output_tokens
-                success_list.append({image_name: image})
-        else:
-            error_details = (
-                f"No images returned by Doubao model: {response.error}"
-            )
-            logger.error(error_details)
-            error_list.append(f"task_{idx}")
-
-    except Exception as e:
-        error_details = f"Error: {e}"
-        logger.error(error_details)
-        traceback.print_exc()
-        error_list.append(f"task_{idx}")
-
-    add_span_attributes(
-        span,
-        tool_context,
-        input_part=input_part,
-        output_part=output_part,
-        output_tokens=output_tokens,
-        total_tokens=total_tokens,
-        request_model=getenv(
-            "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-        ),
-        response_model=getenv(
-            "MODEL_IMAGE_NAME", DEFAULT_IMAGE_GENERATE_MODEL_NAME
-        ),
-    )
-    if len(success_list) == 0:
+    error_list: list[str] = []
+    logger.debug(f"image_generate tasks: {tasks}")
+    with tracer.start_as_current_span("image_generate"):
+        base_ctx = contextvars.copy_context()
+
+        def make_task(idx, item):
+            ctx = base_ctx.copy()
+            return lambda: ctx.run(handle_single_task_sync, idx, item, tool_context)
+
+        loop = asyncio.get_event_loop()
+        futures = [
+            loop.run_in_executor(executor, make_task(idx, item))
+            for idx, item in enumerate(tasks)
+        ]
+
+        results = await asyncio.gather(*futures, return_exceptions=True)
+
+        for res in results:
+            if isinstance(res, Exception):
+                logger.error(f"Task raised exception: {res}")
+                error_list.append("unknown_task_exception")
+                continue
+            s, e = res
+            success_list.extend(s)
+            error_list.extend(e)
+
+    if not success_list:
+        logger.debug(
+            f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
-   [4 removed lines are not rendered in the source diff view]
+    app_name = tool_context._invocation_context.app_name
+    user_id = tool_context._invocation_context.user_id
+    session_id = tool_context._invocation_context.session.id
+    artifact_service = tool_context._invocation_context.artifact_service
+
+    if artifact_service:
+        for image in success_list:
+            for _, image_tos_url in image.items():
+                filename = f"artifact_{formatted_timestamp()}"
+                await artifact_service.save_artifact(
+                    app_name=app_name,
+                    user_id=user_id,
+                    session_id=session_id,
+                    filename=filename,
+                    artifact=Part(
+                        inline_data=Blob(
+                            display_name=filename,
+                            data=read_file_to_bytes(image_tos_url),
+                            mime_type=mimetypes.guess_type(image_tos_url)[0],
+                        )
+                    ),
+                )

-   [4 removed lines are not rendered in the source diff view]
-        filename = f"artifact_{formatted_timestamp()}"
-        await artifact_service.save_artifact(
-            app_name=app_name,
-            user_id=user_id,
-            session_id=session_id,
-            filename=filename,
-            artifact=Part(
-                inline_data=Blob(
-                    display_name=filename,
-                    data=read_file_to_bytes(image_tos_url),
-                    mime_type=mimetypes.guess_type(image_tos_url)[0],
-                )
-            ),
-        )
-    return {
-        "status": "success",
-        "success_list": success_list,
-        "error_list": error_list,
-    }
+    logger.debug(
+        f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+    )
+    return {"status": "success", "success_list": success_list, "error_list": error_list}


 def add_span_attributes(
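The rewritten tool fans each task out to a thread pool while copying the current contextvars context, then gathers the per-task (success_list, error_list) pairs. A self-contained sketch of that pattern with a stand-in worker; the real worker is handle_single_task_sync above, and the inputs here are placeholders:

import asyncio
import concurrent.futures
import contextvars

executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)

def handle_single_task_sync(idx, item):
    # Stand-in for the real worker: returns (success_list, error_list).
    return [{f"task_{idx}_image_0": item["prompt"]}], []

async def fan_out(tasks):
    base_ctx = contextvars.copy_context()

    def make_task(idx, item):
        # Bind idx/item via a closure factory and run inside a copied context.
        ctx = base_ctx.copy()
        return lambda: ctx.run(handle_single_task_sync, idx, item)

    loop = asyncio.get_event_loop()
    futures = [
        loop.run_in_executor(executor, make_task(idx, item))
        for idx, item in enumerate(tasks)
    ]
    return await asyncio.gather(*futures, return_exceptions=True)

print(asyncio.run(fan_out([{"prompt": "a cat"}, {"prompt": "a dog"}])))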
veadk/tools/builtin_tools/image_edit.py
CHANGED

@@ -97,9 +97,14 @@ async def image_edit(
     - Provide the same `seed` for consistent outputs across runs.
     - A high `guidance_scale` enforces stricter adherence to text prompt.
     """
+    logger.debug(
+        f"Using model: {getenv('MODEL_EDIT_NAME', DEFAULT_IMAGE_EDIT_MODEL_NAME)}"
+    )
     success_list = []
     error_list = []
+    logger.debug(f"image_edit params: {params}")
     for idx, item in enumerate(params):
+        logger.debug(f"image_edit item {idx}: {item}")
         image_name = item.get("image_name", f"generated_image_{idx}")
         prompt = item.get("prompt")
         origin_image = item.get("origin_image")
@@ -133,6 +138,7 @@ async def image_edit(
         )
         output_part = None
         if response.data and len(response.data) > 0:
+            logger.debug(f"task {idx} Image edit response: {response}")
             for item in response.data:
                 if response_format == "url":
                     image = item.url
@@ -167,7 +173,9 @@ async def image_edit(
                     continue

                 logger.debug(f"Image saved as ADK artifact: {image_name}")
-
+                logger.debug(
+                    f"Image {image_name} generated successfully: {image}"
+                )
                 success_list.append({image_name: image})
         else:
             error_details = f"No images returned by Doubao model: {response}"
@@ -196,12 +204,18 @@ async def image_edit(
             error_list.append(image_name)

     if len(success_list) == 0:
+        logger.debug(
+            f"image_edit success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
     else:
+        logger.debug(
+            f"image_edit success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "success",
             "success_list": success_list,
veadk/tools/builtin_tools/image_generate.py
CHANGED

@@ -98,9 +98,14 @@ async def image_generate(
     - Use a fixed `seed` for reproducibility.
     - Choose appropriate `size` for desired aspect ratio.
     """
+    logger.debug(
+        f"Using model: {getenv('MODEL_IMAGE_NAME', DEFAULT_TEXT_TO_IMAGE_MODEL_NAME)}"
+    )
     success_list = []
     error_list = []
+    logger.debug(f"image_generate params: {params}")
     for idx, item in enumerate(params):
+        logger.debug(f"image_generate item {idx}: {item}")
         prompt = item.get("prompt", "")
         image_name = item.get("image_name", f"generated_image_{idx}")
         response_format = item.get("response_format", "url")
@@ -130,6 +135,7 @@ async def image_generate(
         )
         output_part = None
         if response.data and len(response.data) > 0:
+            logger.debug(f"task {idx} Image generate response: {response}")
             for item in response.data:
                 if response_format == "url":
                     image = item.url
@@ -164,7 +170,9 @@ async def image_generate(
                     continue

                 logger.debug(f"Image saved as ADK artifact: {image_name}")
-
+                logger.debug(
+                    f"Image {image_name} generated successfully: {image}"
+                )
                 success_list.append({image_name: image})
         else:
             error_details = f"No images returned by Doubao model: {response}"
@@ -192,12 +200,18 @@ async def image_generate(
             error_list.append(image_name)

     if len(success_list) == 0:
+        logger.debug(
+            f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
     else:
+        logger.debug(
+            f"image_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "success",
             "success_list": success_list,
veadk/tools/builtin_tools/run_code.py
ADDED

@@ -0,0 +1,81 @@
+# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from google.adk.tools import ToolContext
+
+from veadk.config import getenv
+from veadk.utils.logger import get_logger
+from veadk.utils.volcengine_sign import ve_request
+
+logger = get_logger(__name__)
+
+
+def run_code(code: str, language: str, tool_context: ToolContext) -> str:
+    """Run code in a code sandbox and return the output.
+
+    Args:
+        code (str): The code to run.
+        language (str): The programming language of the code. Language must be one of the supported languages: python3.
+
+    Returns:
+        str: The output of the code execution.
+    """
+
+    tool_id = getenv("AGENTKIT_TOOL_ID")
+    host = getenv("AGENTKIT_TOOL_HOST")  # temporary host for code run tool
+    service = getenv(
+        "AGENTKIT_TOOL_SERVICE_CODE"
+    )  # temporary service for code run tool
+    region = getenv("AGENTKIT_TOOL_REGION", "cn-beijing")
+
+    session_id = tool_context._invocation_context.session.id
+
+    logger.debug(
+        f"Running code in language: {language}, session_id={session_id}, code={code}, tool_id={tool_id}, host={host}, service={service}, region={region}"
+    )
+
+    access_key = getenv("VOLCENGINE_ACCESS_KEY")
+    secret_key = getenv("VOLCENGINE_SECRET_KEY")
+
+    res = ve_request(
+        request_body={
+            "ToolId": tool_id,
+            "UserSessionId": session_id,
+            "OperationType": "RunCode",
+            "OperationPayload": json.dumps(
+                {
+                    "code": code,
+                    "timeout": 30,
+                    "kernel_name": language,
+                }
+            ),
+        },
+        action="InvokeTool",
+        ak=access_key,
+        sk=secret_key,
+        service=service,
+        version="2025-10-30",
+        region=region,
+        host=host,
+    )
+
+    logger.debug(f"Invoke run code response: {res}")
+
+    try:
+        return res["Result"]["Result"]
+    except KeyError as e:
+        logger.error(f"Error occurred while running code: {e}, response is {res}")
+        return res
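For reference, the sandbox call boils down to a signed Volcengine InvokeTool request whose OperationPayload is a JSON string. The payload fields below mirror the diff; the ToolId, session ID, and code snippet are placeholder values:

import json

request_body = {
    "ToolId": "tool-xxxx",         # hypothetical AGENTKIT_TOOL_ID value
    "UserSessionId": "session-1",  # taken from the ADK session in the real tool
    "OperationType": "RunCode",
    "OperationPayload": json.dumps(
        {"code": "print('hello')", "timeout": 30, "kernel_name": "python3"}
    ),
}
print(request_body["OperationPayload"])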
veadk/tools/builtin_tools/video_generate.py
CHANGED

@@ -43,7 +43,6 @@ client = Ark(
 async def generate(prompt, first_frame_image=None, last_frame_image=None):
     try:
         if first_frame_image is None:
-            logger.debug("text generation")
             response = client.content_generation.tasks.create(
                 model=getenv("MODEL_VIDEO_NAME", DEFAULT_VIDEO_MODEL_NAME),
                 content=[
@@ -51,7 +50,6 @@ async def generate(prompt, first_frame_image=None, last_frame_image=None):
                 ],
             )
         elif last_frame_image is None:
-            logger.debug("first frame generation")
             response = client.content_generation.tasks.create(
                 model=getenv("MODEL_VIDEO_NAME", DEFAULT_VIDEO_MODEL_NAME),
                 content=cast(
@@ -66,7 +64,6 @@ async def generate(prompt, first_frame_image=None, last_frame_image=None):
                 ),
             )
         else:
-            logger.debug("last frame generation")
             response = client.content_generation.tasks.create(
                 model=getenv("MODEL_VIDEO_NAME", DEFAULT_VIDEO_MODEL_NAME),
                 content=[
@@ -197,9 +194,13 @@ async def video_generate(params: list, tool_context: ToolContext) -> Dict:
     batch_size = 10
     success_list = []
     error_list = []
+    logger.debug(f"Using model: {getenv('MODEL_VIDEO_NAME', DEFAULT_VIDEO_MODEL_NAME)}")
+    logger.debug(f"video_generate params: {params}")

     for start_idx in range(0, len(params), batch_size):
         batch = params[start_idx : start_idx + batch_size]
+        logger.debug(f"video_generate batch {start_idx // batch_size}: {batch}")
+
         task_dict = {}
         tracer = trace.get_tracer("gcp.vertex.agent")
         with tracer.start_as_current_span("call_llm") as span:
@@ -216,15 +217,30 @@ async def video_generate(params: list, tool_context: ToolContext) -> Dict:
                 last_frame = item.get("last_frame", None)
                 try:
                     if not first_frame:
+                        logger.debug(
+                            f"video_generate task_{idx} text generation: prompt={prompt}"
+                        )
                         response = await generate(prompt)
                     elif not last_frame:
+                        logger.debug(
+                            f"video_generate task_{idx} first frame generation: prompt={prompt}, first_frame={first_frame}"
+                        )
                         response = await generate(prompt, first_frame)
                     else:
+                        logger.debug(
+                            f"video_generate task_{idx} first and last frame generation: prompt={prompt}, first_frame={first_frame}, last_frame={last_frame}"
+                        )
                         response = await generate(prompt, first_frame, last_frame)
+                    logger.debug(
+                        f"batch_{start_idx // batch_size} video_generate task_{idx} response: {response}"
+                    )
                     task_dict[response.id] = video_name
                 except Exception as e:
                     logger.error(f"Error: {e}")
                     error_list.append(video_name)
+                    continue
+
+        logger.debug("begin query video_generate task status...")

         while True:
             task_list = list(task_dict.keys())
@@ -234,7 +250,9 @@ async def video_generate(params: list, tool_context: ToolContext) -> Dict:
                 result = client.content_generation.tasks.get(task_id=task_id)
                 status = result.status
                 if status == "succeeded":
-                    logger.debug(
+                    logger.debug(
+                        f"{task_dict[task_id]} video_generate {status}. Video URL: {result.content.video_url}"
+                    )
                     tool_context.state[f"{task_dict[task_id]}_video_url"] = (
                         result.content.video_url
                     )
@@ -248,13 +266,14 @@ async def video_generate(params: list, tool_context: ToolContext) -> Dict:
                     )
                     task_dict.pop(task_id, None)
                 elif status == "failed":
-                    logger.error(
-
+                    logger.error(
+                        f"{task_dict[task_id]} video_generate {status}. Error: {result.error}"
+                    )
                     error_list.append(task_dict[task_id])
                     task_dict.pop(task_id, None)
                 else:
                     logger.debug(
-                        f"
+                        f"{task_dict[task_id]} video_generate current status: {status}, Retrying after 10 seconds..."
                     )
                     time.sleep(10)

@@ -270,12 +289,18 @@ async def video_generate(params: list, tool_context: ToolContext) -> Dict:
     )

     if len(success_list) == 0:
+        logger.debug(
+            f"video_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "error",
             "success_list": success_list,
             "error_list": error_list,
         }
     else:
+        logger.debug(
+            f"video_generate success_list: {success_list}\nerror_list: {error_list}"
+        )
         return {
             "status": "success",
             "success_list": success_list,
veadk/version.py
CHANGED
{veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: veadk-python
-Version: 0.2.11
+Version: 0.2.13
 Summary: Volcengine agent development kit, integrations with Volcengine cloud services.
 Author-email: Yaozheng Fang <fangyozheng@gmail.com>, Guodong Li <cu.eric.lee@gmail.com>, Zhi Han <sliverydayday@gmail.com>, Meng Wang <mengwangwm@gmail.com>
 License: Apache License

{veadk_python-0.2.11.dist-info → veadk_python-0.2.13.dist-info}/RECORD
CHANGED

@@ -5,7 +5,7 @@ veadk/config.py,sha256=Ezl9Lna9iriC_Uf7m1ZXTWzylLyd7YspUFAQqh94Ong,3203
 veadk/consts.py,sha256=LTl4NQYJf7C8EvconGa96NRschzZZmmOqxlEikJe2Nk,2831
 veadk/runner.py,sha256=_DGNwX-t3sHJFJvHs-rRHXbjCZza8I_zU8AN3Fw5nRY,14217
 veadk/types.py,sha256=zOOzG-QJy-MkzHeicWJzy2_L5U4ERrWziPubIUEbd8c,1656
-veadk/version.py,sha256=
+veadk/version.py,sha256=verJVWp2IHUGhnn0kH5_-upx08hfZiViE_fI27Xhh4A,654
 veadk/a2a/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
 veadk/a2a/agent_card.py,sha256=lhtgW1acMpxYUdULHEZwVFXOi6Xh4lNkf4S7QIhbFFI,1525
 veadk/a2a/remote_ve_agent.py,sha256=L2nzT8PlDI-lLtcaTJqk-D2Uvw9beKl8OEUqp-8qCbA,3510
@@ -28,14 +28,15 @@ veadk/auth/veauth/prompt_pilot_veauth.py,sha256=cls1LK2Un4cOMfHdaAqRhDHIXuk7cTuA
 veadk/auth/veauth/utils.py,sha256=cVEKWQZeX5fzx3JLB1odv59D8lhOAF1Pb3rsgO6evmM,2152
 veadk/auth/veauth/vesearch_veauth.py,sha256=rgup3VBbRSLligrsDFOEwpneq1BEtFwf9xpgNFWHKqc,2008
 veadk/cli/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
-veadk/cli/cli.py,sha256
+veadk/cli/cli.py,sha256=-IGJKG9bGZpZXdLirOHv0LlpN9Vo42qzeEyNnZKsKv8,1430
 veadk/cli/cli_deploy.py,sha256=-P4PmXLGByypXGksshBT7BQ0U42hIvlHibXd_k4YfhQ,5328
 veadk/cli/cli_eval.py,sha256=TVSraCTyTxo_pLu5fhtk3TiZUOZkN3G2BLam1ybFXBc,5446
 veadk/cli/cli_init.py,sha256=f2A3RwUj9pApmUTl6FHmMwTTwyKl83pkvZRorTgl-XM,3982
 veadk/cli/cli_kb.py,sha256=SmLz3g6o2LiPa6WzkdyAOExuboHkpAIrN-4qaH4rxn8,1962
 veadk/cli/cli_pipeline.py,sha256=6FV4WyoapFPAy_P3dzrRm07m6aGjrtLiY4aCFT7CEHs,7510
 veadk/cli/cli_prompt.py,sha256=atw6O3zkjD1tOsFOOg7rs9HbS4exwaNe_Pces6CoyFY,2582
-veadk/cli/
+veadk/cli/cli_uploadevalset.py,sha256=RdelvbXEBalXGxHnPJ-8ZQ1PRiex39328yhAWgZ5mAI,4342
+veadk/cli/cli_web.py,sha256=wPuLoG57s3E3GOQQ7wnDJ923LgIEG4pcO_pNjkd9ynE,6732
 veadk/cloud/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
 veadk/cloud/cloud_agent_engine.py,sha256=u-v-kkAhRgZY1r82CQRwfkYnj0n7ft8qIW_r-yhnMSI,8461
 veadk/cloud/cloud_app.py,sha256=2bmEf7RH1Kwz8HLZ0aY3pVn0R8xi1T7kcGRTRyaWawY,8746
@@ -104,7 +105,7 @@ veadk/integrations/ve_prompt_pilot/ve_prompt_pilot.py,sha256=Wp16pejFObnfudzNv1A
 veadk/integrations/ve_tls/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
 veadk/integrations/ve_tls/utils.py,sha256=DmWS37gSuKsUgiqlLyk1obfAAZZnEpkemQuoTDxAEG4,3769
 veadk/integrations/ve_tls/ve_tls.py,sha256=zO8rSX0CqxcjKjosXI90ykNFO6dhpsIsPa9uZGrf50s,7253
-veadk/integrations/ve_tos/ve_tos.py,sha256=
+veadk/integrations/ve_tos/ve_tos.py,sha256=8tWb-eO7IK140VwjI-Bl4LNCeQNI9CgvKsr9gLXe5aA,25365
 veadk/integrations/ve_viking_db_memory/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
 veadk/integrations/ve_viking_db_memory/ve_viking_db_memory.py,sha256=gX5asox3VDra6o1BIg7AickPec0-Vj8_lgZ1zz5ue5Y,10165
 veadk/knowledgebase/__init__.py,sha256=k-5WM0DAqNq94Dg64Rl12ozoS1BnE9i-MY_ZOR2s2CQ,705
@@ -115,10 +116,10 @@ veadk/knowledgebase/backends/base_backend.py,sha256=gqkJNIASc-ZrJ2e4enIeUvI9CWuc
 veadk/knowledgebase/backends/in_memory_backend.py,sha256=uC4_LCkjp9DFQzTgTKzEr6yw1hcNUpZ0h7WSR0fpifc,3332
 veadk/knowledgebase/backends/opensearch_backend.py,sha256=4oChkqd25RvL460p1-D7noxQIoZVlD03aB61pKREqNw,4953
 veadk/knowledgebase/backends/redis_backend.py,sha256=pc-T_QicyAmOp6KbMz0V8iIL1ZlKCKQpdNB9SkaNqm8,5226
-veadk/knowledgebase/backends/utils.py,sha256=
-veadk/knowledgebase/backends/vikingdb_knowledge_backend.py,sha256=
+veadk/knowledgebase/backends/utils.py,sha256=383La0fsmPcbJEpGWk54EP8p8QO1SP_ThqJPmRWa5Rs,2705
+veadk/knowledgebase/backends/vikingdb_knowledge_backend.py,sha256=khk33XDDdeK1tb84lHtTAjtuZQLmubZFo3shyd70YLs,19245
 veadk/memory/__init__.py,sha256=GRIoRqapS2gUtRMB9JZvodU29PwMxFe2KDjPnVpqib0,1252
-veadk/memory/long_term_memory.py,sha256=
+veadk/memory/long_term_memory.py,sha256=122uacyNsKkUIfsc5Vru8lMGsJKE0A7JJqddLaxX-ls,7838
 veadk/memory/short_term_memory.py,sha256=MLR2Op_l_Zr-ZKCJ7pyNhe_OzPG06Zcb8pt8qA_zrFY,4922
 veadk/memory/short_term_memory_processor.py,sha256=vqmYSUNFGXEogJ8ZxIAhSSrv66LGT8eEHCzxF9h07LA,3276
 veadk/memory/long_term_memory_backends/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
@@ -127,7 +128,7 @@ veadk/memory/long_term_memory_backends/in_memory_backend.py,sha256=0IKRqstS3Rtm8
 veadk/memory/long_term_memory_backends/mem0_backend.py,sha256=sj6OxMHA4oG6kuzTMK5zsGEIIa0-ESPrRYv6dR3KI8E,4869
 veadk/memory/long_term_memory_backends/opensearch_backend.py,sha256=u5odPUv0fchH3IsZNTH0-nNpdW4KwkWojRQi3cn52Wo,4567
 veadk/memory/long_term_memory_backends/redis_backend.py,sha256=0XX21AJA4UwEqrteXdgOr5WAfuo31wK1fMLUtXMssfg,5262
-veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py,sha256=
+veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py,sha256=TfAtQXiJ-iZLRKPTBMakQTwRoHdDsN0l2R5udNFJatY,6683
 veadk/memory/short_term_memory_backends/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
 veadk/memory/short_term_memory_backends/base_backend.py,sha256=5AtnWuFE_TYpTAlAKnm2xglf4AT3iXmU9QSF7LkO1BA,1053
 veadk/memory/short_term_memory_backends/mysql_backend.py,sha256=_m-yuB7X-pPvs93-W8ZswDIkIpYpCFMRRdt9D5SZzn8,1513
@@ -142,15 +143,16 @@ veadk/tools/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
 veadk/tools/demo_tools.py,sha256=Gu3sxygcYVS2cv3WqUOl-Gq4JhMlDAktoCHOFT0gbFQ,2216
 veadk/tools/load_knowledgebase_tool.py,sha256=UUTv0Za9GkEXAkl1SXmyq0HGCKGvSlH_f8Ok6O6e52M,4704
 veadk/tools/builtin_tools/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
-veadk/tools/builtin_tools/generate_image.py,sha256=
-veadk/tools/builtin_tools/image_edit.py,sha256=
-veadk/tools/builtin_tools/image_generate.py,sha256
+veadk/tools/builtin_tools/generate_image.py,sha256=aDMlR-IrxUMepIl9hPkywlH-4e7uIRiyFzLTtmezOnw,17495
+veadk/tools/builtin_tools/image_edit.py,sha256=KslsuabBchAYR3ZrWSO5viEe5ORUAe0GI1qQ6mxoIU0,11588
+veadk/tools/builtin_tools/image_generate.py,sha256=-L_3k3KRJ_arljLfKz946fbd9ppxTDNvJmkNGhgj6qQ,11357
 veadk/tools/builtin_tools/lark.py,sha256=b2IWsN8fZFh9aweSGznaOqA30TCOLpVjNCDNa1LHZl4,2046
 veadk/tools/builtin_tools/las.py,sha256=rgKfnK5GsHVbmkp-rc7rtCvWg-yYNxMjeV0ayCyRpjM,913
 veadk/tools/builtin_tools/load_knowledgebase.py,sha256=Xqtq25DL720goRegCVmmkpH2Ye2VWLcrF5ncC37gK_Y,3427
 veadk/tools/builtin_tools/mcp_router.py,sha256=l3xcIHAHQ0AGCZG3mYyhwM0btqEMDe4TY2S-UYUM8M0,883
+veadk/tools/builtin_tools/run_code.py,sha256=C2CRMNZYgKoyVpe4JoGtUa-Xfb48IuyNIHzF5WKJfxw,2623
 veadk/tools/builtin_tools/vesearch.py,sha256=prPP0w6lYeIEPwuZdmV00RAzaW4MeH8lYtK-NluaXtU,1748
-veadk/tools/builtin_tools/video_generate.py,sha256=
+veadk/tools/builtin_tools/video_generate.py,sha256=hlvwoLESUV8vOPiNFVNPF0ithWqH7N5c6ElMvyI-lBM,16101
 veadk/tools/builtin_tools/web_scraper.py,sha256=iVnxWVf2mVgOnEOeQ6Bg5ATYN-g1ZPCTK6VJm710be0,2408
 veadk/tools/builtin_tools/web_search.py,sha256=1DtdhlcxolzIvx2TyqvUNWk60BHMuLkcru8Wlw3XBtQ,6016
 veadk/tools/sandbox/__init__.py,sha256=pkSabKw7_ai4NOo56pXKL40EcaxIDh6HYxPXOY7qWbo,634
@@ -179,9 +181,9 @@ veadk/utils/mcp_utils.py,sha256=aET7pX3LXmRe2-Jh7_xRvxrVyl1dN7uPAUk16luwMlQ,1525
 veadk/utils/misc.py,sha256=ghEqrqoDfKrW9ZD3IB0bwcfyyB0gRWN2yEP9eRxQ4nE,4953
 veadk/utils/patches.py,sha256=dcHdlJ8IciyMjDuMy6-_6McUqJYyLz0yHmJ0xH8lWOw,2752
 veadk/utils/volcengine_sign.py,sha256=3xn6ca2OAg_AFyP2dqFTSioqkeDel_BoKURUtCcO-EQ,6736
-veadk_python-0.2.11.dist-info/licenses/LICENSE
-veadk_python-0.2.11.dist-info/METADATA
-veadk_python-0.2.11.dist-info/WHEEL
-veadk_python-0.2.11.dist-info/entry_points.txt
-veadk_python-0.2.11.dist-info/top_level.txt
-veadk_python-0.2.11.dist-info/RECORD
+veadk_python-0.2.13.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+veadk_python-0.2.13.dist-info/METADATA,sha256=xonF3Utpt7sSjF-2uewm1laDYbSrgbDeiX9dEqwb_pM,18428
+veadk_python-0.2.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+veadk_python-0.2.13.dist-info/entry_points.txt,sha256=-g28D6dNV-2UvAiRP9VF0oOVSDSJ5zlLUIZ34ArAqF8,46
+veadk_python-0.2.13.dist-info/top_level.txt,sha256=Qqi3ycJ4anKiZWBXtUBIy8zK9ZuXJsFa05oFq8O8qqY,6
+veadk_python-0.2.13.dist-info/RECORD,,

{…}/WHEEL, {…}/entry_points.txt, {…}/licenses/LICENSE, {…}/top_level.txt: files without changes.