intellif-aihub 0.1.25__py3-none-any.whl → 0.1.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aihub/__init__.py +1 -1
- aihub/models/dataset_management.py +10 -1
- aihub/models/eval.py +50 -2
- aihub/services/dataset_management.py +40 -16
- aihub/services/eval.py +163 -6
- aihub/services/model_center.py +3 -4
- {intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/METADATA +12 -1
- {intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/RECORD +12 -12
- {intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/WHEEL +0 -0
- {intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/entry_points.txt +0 -0
- {intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/licenses/LICENSE +0 -0
- {intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/top_level.txt +0 -0
aihub/__init__.py
CHANGED

@@ -1 +1 @@
-__version__ = "0.1.25"
+__version__ = "0.1.28"
aihub/models/dataset_management.py
CHANGED

@@ -152,6 +152,7 @@ class FileUploadData(BaseModel):

 class ListDatasetReq(BaseModel):
     """列表查询数据集请求(使用 dataset_management v2)"""
+
     page_size: int = Field(20, alias="page_size", description="每页大小,默认20")
     page_num: int = Field(1, alias="page_num", description="页码,从1开始")
     name: Optional[str] = Field(None, description="数据集名称筛选")
@@ -162,6 +163,7 @@ class ListDatasetReq(BaseModel):

 class ListDatasetItem(BaseModel):
     """列表数据集项"""
+
     id: int = Field(description="数据集ID")
     name: str = Field(description="数据集名称")
     description: str = Field(description="数据集描述")
@@ -177,6 +179,7 @@ class ListDatasetItem(BaseModel):

 class ListDatasetResp(BaseModel):
     """列表查询数据集响应"""
+
     total: int = Field(description="总数")
     page_size: int = Field(alias="page_size", description="每页大小")
     page_num: int = Field(alias="page_num", description="当前页码")
@@ -185,14 +188,18 @@ class ListDatasetResp(BaseModel):


 class ListDatasetVersionReq(BaseModel):
     """列表查询数据集版本请求(使用 dataset_management v2)"""
+
     page_size: int = Field(10000000, alias="page_size", description="每页大小,默认10000000")
     page_num: int = Field(1, alias="page_num", description="页码,从1开始")
     dataset_id: Optional[int] = Field(None, alias="dataset_id", description="数据集ID筛选")
-    dataset_version_ids: Optional[str] = Field(
+    dataset_version_ids: Optional[str] = Field(
+        None, alias="dataset_version_ids", description="数据集版本ID列表,逗号分隔"
+    )


 class ListDatasetVersionItem(BaseModel):
     """列表数据集版本项"""
+
     id: int = Field(description="版本ID")
     version: int = Field(description="版本号")
     dataset_id: int = Field(alias="dataset_id", description="数据集ID")
@@ -212,6 +219,7 @@ class ListDatasetVersionItem(BaseModel):

 class ListDatasetVersionResp(BaseModel):
     """列表查询数据集版本响应"""
+
     total: int = Field(description="总数")
     page_size: int = Field(alias="page_size", description="每页大小")
     page_num: int = Field(alias="page_num", description="当前页码")
@@ -226,3 +234,4 @@ class CreateDatasetVersionByDataIngestReqV2(BaseModel):
     s3_object_sheet: str = Field(..., description="S3对象表")
     object_cnt: Optional[int] = Field(None, description="对象数量")
     data_size: Optional[int] = Field(None, description="数据大小")
+    user_upload_data_path: Optional[str] = Field(None, description="用户上传数据路径")
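For orientation, a minimal sketch of the reworked `ListDatasetVersionReq` model from the hunks above; the field names, aliases, and defaults come from this diff, while the ID values are placeholders and the import path is inferred from the file path.

```python
# Minimal sketch (not from the package): exercises ListDatasetVersionReq as shown above.
from aihub.models.dataset_management import ListDatasetVersionReq

req = ListDatasetVersionReq(
    dataset_id=42,                  # hypothetical dataset ID
    dataset_version_ids="101,102",  # reflowed field: comma-separated version IDs
)

# Pydantic v2 dump honouring the declared aliases; page_size/page_num keep their defaults.
print(req.model_dump(by_alias=True))
# {'page_size': 10000000, 'page_num': 1, 'dataset_id': 42, 'dataset_version_ids': '101,102'}
```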
aihub/models/eval.py
CHANGED

@@ -1,7 +1,7 @@
 # !/usr/bin/env python
 # -*-coding:utf-8 -*-
 from enum import Enum
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Any

 from pydantic import BaseModel, Field

@@ -46,6 +46,38 @@ class CreateCVEvalReq(BaseEvalReq):
     model_config = {"use_enum_values": True}


+class MetricsArtifact(BaseModel):
+    """指标产物配置"""
+
+    MetricVizConfigID: int = Field(description="指标可视化配置ID")
+    MetricArtifactPath: str = Field(description="指标产物路径")
+
+
+class FaceReidConfig(BaseModel):
+    """人脸检索配置"""
+
+    gallery_dataset_id: int = Field(description="底库数据集ID")
+    gallery_dataset_version_id: int = Field(description="底库数据集版本ID")
+    query_dataset_id: int = Field(description="查询数据集ID")
+    query_dataset_version_id: int = Field(description="查询数据集版本ID")
+    id_dataset_id: int = Field(description="ID数据集ID")
+    id_dataset_version_id: int = Field(description="ID数据集版本ID")
+    metrics_viz_artifacts: List[MetricsArtifact] = Field(description="指标可视化产物列表")
+    search_result_artifact_path: str = Field(description="搜索结果产物路径")
+
+
+class CreateFaceReidEvalReq(BaseEvalReq):
+    """创建人脸检索类型评测任务请求"""
+
+    type: str = Field(default="face", description="评测类型,固定为 'face'")
+    model_id: int = Field(description="模型ID")
+    face_reid_config: FaceReidConfig = Field(description="人脸检索配置")
+    metrics_artifact_path: str = Field(description="指标产物路径")
+    is_public: bool = Field(default=False, description="是否公开")
+    client_type: ClientType = Field(default=ClientType.Workflow, description="客户端类型")
+    model_config = {"use_enum_values": True}
+
+
 class EvalRun(BaseModel):
     """评测任务的运行实体"""

@@ -64,7 +96,7 @@ class EvalRun(BaseModel):
     run_id: str = Field(description="运行ID")
     dataset_summary: Dict = Field(default_factory=dict, description="数据集摘要")
     metrics_summary: Dict = Field(default_factory=dict, description="指标摘要")
-    viz_summary: Dict = Field(default_factory=dict, description="可视化摘要")
+    viz_summary: Optional[Dict] = Field(default_factory=dict, description="可视化摘要")
     eval_config: Optional[Dict] = Field(default=None, description="评测配置")
     created_at: int = Field(description="创建时间")
     updated_at: int = Field(description="更新时间")
@@ -110,3 +142,19 @@ class GrantPermissionReq(BaseModel):
     """授权权限请求"""

     user_ids: list[int] = Field(description="用户ID数组")
+
+
+class CreatePerformanceEvalReq(BaseModel):
+    """创建CV类型评测任务请求"""
+
+    Name: str = Field(description="评测名称")
+    type: str = Field(default="performance", description="评测类型,固定为 'cv'")
+    is_public: bool = Field(default=False, description="是否公开")
+    client_type: ClientType = Field(default=ClientType.Workflow, description="客户端类型")
+    model_config = {"use_enum_values": True}
+    # PerformanceArtifactPath
+    performance_artifact_path: str = Field(description="性能产物路径")
+    report: Dict = Field(description="评测报告")
+    run_id: str = Field(description="运行ID")
+    model_id: int = Field(description="模型ID")
+    eval_config: Dict[str, Any] = Field(description="评测配置")
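As a quick orientation for the new Face-ReID models added above, the sketch below builds a `FaceReidConfig` with one `MetricsArtifact`; the field names come from this diff, all IDs and paths are made up, and the import path is inferred from the file path.

```python
# Minimal sketch (not from the package): builds the new Face-ReID config models.
from aihub.models.eval import FaceReidConfig, MetricsArtifact

config = FaceReidConfig(
    gallery_dataset_id=1,
    gallery_dataset_version_id=10,
    query_dataset_id=2,
    query_dataset_version_id=20,
    id_dataset_id=3,
    id_dataset_version_id=30,
    metrics_viz_artifacts=[
        MetricsArtifact(MetricVizConfigID=7, MetricArtifactPath="s3://bucket/metrics/rank1.json"),
    ],
    search_result_artifact_path="s3://bucket/results/search.parquet",  # placeholder path
)
print(config.model_dump())
```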
aihub/services/dataset_management.py
CHANGED

@@ -19,9 +19,11 @@ import os
 import pathlib
 import time
 import uuid
+from pathlib import Path

 import httpx
 from loguru import logger
+from minio import Minio

 from ..exceptions import APIError
 from ..models.artifact import StsResp
@@ -46,6 +48,7 @@ from ..models.dataset_management import (
 from ..models.dataset_management import DatasetVersionStatus
 from ..utils.di import SimpleS3Client, DataUploader
 from ..utils.download import dataset_download
+from ..utils.s3 import upload_dir_to_s3

 _BASE = "/dataset-mng/api/v2"

@@ -163,6 +166,7 @@ class DatasetManagementService:
         local_file_path: str | None = None,
         server_file_path: str | None = None,
         version_description: str = "",
+        user_upload_data_path: str | None = None,
         timeout: int = 1_800,
     ) -> tuple[int, int, str]:
         """创建数据集及其版本,并等待版本状态变为 *Success*。
@@ -178,6 +182,7 @@ class DatasetManagementService:
             server_file_path: 服务器已有文件路径,当 is_local_upload=False 时必须提供。
             version_description: 版本描述,默认为空。
             timeout: 最大等待秒数(默认1800s)。超过后仍未成功则引发 ``TimeoutError``。
+            user_upload_data_path: 用户本体数据的存储地址

         Returns:
             tuple[int, int, str]: 一个三元组,包含:[数据集 ID,数据集版本 ID, 数据集版本标签(格式为 <dataset_name>/V<version_number>)]
@@ -201,6 +206,7 @@ class DatasetManagementService:
             local_file_path=local_file_path,
             server_file_path=server_file_path,
             version_description=version_description,
+            user_upload_data_path=user_upload_data_path,
         )

         # 获取版本标签
@@ -244,25 +250,30 @@ class DatasetManagementService:
         local_file_path: str | None,
         server_file_path: str | None,
         version_description: str,
+        user_upload_data_path: str | None,
     ) -> int:
         """根据上传类型创建数据集版本"""
         if is_local_upload:
-            return self._create_local_dataset_version(
+            return self._create_local_dataset_version(
+                dataset_id, local_file_path, version_description, user_upload_data_path
+            )
         else:
             return self._create_server_dataset_version(dataset_id, server_file_path, version_description)

     def _create_local_dataset_version(
-        self, dataset_id: int, local_file_path: str | None, version_description: str
+        self, dataset_id: int, local_file_path: str | None, version_description: str, user_upload_data_path: str | None
     ) -> int:
         """创建本地文件数据集版本"""
         if pathlib.Path(local_file_path).is_dir():
-            return self._create_local_dir_dataset_version(dataset_id, local_file_path)
+            return self._create_local_dir_dataset_version(dataset_id, local_file_path, user_upload_data_path)
         elif pathlib.Path(local_file_path).is_file():
            return self._create_local_file_dataset_version(dataset_id, local_file_path, version_description)
         else:
             raise ValueError(f"本地路径既不是文件也不是目录: {local_file_path}")

-    def _create_local_dir_dataset_version(
+    def _create_local_dir_dataset_version(
+        self, dataset_id: int, local_file_path: str, user_upload_data_path: str
+    ) -> int:
         """处理本地目录上传"""
         sts = self._get_sts()
         s3_client = SimpleS3Client(
@@ -290,6 +301,7 @@ class DatasetManagementService:
             s3_object_sheet=s3_csv_path,
             object_cnt=upload_stats.uploaded_count,
             data_size=upload_stats.uploaded_size,
+            user_upload_data_path=user_upload_data_path,
         )
         return self.upload_by_data_ingest(req).id

@@ -386,7 +398,7 @@ class DatasetManagementService:
         name: str | None = None,
         tags: str | None = None,
         create_by: int | None = None,
-        scope: str = "all"
+        scope: str = "all",
     ) -> ListDatasetResp:
         """列表查询数据集

@@ -402,12 +414,7 @@ class DatasetManagementService:
             ListDatasetResp: 数据集列表响应,包含分页信息和数据集列表
         """
         payload = ListDatasetReq(
-            page_size=page_size,
-            page_num=page_num,
-            name=name,
-            tags=tags,
-            create_by=create_by,
-            scope=scope
+            page_size=page_size, page_num=page_num, name=name, tags=tags, create_by=create_by, scope=scope
         )
         return self._dataset.list_datasets(payload)

@@ -417,7 +424,7 @@ class DatasetManagementService:
         page_size: int = 10000000,
         page_num: int = 1,
         dataset_id: int | None = None,
-        dataset_version_ids: str | None = None
+        dataset_version_ids: str | None = None,
     ) -> ListDatasetVersionResp:
         """列表查询数据集版本

@@ -431,13 +438,30 @@ class DatasetManagementService:
             ListDatasetVersionResp: 数据集版本列表响应,包含分页信息和数据集版本列表
         """
         payload = ListDatasetVersionReq(
-            page_size=page_size,
-            page_num=page_num,
-            dataset_id=dataset_id,
-            dataset_version_ids=dataset_version_ids
+            page_size=page_size, page_num=page_num, dataset_id=dataset_id, dataset_version_ids=dataset_version_ids
         )
         return self._dataset_version.list_dataset_versions(payload)

+    def upload_data(self, local_path: str) -> str:
+
+        sts = self._get_sts()
+
+        s3_client = Minio(
+            endpoint=sts.endpoint,
+            access_key=sts.access_key_id,
+            secret_key=sts.secret_access_key,
+            session_token=sts.session_token,
+            secure=False,
+        )
+        s3_prefix = f"user_data/{uuid.uuid4().hex}"
+        if Path(local_path).is_file():
+            s3_key = f"{s3_prefix}/{Path(local_path).name}"
+            s3_client.fput_object(bucket_name=sts.bucket, object_name=s3_prefix, file_path=local_path)
+            return f"s3://{sts.bucket}/{s3_key}"
+        else:
+            upload_dir_to_s3(s3_client, local_path, sts.bucket, s3_prefix)
+            return f"s3://{sts.bucket}/{s3_prefix}"
+

 class _Dataset:
     def __init__(self, http: httpx.Client):
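The main user-facing additions here are the `upload_data` helper and the `user_upload_data_path` parameter that is threaded down to the ingest request. Below is a hedged sketch of how a caller might use the helper, assuming a `DatasetManagementService` instance is already available (its construction is not part of this diff).

```python
# Minimal sketch (assumptions noted): uses the upload_data helper added in 0.1.28.
from aihub.services.dataset_management import DatasetManagementService


def publish_raw_data(dm: DatasetManagementService, local_path: str) -> str:
    """Push a local file or directory to S3 and return its s3:// URI.

    The returned URI can then be passed as the new ``user_upload_data_path`` argument
    of the dataset/version creation call, so the platform keeps a pointer to the
    user's original data.
    """
    # upload_data() uploads under a user_data/<uuid> prefix and returns the S3 URI.
    return dm.upload_data(local_path)
```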
aihub/services/eval.py
CHANGED

@@ -2,25 +2,32 @@
 # -*- coding:utf-8 -*-
 """评测平台服务模块

-本模块围绕
+本模块围绕 **"模型评测(Run → Report)"** 提供能力:

 - **创建评测任务 / 评测报告**
 - **获取评测任务列表**
 """
-
+import os
+import uuid
+from typing import List, Dict, Any, Optional

 import httpx

+from .model_center import ModelCenterService
 from ..exceptions import APIError
 from ..models.common import APIWrapper
 from ..models.eval import (
     CreateLLMEvalReq,
     CreateCVEvalReq,
+    CreateFaceReidEvalReq,
     CreateEvalResp,
     ListEvalReq,
     ListEvalResp,
     GrantPermissionReq,
     ClientType,
+    FaceReidConfig,
+    MetricsArtifact,
+    CreatePerformanceEvalReq,
 )

 _BASE = "/eval-platform/api/v1"

@@ -38,7 +45,7 @@ class EvalService:
         dataset_version_name: str,
         prediction_artifact_path: str,
         evaled_artifact_path: str,
-        report_json:
+        report_json: Dict[str, Any],
         run_id,
         user_id: int = 0,
         is_public: bool = True,

@@ -123,6 +130,108 @@ class EvalService:

         return resp

+    def create_performance_run(
+        self,
+        eval_name: str,
+        benchmark_artifact_path: str,
+        model_name: str,
+        benchmark_report: list[Dict[str, Any]],
+        eval_config: Dict[str, Any],
+        is_public: bool = True,
+        run_id: Optional[str] = None,
+        access_user_ids: List[int] = None,
+    ) -> int:
+        if not run_id:
+            run_id = os.getenv("AI_HUB_WORKFLOW_RUN_ID", uuid.uuid4().hex)
+
+        model_service = ModelCenterService(self._http)
+        model_item = model_service.get_model_db(name=model_name)
+
+        payload = CreatePerformanceEvalReq(
+            Name=eval_name,
+            run_id=run_id,
+            performance_artifact_path=benchmark_artifact_path,
+            is_public=is_public,
+            model_id=model_item.id,
+            report={"performance": benchmark_report},
+            eval_config=eval_config,
+            type="performance",
+            client_type=ClientType.Workflow,
+        )
+        resp = self._eval.create(payload)
+        if is_public is False and access_user_ids:
+            self.grant_permission(user_ids=access_user_ids, run_id=resp)
+
+        return resp
+
+    def create_face_reid_run(
+        self,
+        run_id: str,
+        model_id: int,
+        prediction_artifact_path: str,
+        gallery_dataset_id: int,
+        gallery_dataset_version_id: int,
+        query_dataset_id: int,
+        query_dataset_version_id: int,
+        id_dataset_id: int,
+        id_dataset_version_id: int,
+        metrics_viz_artifacts: List[MetricsArtifact],
+        search_result_artifact_path: str,
+        metrics_artifact_path: str,
+        user_id: int = 0,
+        is_public: bool = True,
+        access_user_ids: List[int] = None,
+    ) -> int:
+        """创建 Face ReID 类型评测运行
+
+        Args:
+            run_id (str): 运行ID
+            model_id (int): 模型ID
+            prediction_artifact_path (str): 推理产物的路径
+            gallery_dataset_id (int): 底库数据集ID
+            gallery_dataset_version_id (int): 底库数据集版本ID
+            query_dataset_id (int): 查询数据集ID
+            query_dataset_version_id (int): 查询数据集版本ID
+            id_dataset_id (int): ID数据集ID
+            id_dataset_version_id (int): ID数据集版本ID
+            metrics_viz_artifacts (List[MetricsArtifact]): 指标可视化产物列表
+            search_result_artifact_path (str): 搜索结果产物路径
+            metrics_artifact_path (str): 指标产物路径
+            user_id (int, optional): 用户ID,默认为0
+            is_public (bool): 是否公开
+            access_user_ids (list): 授权访问的用户id
+
+        Returns:
+            id (int): 评测运行id
+        """
+        face_reid_config = FaceReidConfig(
+            gallery_dataset_id=gallery_dataset_id,
+            gallery_dataset_version_id=gallery_dataset_version_id,
+            query_dataset_id=query_dataset_id,
+            query_dataset_version_id=query_dataset_version_id,
+            id_dataset_id=id_dataset_id,
+            id_dataset_version_id=id_dataset_version_id,
+            metrics_viz_artifacts=metrics_viz_artifacts,
+            search_result_artifact_path=search_result_artifact_path,
+        )
+
+        payload = CreateFaceReidEvalReq(
+            run_id=run_id,
+            model_id=model_id,
+            prediction_artifact_path=prediction_artifact_path,
+            face_reid_config=face_reid_config,
+            metrics_artifact_path=metrics_artifact_path,
+            user_id=user_id,
+            is_public=is_public,
+            type="face",
+            client_type=ClientType.Workflow,
+        )
+        resp = self._eval.create(payload)
+        if is_public is False and access_user_ids:
+            self.grant_permission(user_ids=access_user_ids, run_id=resp)
+
+        return resp
+
     def list(
         self,
         page_size: int = 20,

@@ -196,14 +305,47 @@ class _Eval:

     def create(self, payload) -> int:
         resp = self._http.post(f"{_BASE}/run/", json=payload.model_dump())
-
+
+        if resp.status_code != 200:
+            raise APIError(f"HTTP {resp.status_code} error. " f"Response: {resp.text[:1000]}")
+
+        if not resp.content:
+            raise APIError(f"Empty response from server (HTTP {resp.status_code})")
+
+        try:
+            json_data = resp.json()
+        except Exception as e:
+            raise APIError(
+                f"Failed to parse JSON response: {e}. " f"Status: {resp.status_code}, " f"Content: {resp.text[:1000]}"
+            )
+
+        try:
+            wrapper = APIWrapper[CreateEvalResp].model_validate(json_data)
+        except Exception as e:
+            raise APIError(f"Failed to validate response structure: {e}. " f"Response: {json_data}")
+
         if wrapper.code != 0:
             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
         return wrapper.data.eval_run.id

     def grant_permission(self, payload, task_id):
         resp = self._http.post(f"{_BASE}/run/{task_id}/permissions", json=payload.model_dump())
-
+
+        if resp.status_code != 200:
+            raise APIError(f"HTTP {resp.status_code} error. " f"Response: {resp.text[:1000]}")
+
+        try:
+            json_data = resp.json()
+        except Exception as e:
+            raise APIError(
+                f"Failed to parse JSON response: {e}. " f"Status: {resp.status_code}, " f"Content: {resp.text[:1000]}"
+            )
+
+        try:
+            wrapper = APIWrapper[CreateEvalResp].model_validate(json_data)
+        except Exception as e:
+            raise APIError(f"Failed to validate response structure: {e}. " f"Response: {json_data}")
+
         if wrapper.code != 0:
             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
         return wrapper.data

@@ -216,7 +358,22 @@ class _Eval:
             params[field] = value

         resp = self._http.get(f"{_BASE}/run/", params=params)
-
+
+        if resp.status_code != 200:
+            raise APIError(f"HTTP {resp.status_code} error. " f"Response: {resp.text[:1000]}")
+
+        try:
+            json_data = resp.json()
+        except Exception as e:
+            raise APIError(
+                f"Failed to parse JSON response: {e}. " f"Status: {resp.status_code}, " f"Content: {resp.text[:1000]}"
+            )
+
+        try:
+            wrapper = APIWrapper[ListEvalResp].model_validate(json_data)
+        except Exception as e:
+            raise APIError(f"Failed to validate response structure: {e}. " f"Response: {json_data}")
+
         if wrapper.code != 0:
             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
         return wrapper.data
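The two new service methods are `create_performance_run` and `create_face_reid_run`. Below is a hedged sketch of calling the former: the parameter names come from this diff, the values and naming scheme are invented, and when `run_id` is omitted the method falls back to `AI_HUB_WORKFLOW_RUN_ID` or a random UUID, as shown in the hunk above.

```python
# Minimal sketch (values are placeholders): drives the new create_performance_run method.
from typing import Any, Dict, List

from aihub.services.eval import EvalService


def report_benchmark(evals: EvalService, model_name: str,
                     rows: List[Dict[str, Any]], artifact_path: str) -> int:
    """Create a performance-type eval run and return its id."""
    return evals.create_performance_run(
        eval_name=f"perf-{model_name}",        # hypothetical naming scheme
        benchmark_artifact_path=artifact_path,
        model_name=model_name,                 # resolved to a model id via ModelCenterService
        benchmark_report=rows,                 # stored by the service as report={"performance": rows}
        eval_config={"batch_size": 1},         # hypothetical eval configuration
        is_public=True,                        # run_id omitted: env var / random UUID fallback applies
    )
```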
aihub/services/model_center.py
CHANGED

@@ -101,7 +101,7 @@ class ModelCenterService:
         """
         self._model.delete(model_id)

-    def get_model_db(self,
+    def get_model_db(self, id: int | None = None, name: str | None = None) -> ModelDb:
         """通过 id 或 name 查询模型 DB 信息

         Args:
@@ -115,10 +115,9 @@ class ModelCenterService:

     def upload(
         self,
-        *,
-        local_dir: str,
-        model_id: int | None = None,
         model_name: str | None = None,
+        local_dir: str | None = None,
+        model_id: int | None = None,
         timeout_seconds: int = 3600,
     ) -> None:
         """上传模型
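For context on the relaxed signatures above: `get_model_db` now takes explicit `id`/`name` keyword arguments, and `upload` no longer forces keyword-only arguments while making `local_dir` optional. The flow below is illustrative only; the helper name and directory are invented.

```python
# Minimal sketch (helper name and paths are invented): uses the relaxed signatures above.
from aihub.services.model_center import ModelCenterService


def upload_weights_by_name(mc: ModelCenterService, model_name: str, local_dir: str) -> None:
    """Resolve a model by name, then upload a local directory of weights for it."""
    model = mc.get_model_db(name=model_name)          # explicit keyword arguments: id / name
    mc.upload(model_id=model.id, local_dir=local_dir)  # keyword-only restriction was dropped
```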
{intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: intellif-aihub
-Version: 0.1.25
+Version: 0.1.28
 Summary: Intellif AI-hub SDK.
 Author-email: Platform Team <aihub@example.com>
 License-Expression: Apache-2.0
@@ -109,4 +109,15 @@ python -m pip install --upgrade twine
 twine upload dist/*
 ```

+文档调试:
+
+```bash
+mkdocs serve
+```
+
+构建文档镜像:
+
+```bash
+docker build -t 192.168.14.129:80/library/aihub/sdk_doc:latest -f doc.Dockerfile .
+```
 ---
{intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-aihub/__init__.py,sha256=
+aihub/__init__.py,sha256=MWZDdAHrdUZS0c3VlLqX4O1eaxPodI7irMtEvknKQ94,23
 aihub/client.py,sha256=NY6lIpYmZBAi-QbpuPTeTGma2430KWLqVuEjcgap7MA,5835
 aihub/exceptions.py,sha256=l2cMAvipTqQOio3o11fXsCCSCevbuK4PTsxofkobFjk,500
 aihub/cli/__init__.py,sha256=I6NwAccz4OA13yBkQNVGqdY4by3a9S4Nwc_Nb9myXqM,27
@@ -10,9 +10,9 @@ aihub/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aihub/models/artifact.py,sha256=F-r7DJY9A09yIQJqWol6gLRu6y7NGjRa6-BxkMEluxU,4655
 aihub/models/common.py,sha256=qmabc2LkAdQJXIcpT1P35zxd0Lc8yDYdD4ame1iF4Bs,241
 aihub/models/data_warehouse.py,sha256=zXvWwg7ySoFJMdqQ_1UMTNEKDMhu1hDHlWdBAXdizBk,3905
-aihub/models/dataset_management.py,sha256=
+aihub/models/dataset_management.py,sha256=JNBLxjyqlXzsrVgGPfX93bGXyp9e_iKrxlmBgOLOKwg,10905
 aihub/models/document_center.py,sha256=od9bzx6krAS6ktIA-ChxeqGcch0v2wsS1flY2vuHXBc,1340
-aihub/models/eval.py,sha256=
+aihub/models/eval.py,sha256=VWHr0X4SYU_eHOb1AqdftlSedSsNiMOyAkU2Y4m-TWo,7138
 aihub/models/labelfree.py,sha256=YUnUv0tjYSFAFzYtmbnLOha8rnDe32sb50HkPOclAzU,2016
 aihub/models/model_center.py,sha256=q-ga1Khnb-WbA_gbLHbhwqGPuQ2qjpC4ailaaNpoccU,5491
 aihub/models/model_training_platform.py,sha256=2zir5i-XvuxKKVYr4wuNYUC7nwMzetdtCRoysZ1W_Tc,11725
@@ -25,11 +25,11 @@ aihub/models/workflow_center.py,sha256=4xtI1WZ38ceXJ8gwDBj-QNjOiRlLO_8kGiQybdudJ
 aihub/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aihub/services/artifact.py,sha256=lfOrgOT2AlH1w-75NLcQGOhVWdhmJcWD1gESPpUzqUw,11257
 aihub/services/data_warehouse.py,sha256=awvlJdggo8ph6sXweXXVp4GLRuUSD46LoD0QQksXRts,2964
-aihub/services/dataset_management.py,sha256=
+aihub/services/dataset_management.py,sha256=r3heVMSWqh0ZD1ECv9MD00ebKlRM0pkEbp9vt_Ylvok,22562
 aihub/services/document_center.py,sha256=dG67Ji-DOnzL2t-4x4gVfMt9fbSj_IjVHCLw5R-VTkQ,1813
-aihub/services/eval.py,sha256=
+aihub/services/eval.py,sha256=79mSkO12H7-DOnvdVsKOLhqxGGJ5QIg6EQ_fAfvhkm0,13010
 aihub/services/labelfree.py,sha256=xua62UWhVXTxJjHRyy86waaormnJjmpQwepcARBy_h0,1450
-aihub/services/model_center.py,sha256=
+aihub/services/model_center.py,sha256=mAW3Fp4ECgi7YGQtr5wl5cuaraYDgoazfWJNlOWEnMY,11404
 aihub/services/model_training_platform.py,sha256=38o6HJnyi3htFzpX7qj6UhzdqTchcXLRTYU0nM7ffJg,10176
 aihub/services/notebook_management.py,sha256=zTV0hz7OdxSiZgC3L9hG2B15HEpwZrDwP4YbhNuylVk,8859
 aihub/services/quota_schedule_management.py,sha256=UYOMwjXxJTgkpN6Rv5GzlcejtpZfu23PXlSKr0WihTY,9586
@@ -43,9 +43,9 @@ aihub/utils/di.py,sha256=vFUzno5WbRKu6-pj8Hnz9IqT7xb9UDZQ4qpOFH1YAtM,11812
 aihub/utils/download.py,sha256=ZZVbcC-PnN3PumV7ZiJ_-srkt4HPPovu2F6Faa2RrPE,1830
 aihub/utils/http.py,sha256=AmfHHNjptuuSFx2T1twWCnerR_hLN_gd0lUs8z36ERA,547
 aihub/utils/s3.py,sha256=_HFL5QJQqOF8WuEX8RWGPFKYtad_lGn-jsNzTIfXjHM,3977
-intellif_aihub-0.1.
-intellif_aihub-0.1.
-intellif_aihub-0.1.
-intellif_aihub-0.1.
-intellif_aihub-0.1.
-intellif_aihub-0.1.
+intellif_aihub-0.1.28.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+intellif_aihub-0.1.28.dist-info/METADATA,sha256=Ffg_Xhqu1OeTh5SttfkbDcl1HL1GEEivRE-mxRWhOMg,3139
+intellif_aihub-0.1.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+intellif_aihub-0.1.28.dist-info/entry_points.txt,sha256=PfgnpEJlG76kFmrCdTvfRIRNsZO1Xu1pTEH2S5DSO1M,45
+intellif_aihub-0.1.28.dist-info/top_level.txt,sha256=vIvTtSIN73xv46BpYM-ctVGnyOiUQ9EWP_6ngvdIlvw,6
+intellif_aihub-0.1.28.dist-info/RECORD,,

{intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/WHEEL
File without changes

{intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/entry_points.txt
File without changes

{intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/licenses/LICENSE
File without changes

{intellif_aihub-0.1.25.dist-info → intellif_aihub-0.1.28.dist-info}/top_level.txt
File without changes