intellif-aihub 0.1.18__tar.gz → 0.1.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of intellif-aihub has been flagged as possibly problematic.

Files changed (66)
  1. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/PKG-INFO +1 -1
  2. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/pyproject.toml +1 -1
  3. intellif_aihub-0.1.20/src/aihub/__init__.py +1 -0
  4. intellif_aihub-0.1.20/src/aihub/models/eval.py +68 -0
  5. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/labelfree.py +20 -0
  6. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/task_center.py +2 -2
  7. intellif_aihub-0.1.20/src/aihub/services/eval.py +140 -0
  8. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/PKG-INFO +1 -1
  9. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/SOURCES.txt +1 -0
  10. intellif_aihub-0.1.20/tests/test_eval.py +184 -0
  11. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_task_center.py +23 -9
  12. intellif_aihub-0.1.18/src/aihub/__init__.py +0 -1
  13. intellif_aihub-0.1.18/src/aihub/models/eval.py +0 -26
  14. intellif_aihub-0.1.18/src/aihub/services/eval.py +0 -75
  15. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/LICENSE +0 -0
  16. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/README.md +0 -0
  17. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/setup.cfg +0 -0
  18. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/client.py +0 -0
  19. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/exceptions.py +0 -0
  20. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/__init__.py +0 -0
  21. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/artifact.py +0 -0
  22. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/common.py +0 -0
  23. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/data_warehouse.py +0 -0
  24. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/dataset_management.py +0 -0
  25. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/document_center.py +0 -0
  26. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/model_center.py +0 -0
  27. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/model_training_platform.py +0 -0
  28. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/quota_schedule_management.py +0 -0
  29. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/tag_resource_management.py +0 -0
  30. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/user_system.py +0 -0
  31. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/workflow_center.py +0 -0
  32. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/__init__.py +0 -0
  33. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/artifact.py +0 -0
  34. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/data_warehouse.py +0 -0
  35. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/dataset_management.py +0 -0
  36. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/document_center.py +0 -0
  37. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/labelfree.py +0 -0
  38. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/model_center.py +0 -0
  39. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/model_training_platform.py +0 -0
  40. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/quota_schedule_management.py +0 -0
  41. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/reporter.py +0 -0
  42. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/tag_resource_management.py +0 -0
  43. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/task_center.py +0 -0
  44. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/user_system.py +0 -0
  45. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/services/workflow_center.py +0 -0
  46. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/utils/__init__.py +0 -0
  47. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/utils/di.py +0 -0
  48. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/utils/download.py +0 -0
  49. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/utils/http.py +0 -0
  50. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/utils/s3.py +0 -0
  51. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/dependency_links.txt +0 -0
  52. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/requires.txt +0 -0
  53. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/top_level.txt +0 -0
  54. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_artifact.py +0 -0
  55. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_data_warehouse.py +0 -0
  56. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_dataset_management.py +0 -0
  57. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_di.py +0 -0
  58. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_document_center.py +0 -0
  59. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_labelfree.py +0 -0
  60. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_model_center.py +0 -0
  61. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_model_training_platform.py +0 -0
  62. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_quota_schedule_management.py +0 -0
  63. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_s3.py +0 -0
  64. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_tag_resource_management.py +0 -0
  65. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_user_system.py +0 -0
  66. {intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_workflow_center.py +0 -0
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: intellif-aihub
- Version: 0.1.18
+ Version: 0.1.20
  Summary: Intellif AI-hub SDK.
  Author-email: Platform Team <aihub@example.com>
  License-Expression: Apache-2.0
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "intellif-aihub"
- version = "0.1.18"
+ version = "0.1.20"
  description = "Intellif AI-hub SDK."
  readme = {file = "README.md", content-type = "text/markdown"}
  requires-python = ">=3.9"
intellif_aihub-0.1.20/src/aihub/__init__.py
@@ -0,0 +1 @@
+ __version__ = "0.1.20"
intellif_aihub-0.1.20/src/aihub/models/eval.py
@@ -0,0 +1,68 @@
+ # !/usr/bin/env python
+ # -*-coding:utf-8 -*-
+ from typing import Dict, List, Optional
+
+ from pydantic import BaseModel, Field
+
+
+ class CreateEvalReq(BaseModel):
+     """Request to create an evaluation run"""
+     dataset_id: int = Field(description="Dataset ID")
+     dataset_version_id: int = Field(description="Dataset version ID")
+     prediction_artifact_path: str = Field(description="Path to the prediction artifact")
+     evaled_artifact_path: str = Field(description="Path to the evaluation-result artifact")
+     run_id: str = Field(description="Run ID")
+     user_id: int = Field(0, description="User ID")
+     report: Dict = Field(default_factory=dict, description="Evaluation report")
+
+
+ class EvalRun(BaseModel):
+     """Evaluation run entity"""
+     id: int = Field(description="Evaluation run ID")
+     name: str = Field(description="Evaluation name")
+     description: str = Field(description="Evaluation description")
+     user_id: int = Field(description="User ID")
+     model_id: int = Field(description="Model ID")
+     model_name: str = Field(description="Model name")
+     dataset_id: int = Field(description="Dataset ID")
+     dataset_version_id: int = Field(description="Dataset version ID")
+     dataset_name: str = Field(description="Dataset name")
+     status: str = Field(description="Status")
+     prediction_artifact_path: str = Field(description="Prediction artifact path")
+     evaled_artifact_path: str = Field(description="Evaluation-result artifact path")
+     run_id: str = Field(description="Run ID")
+     dataset_summary: Dict = Field(default_factory=dict, description="Dataset summary")
+     metrics_summary: Dict = Field(default_factory=dict, description="Metrics summary")
+     viz_summary: Dict = Field(default_factory=dict, description="Visualization summary")
+     eval_config: Optional[Dict] = Field(default=None, description="Evaluation config")
+     created_at: int = Field(description="Creation time")
+     updated_at: int = Field(description="Update time")
+
+
+ class CreateEvalResp(BaseModel):
+     """Response returned when an evaluation run is created"""
+     eval_run: EvalRun = Field(alias="eval_run", description="Evaluation run info")
+
+
+ class ListEvalReq(BaseModel):
+     """Request to list evaluation runs"""
+     page_size: int = Field(20, description="Page size")
+     page_num: int = Field(1, description="Page number")
+     status: Optional[str] = Field(None, description="Filter by status")
+     name: Optional[str] = Field(None, description="Filter by name")
+     model_id: Optional[int] = Field(None, description="Filter by model ID")
+     dataset_id: Optional[int] = Field(None, description="Filter by dataset ID")
+     dataset_version_id: Optional[int] = Field(None, description="Filter by dataset version ID")
+     run_id: Optional[str] = Field(None, description="Filter by run ID")
+     user_id: Optional[int] = Field(None, description="Filter by user ID")
+     model_ids: Optional[str] = Field(None, description="Filter by list of model IDs")
+     dataset_ids: Optional[str] = Field(None, description="Filter by list of dataset IDs")
+     dataset_version_ids: Optional[str] = Field(None, description="Filter by list of dataset version IDs")
+
+
+ class ListEvalResp(BaseModel):
+     """Response listing evaluation runs"""
+     total: int = Field(description="Total count")
+     page_size: int = Field(description="Page size")
+     page_num: int = Field(description="Page number")
+     data: List[EvalRun] = Field(description="List of evaluation runs")
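
These request models are what the new EvalService (further down) serializes into the request body and query string. A minimal sketch of how they behave, for illustration only; the IDs and paths below are placeholders, not values from this release:

    from aihub.models.eval import CreateEvalReq, ListEvalReq

    # user_id defaults to 0 and report to an empty dict when omitted.
    req = CreateEvalReq(
        dataset_id=1,                                # placeholder IDs
        dataset_version_id=2,
        prediction_artifact_path="/artifacts/pred",  # placeholder paths
        evaled_artifact_path="/artifacts/evaled",
        run_id="run-123",
    )
    body = req.model_dump()  # dict ready to be sent as the POST body

    # Unset filters stay None; the service drops them before building query params.
    query = ListEvalReq(status="completed").model_dump(exclude_none=True)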
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/labelfree.py
@@ -1,12 +1,29 @@
  from __future__ import annotations

+ from enum import Enum
  from typing import Optional

  from pydantic import BaseModel, Field


+ class LabelProjectStatus(Enum):
+     """Labeling project status"""
+
+     Pending = "pending"
+     """Not started"""
+     Loading = "loading"
+     """Data loading"""
+     Error = "failed"
+     """Failed to load data"""
+     In_Progress = "ready"
+     """In progress"""
+     Finished = "finished"
+     """Labeling finished"""
+
+
  class Stats(BaseModel):
      """Labeling statistics"""
+
      total_annotations: int = Field(alias="total_annotations", description="Total data count")
      labeled_annotations: int = Field(alias="labeled_annotations", description="Labeled data count")
      total_labels: int = Field(alias="total_labels", description="Total label count")
@@ -19,9 +36,12 @@ class Stats(BaseModel):

  class GetGlobalStatsResponse(BaseModel):
      """Overall labeling statistics"""
+
      global_stats: Stats = Field(alias="global_stats")
      valid_ten_percent: bool = Field(alias="valid_ten_percent", description="Whether 10% acceptance is complete")
      valid_fifty_percent: bool = Field(alias="valid_fifty_percent", description="Whether 50% acceptance is complete")
      valid_hundred_percent: bool = Field(alias="valid_hundred_percent", description="Whether 100% acceptance is complete")
      data_exported_count: int = Field(alias="data_exported_count", description="Number of data exports")
      exported_dataset_name: str = Field(alias="exported_dataset_name", description="Name of the latest exported dataset")
+     status: LabelProjectStatus = Field(description="Status")
+     model_config = {"use_enum_values": True}
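
The new status field lets a caller poll a labeling project's state directly instead of comparing annotation counts. A minimal polling sketch, assuming an already-configured Client; because GetGlobalStatsResponse enables use_enum_values, status is compared against the enum's string value here:

    import time
    from aihub.models.labelfree import LabelProjectStatus

    def wait_for_labeling(client, project_name: str, interval: int = 5) -> str:
        # Poll global stats until the project is finished and data has been exported,
        # then return the name of the exported dataset.
        while True:
            stats = client.labelfree.get_project_global_stats(project_name)
            if stats.status == LabelProjectStatus.Finished.value and stats.data_exported_count != 0:
                return stats.exported_dataset_name
            time.sleep(interval)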
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/aihub/models/task_center.py
@@ -138,8 +138,8 @@ class LabelValidateStage(Enum):
      """10% stage"""
      FIFTY_PERCENT = "标注阶段(50%)"
      """50% stage"""
-     HUNDRED_PERCENT = "标注阶段(100%)"
-     """100% stage (delivered)"""
+     LABEL_FINISHED = "标注阶段(100%)"
+     """Labeling finished"""


  class LabelValidateReq(BaseModel):
intellif_aihub-0.1.20/src/aihub/services/eval.py
@@ -0,0 +1,140 @@
+ # !/usr/bin/env python
+ # -*- coding:utf-8 -*-
+ """Evaluation-platform service module
+
+ This module provides capabilities around **model evaluation (Run → Report)**:
+
+ - **Create evaluation runs / evaluation reports**
+ - **List evaluation runs**
+ """
+
+ import httpx
+
+ from ..exceptions import APIError
+ from ..models.common import APIWrapper
+ from ..models.eval import CreateEvalReq, CreateEvalResp, ListEvalReq, ListEvalResp
+
+ _BASE = "/eval-platform/api/v1"
+
+
+ class EvalService:
+     """Evaluation service"""
+
+     def __init__(self, http: httpx.Client):
+         self._http = http
+         self._eval = _Eval(http)
+
+     def create(
+         self,
+         dataset_version_name: str,
+         prediction_artifact_path: str,
+         evaled_artifact_path: str,
+         report_json: dict,
+         run_id,
+     ) -> int:
+         """Create an evaluation report
+
+         Args:
+             run_id (str): Run ID
+             report_json (dict): Report content
+             evaled_artifact_path: Path to the evaluation-result artifact
+             prediction_artifact_path: Path to the prediction artifact
+             dataset_version_name (str): Dataset version name
+
+         Returns:
+             id (int): ID of the created evaluation report
+         """
+         from .dataset_management import DatasetManagementService
+
+         dataset_service = DatasetManagementService(self._http)
+         dataset_version = dataset_service.get_dataset_version_by_name(
+             dataset_version_name
+         )
+         payload = CreateEvalReq(
+             dataset_id=dataset_version.dataset_id,
+             dataset_version_id=dataset_version.id,
+             evaled_artifact_path=evaled_artifact_path,
+             prediction_artifact_path=prediction_artifact_path,
+             report=report_json,
+             run_id=run_id,
+         )
+
+         return self._eval.create(payload)
+
+     def list(
+         self,
+         page_size: int = 20,
+         page_num: int = 1,
+         status: str = None,
+         name: str = None,
+         model_id: int = None,
+         dataset_id: int = None,
+         dataset_version_id: int = None,
+         run_id: str = None,
+         user_id: int = None,
+         model_ids: str = None,
+         dataset_ids: str = None,
+         dataset_version_ids: str = None,
+     ) -> ListEvalResp:
+         """List evaluation runs
+
+         Args:
+             page_size (int): Page size, defaults to 20
+             page_num (int): Page number, defaults to 1
+             status (str, optional): Filter by status
+             name (str, optional): Filter by name
+             model_id (int, optional): Filter by model ID
+             dataset_id (int, optional): Filter by dataset ID
+             dataset_version_id (int, optional): Filter by dataset version ID
+             run_id (str, optional): Filter by run ID
+             user_id (int, optional): Filter by user ID
+             model_ids (str, optional): Filter by model IDs (comma-separated)
+             dataset_ids (str, optional): Filter by dataset IDs (comma-separated)
+             dataset_version_ids (str, optional): Filter by dataset version IDs (comma-separated)
+
+         Returns:
+             ListEvalResp: Evaluation-run list response
+         """
+         payload = ListEvalReq(
+             page_size=page_size,
+             page_num=page_num,
+             status=status,
+             name=name,
+             model_id=model_id,
+             dataset_id=dataset_id,
+             dataset_version_id=dataset_version_id,
+             run_id=run_id,
+             user_id=user_id,
+             model_ids=model_ids,
+             dataset_ids=dataset_ids,
+             dataset_version_ids=dataset_version_ids,
+         )
+
+         return self._eval.list(payload)
+
+
+ class _Eval:
+     def __init__(self, http: httpx.Client):
+         self._http = http
+
+     def create(self, payload: CreateEvalReq) -> int:
+         resp = self._http.post(f"{_BASE}/run/", json=payload.model_dump())
+         wrapper = APIWrapper[CreateEvalResp].model_validate(resp.json())
+         if wrapper.code != 0:
+             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
+         return wrapper.data.eval_run.id
+
+     def list(self, payload: ListEvalReq) -> ListEvalResp:
+         # Build query parameters, excluding None values
+         params = {}
+         for field, value in payload.model_dump().items():
+             if value is not None:
+                 params[field] = value
+
+         resp = self._http.get(f"{_BASE}/run/", params=params)
+         wrapper = APIWrapper[ListEvalResp].model_validate(resp.json())
+         if wrapper.code != 0:
+             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
+         return wrapper.data
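
Taken together, the new service lets an SDK user publish an evaluation report and then page through existing runs. A hedged usage sketch, assuming the service is exposed on the client as client.eval (the attribute name is not shown in this diff) and using placeholder names and paths:

    from aihub.client import Client

    client = Client(base_url=BASE_URL, token=TOKEN)  # BASE_URL/TOKEN as in the tests

    # Create an evaluation report for an existing dataset version and run ID.
    report_id = client.eval.create(
        dataset_version_name="my_dataset/V1",          # placeholder dataset version
        prediction_artifact_path="s3://bucket/pred/",  # placeholder artifact paths
        evaled_artifact_path="s3://bucket/evaled/",
        report_json={"accuracy": 0.95},
        run_id="run-123",
    )

    # List completed runs for one model, ten per page.
    page = client.eval.list(status="completed", model_id=1, page_size=10)
    for run in page.data:
        print(run.id, run.name, run.metrics_summary)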
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: intellif-aihub
- Version: 0.1.18
+ Version: 0.1.20
  Summary: Intellif AI-hub SDK.
  Author-email: Platform Team <aihub@example.com>
  License-Expression: Apache-2.0
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/src/intellif_aihub.egg-info/SOURCES.txt
@@ -49,6 +49,7 @@ tests/test_data_warehouse.py
  tests/test_dataset_management.py
  tests/test_di.py
  tests/test_document_center.py
+ tests/test_eval.py
  tests/test_labelfree.py
  tests/test_model_center.py
  tests/test_model_training_platform.py
intellif_aihub-0.1.20/tests/test_eval.py
@@ -0,0 +1,184 @@
+ # !/usr/bin/env python
+ # -*-coding:utf-8 -*-
+ import unittest
+ import uuid
+ from unittest.mock import Mock, patch
+
+ import httpx
+
+ from aihub.services.eval import EvalService
+ from aihub.models.eval import ListEvalResp, EvalRun
+ from aihub.models.common import APIWrapper
+
+ BASE_URL = "http://192.168.13.160:30052"
+ TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NTI1NDQwNDksImlhdCI6MTc1MVkzOTI0OSwidWlkIjoyfQ.MfB_7LK5oR3RAhga3jtgcvJqYESeUPLbz8Bc_y3fouc"
+
+
+ class TestEvalService(unittest.TestCase):
+
+     def setUp(self):
+         self.http_client = Mock(spec=httpx.Client)
+         self.eval_service = EvalService(self.http_client)
+
+     def test_list_eval_runs_default(self):
+         mock_eval_run = {
+             "id": 1,
+             "name": "test_eval",
+             "description": "Test evaluation",
+             "user_id": 1,
+             "model_id": 1,
+             "model_name": "test_model",
+             "dataset_id": 1,
+             "dataset_version_id": 1,
+             "dataset_name": "test_dataset",
+             "status": "completed",
+             "prediction_artifact_path": "/path/to/prediction",
+             "evaled_artifact_path": "/path/to/eval",
+             "run_id": "test_run_123",
+             "dataset_summary": {},
+             "metrics_summary": {"accuracy": 0.95},
+             "viz_summary": {},
+             "eval_config": {"metric": "accuracy"},
+             "created_at": 1640995200,
+             "updated_at": 1640995200
+         }
+
+         mock_response = {
+             "code": 0,
+             "msg": None,
+             "data": {
+                 "total": 1,
+                 "page_size": 20,
+                 "page_num": 1,
+                 "data": [mock_eval_run]
+             }
+         }
+
+         mock_resp = Mock()
+         mock_resp.json.return_value = mock_response
+         self.http_client.get.return_value = mock_resp
+
+         result = self.eval_service.list()
+
+         self.assertIsInstance(result, ListEvalResp)
+         self.assertEqual(result.total, 1)
+         self.assertEqual(result.page_size, 20)
+         self.assertEqual(result.page_num, 1)
+         self.assertEqual(len(result.data), 1)
+         self.assertEqual(result.data[0].id, 1)
+         self.assertEqual(result.data[0].name, "test_eval")
+
+         self.http_client.get.assert_called_once_with(
+             "/eval-platform/api/v1/run/",
+             params={"page_size": 20, "page_num": 1}
+         )
+
+     def test_list_eval_runs_with_filters(self):
+         mock_response = {
+             "code": 0,
+             "msg": None,
+             "data": {
+                 "total": 0,
+                 "page_size": 10,
+                 "page_num": 1,
+                 "data": []
+             }
+         }
+
+         mock_resp = Mock()
+         mock_resp.json.return_value = mock_response
+         self.http_client.get.return_value = mock_resp
+
+         # With filter parameters
+         result = self.eval_service.list(
+             page_size=10,
+             page_num=1,
+             status="completed",
+             name="test",
+             model_id=1,
+             dataset_id=2,
+             dataset_version_id=3,
+             run_id="test_run",
+             user_id=1,
+             model_ids="1,2,3",
+             dataset_ids="2,3,4",
+             dataset_version_ids="3,4,5"
+         )
+
+         self.assertIsInstance(result, ListEvalResp)
+         self.assertEqual(result.total, 0)
+         self.assertEqual(len(result.data), 0)
+
+         expected_params = {
+             "page_size": 10,
+             "page_num": 1,
+             "status": "completed",
+             "name": "test",
+             "model_id": 1,
+             "dataset_id": 2,
+             "dataset_version_id": 3,
+             "run_id": "test_run",
+             "user_id": 1,
+             "model_ids": "1,2,3",
+             "dataset_ids": "2,3,4",
+             "dataset_version_ids": "3,4,5"
+         }
+         self.http_client.get.assert_called_once_with(
+             "/eval-platform/api/v1/run/",
+             params=expected_params
+         )
+
+     def test_list_eval_runs_api_error(self):
+         """Test listing evaluation runs - API error"""
+         # Simulate an API error
+         mock_response = {
+             "code": 1001,
+             "msg": "Database connection failed",
+             "data": None
+         }
+
+         mock_resp = Mock()
+         mock_resp.json.return_value = mock_response
+         self.http_client.get.return_value = mock_resp
+
+         with self.assertRaises(Exception) as context:
+             self.eval_service.list()
+
+         self.assertIn("backend code 1001", str(context.exception))
+         self.assertIn("Database connection failed", str(context.exception))
+
+     def test_list_eval_runs_only_specified_filters(self):
+         mock_response = {
+             "code": 0,
+             "msg": None,
+             "data": {
+                 "total": 0,
+                 "page_size": 20,
+                 "page_num": 1,
+                 "data": []
+             }
+         }
+
+         mock_resp = Mock()
+         mock_resp.json.return_value = mock_response
+         self.http_client.get.return_value = mock_resp
+
+         result = self.eval_service.list(
+             status="completed",
+             model_id=1
+         )
+
+         expected_params = {
+             "page_size": 20,
+             "page_num": 1,
+             "status": "completed",
+             "model_id": 1
+         }
+         self.http_client.get.assert_called_once_with(
+             "/eval-platform/api/v1/run/",
+             params=expected_params
+         )
+
+
+ if __name__ == "__main__":
+     unittest.main()
{intellif_aihub-0.1.18 → intellif_aihub-0.1.20}/tests/test_task_center.py
@@ -12,14 +12,16 @@ TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQ5MDY2ODUwODAsImlhdCI6MT

  class TestTaskCenter(unittest.TestCase):
      def test_create_label_task(self):
-         from src.aihub.models.task_center import LabelProjectTypeEnum
+         from src.aihub.models.task_center import LabelProjectTypeEnum, LabelValidateStage
          from src.aihub.client import Client
          from src.aihub.models.task_center import TaskCenterPriorityEnum
+         from src.aihub.models.labelfree import LabelProjectStatus
+         import time

          # Create the task
          client = Client(base_url=BASE_URL, token=TOKEN)
          task_id = client.task_center.create_label_task(
-             name="test_task",
+             name="test_tas2k232",
              dataset_version_name="re/V1",
              feishu_doc_name="人脸质量人脸照片分类",
              task_receiver_name="hyc",
@@ -34,18 +36,24 @@ class TestTaskCenter(unittest.TestCase):
              # Get task info
              task_item = client.task_center.get(task_id)
              # Use the SDK
+             if not task_item.other_info.label_projects:
+                 print("Task not finished yet, waiting...")
+                 time.sleep(5)
+                 continue
              p = task_item.other_info.label_projects[0]
              label_stats = client.labelfree.get_project_global_stats(p.label_project_name)
              # Wait for labeling to finish, 100% acceptance to pass, and data to be exported
-             if (
-                 (label_stats.global_stats.total_annotations == label_stats.global_stats.labeled_annotations)
-                 and (label_stats.valid_hundred_percent)
-                 and label_stats.data_exported_count != 0
-             ):
+             if label_stats.status == LabelProjectStatus.Finished and label_stats.data_exported_count != 0:
                  exported_dataset_name = label_stats.exported_dataset_name
+                 client.task_center.validate_label_project(
+                     task_id, p.label_project_name, LabelValidateStage.LABEL_FINISHED, True
+                 )

-                 client.task_center.validate_label_project(task_id, {})
                  break
+             else:
+                 print("Task not finished yet, waiting...")
+                 time.sleep(5)
+                 continue

          # Download the data
          client.dataset_management.run_download(exported_dataset_name, local_dir="./output")
@@ -56,4 +64,10 @@ class TestTaskCenter(unittest.TestCase):
          from src.aihub.models.task_center import LabelValidateStage

          client = Client(base_url=BASE_URL, token=TOKEN)
-         client.task_center.validate_label_project(1923, "project_893437", LabelValidateStage.HUNDRED_PERCENT, True)
+         client.task_center.validate_label_project(1923, "project_893437", LabelValidateStage.LABEL_FINISHED, True)
+
+     def test_get_label_project(self):
+         from src.aihub.client import Client
+
+         client = Client(base_url=BASE_URL, token=TOKEN)
+         client.labelfree.get_project_global_stats("project_889552")
intellif_aihub-0.1.18/src/aihub/__init__.py
@@ -1 +0,0 @@
- __version__ = "0.1.18"
intellif_aihub-0.1.18/src/aihub/models/eval.py
@@ -1,26 +0,0 @@
- # !/usr/bin/env python
- # -*-coding:utf-8 -*-
- from typing import Dict
-
- from pydantic import BaseModel, Field
-
-
- class CreateEvalReq(BaseModel):
-     """Request to create an evaluation run"""
-     dataset_id: int = Field(description="Dataset ID")
-     dataset_version_id: int = Field(description="Dataset version ID")
-     prediction_artifact_path: str = Field(description="Path to the prediction artifact")
-     evaled_artifact_path: str = Field(description="Path to the evaluation-result artifact")
-     run_id: str = Field(description="Run ID")
-     user_id: int = Field(0, description="User ID")
-     report: Dict = Field(default_factory=dict, description="Evaluation report")
-
-
- class EvalRun(BaseModel):
-     """Evaluation run entity"""
-     id: int = Field(description="Evaluation run ID")
-
-
- class CreateEvalResp(BaseModel):
-     """Response returned when an evaluation run is created"""
-     eval_run: EvalRun = Field(alias="eval_run", description="Evaluation run info")
intellif_aihub-0.1.18/src/aihub/services/eval.py
@@ -1,75 +0,0 @@
- # !/usr/bin/env python
- # -*- coding:utf-8 -*-
- """Evaluation-platform service module
-
- This module provides capabilities around **model evaluation (Run → Report)**:
-
- - **Create evaluation runs / evaluation reports**
- """
-
- import httpx
-
- from ..exceptions import APIError
- from ..models.common import APIWrapper
- from ..models.eval import CreateEvalReq, CreateEvalResp
-
- _BASE = "/eval-platform/api/v1"
-
-
- class EvalService:
-     """Evaluation service"""
-
-     def __init__(self, http: httpx.Client):
-         self._http = http
-         self._eval = _Eval(http)
-
-     def create(
-         self,
-         dataset_version_name: str,
-         prediction_artifact_path: str,
-         evaled_artifact_path: str,
-         report_json: dict,
-         run_id,
-     ) -> int:
-         """Create an evaluation report
-
-         Args:
-             run_id (str): Run ID
-             report_json (dict): Report content
-             evaled_artifact_path: Path to the evaluation-result artifact
-             prediction_artifact_path: Path to the prediction artifact
-             dataset_version_name (str): Dataset version name
-
-         Returns:
-             id (int): ID of the created evaluation report
-         """
-         from .dataset_management import DatasetManagementService
-
-         dataset_service = DatasetManagementService(self._http)
-         dataset_version = dataset_service.get_dataset_version_by_name(
-             dataset_version_name
-         )
-         payload = CreateEvalReq(
-             dataset_id=dataset_version.dataset_id,
-             dataset_version_id=dataset_version.id,
-             evaled_artifact_path=evaled_artifact_path,
-             prediction_artifact_path=prediction_artifact_path,
-             report=report_json,
-             run_id=run_id,
-         )
-
-         return self._eval.create(payload)
-
-
- class _Eval:
-     def __init__(self, http: httpx.Client):
-         self._http = http
-
-     def create(self, payload: CreateEvalReq) -> int:
-         resp = self._http.post(f"{_BASE}/run/", json=payload.model_dump())
-         wrapper = APIWrapper[CreateEvalResp].model_validate(resp.json())
-         if wrapper.code != 0:
-             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
-         return wrapper.data.eval_run.id