intellif-aihub 0.1.12__tar.gz → 0.1.14__tar.gz

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release.


Files changed (62)
  1. {intellif_aihub-0.1.12/src/intellif_aihub.egg-info → intellif_aihub-0.1.14}/PKG-INFO +2 -1
  2. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/pyproject.toml +3 -2
  3. intellif_aihub-0.1.14/src/aihub/__init__.py +1 -0
  4. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/dataset_management.py +68 -0
  5. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/dataset_management.py +84 -0
  6. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/utils/http.py +4 -3
  7. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14/src/intellif_aihub.egg-info}/PKG-INFO +2 -1
  8. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/intellif_aihub.egg-info/requires.txt +1 -0
  9. intellif_aihub-0.1.14/tests/test_dataset_management.py +124 -0
  10. intellif_aihub-0.1.12/src/aihub/__init__.py +0 -1
  11. intellif_aihub-0.1.12/tests/test_dataset_management.py +0 -54
  12. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/LICENSE +0 -0
  13. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/README.md +0 -0
  14. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/setup.cfg +0 -0
  15. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/client.py +0 -0
  16. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/exceptions.py +0 -0
  17. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/__init__.py +0 -0
  18. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/artifact.py +0 -0
  19. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/common.py +0 -0
  20. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/data_warehouse.py +0 -0
  21. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/document_center.py +0 -0
  22. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/eval.py +0 -0
  23. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/labelfree.py +0 -0
  24. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/model_center.py +0 -0
  25. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/model_training_platform.py +0 -0
  26. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/quota_schedule_management.py +0 -0
  27. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/tag_resource_management.py +0 -0
  28. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/task_center.py +0 -0
  29. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/user_system.py +0 -0
  30. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/workflow_center.py +0 -0
  31. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/__init__.py +0 -0
  32. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/artifact.py +0 -0
  33. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/data_warehouse.py +0 -0
  34. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/document_center.py +0 -0
  35. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/eval.py +0 -0
  36. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/labelfree.py +0 -0
  37. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/model_center.py +0 -0
  38. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/model_training_platform.py +0 -0
  39. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/quota_schedule_management.py +0 -0
  40. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/reporter.py +0 -0
  41. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/tag_resource_management.py +0 -0
  42. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/task_center.py +0 -0
  43. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/user_system.py +0 -0
  44. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/workflow_center.py +0 -0
  45. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/utils/__init__.py +0 -0
  46. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/utils/download.py +0 -0
  47. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/utils/s3.py +0 -0
  48. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/intellif_aihub.egg-info/SOURCES.txt +0 -0
  49. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/intellif_aihub.egg-info/dependency_links.txt +0 -0
  50. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/intellif_aihub.egg-info/top_level.txt +0 -0
  51. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_artifact.py +0 -0
  52. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_data_warehouse.py +0 -0
  53. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_document_center.py +0 -0
  54. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_labelfree.py +0 -0
  55. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_model_center.py +0 -0
  56. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_model_training_platform.py +0 -0
  57. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_quota_schedule_management.py +0 -0
  58. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_s3.py +0 -0
  59. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_tag_resource_management.py +0 -0
  60. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_task_center.py +0 -0
  61. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_user_system.py +0 -0
  62. {intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/tests/test_workflow_center.py +0 -0
{intellif_aihub-0.1.12/src/intellif_aihub.egg-info → intellif_aihub-0.1.14}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: intellif-aihub
-Version: 0.1.12
+Version: 0.1.14
 Summary: Intellif AI-hub SDK.
 Author-email: Platform Team <aihub@example.com>
 License-Expression: Apache-2.0
@@ -17,6 +17,7 @@ Requires-Dist: pyarrow>=21.0.0
 Requires-Dist: tqdm<5.0,>=4.66
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: minio>=7.2.7
+Requires-Dist: requests>=2.32.4
 Dynamic: license-file
 
 # Intellif AI-Hub SDK
{intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "intellif-aihub"
-version = "0.1.12"
+version = "0.1.14"
 description = "Intellif AI-hub SDK."
 readme = {file = "README.md", content-type = "text/markdown"}
 requires-python = ">=3.9"
@@ -19,6 +19,7 @@ dependencies = [
     "tqdm>=4.66,<5.0",
     "loguru>=0.7.3",
     "minio>=7.2.7",
+    "requests>=2.32.4",
 ]
 
 
@@ -64,4 +65,4 @@ exclude = '''
   | build
   | dist
 )/
-'''
+'''
intellif_aihub-0.1.14/src/aihub/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.14"
{intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/models/dataset_management.py
@@ -135,3 +135,71 @@ class FileUploadData(BaseModel):
     """File upload data"""
     path: str = Field(description="Path")
     url: str = Field(description="URL")
+
+
+class ListDatasetReq(BaseModel):
+    """List-datasets request (uses dataset_management v2)"""
+    page_size: int = Field(20, alias="page_size", description="Page size, default 20")
+    page_num: int = Field(1, alias="page_num", description="Page number, starting from 1")
+    name: Optional[str] = Field(None, description="Filter by dataset name")
+    tags: Optional[str] = Field(None, description="Filter by tags")
+    create_by: Optional[int] = Field(None, alias="create_by", description="Filter by creator")
+    scope: Optional[str] = Field("all", description="Scope filter: created|shared|all")
+
+
+class ListDatasetItem(BaseModel):
+    """Dataset list item"""
+    id: int = Field(description="Dataset ID")
+    name: str = Field(description="Dataset name")
+    description: str = Field(description="Dataset description")
+    cover_img: str = Field(alias="cover_img", description="Cover image")
+    created_at: int = Field(alias="created_at", description="Creation timestamp")
+    updated_at: int = Field(alias="update_at", description="Update timestamp")
+    user_id: int = Field(alias="user_id", description="Creator ID")
+    username: str = Field(description="Creator username")
+    tags: Optional[List[int]] = Field(None, description="Tag list")
+    access_user_ids: Optional[List[int]] = Field(None, alias="access_user_ids", description="IDs of users with access")
+    is_private: bool = Field(alias="is_private", description="Whether the dataset is private")
+
+
+class ListDatasetResp(BaseModel):
+    """List-datasets response"""
+    total: int = Field(description="Total count")
+    page_size: int = Field(alias="page_size", description="Page size")
+    page_num: int = Field(alias="page_num", description="Current page number")
+    data: List[ListDatasetItem] = Field(description="Dataset list")
+
+
+class ListDatasetVersionReq(BaseModel):
+    """List-dataset-versions request (uses dataset_management v2)"""
+    page_size: int = Field(10000000, alias="page_size", description="Page size, default 10000000")
+    page_num: int = Field(1, alias="page_num", description="Page number, starting from 1")
+    dataset_id: Optional[int] = Field(None, alias="dataset_id", description="Filter by dataset ID")
+    dataset_version_ids: Optional[str] = Field(None, alias="dataset_version_ids", description="Comma-separated dataset version IDs")
+
+
+class ListDatasetVersionItem(BaseModel):
+    """Dataset version list item"""
+    id: int = Field(description="Version ID")
+    version: int = Field(description="Version number")
+    dataset_id: int = Field(alias="dataset_id", description="Dataset ID")
+    upload_path: str = Field(alias="upload_path", description="Upload path")
+    upload_type: int = Field(alias="upload_type", description="Upload type")
+    parent_version_id: Optional[int] = Field(None, alias="parent_version_id", description="Parent version ID")
+    description: Optional[str] = Field(None, description="Version description")
+    status: int = Field(description="Version status")
+    message: str = Field(description="Status message")
+    created_at: int = Field(alias="created_at", description="Creation timestamp")
+    user_id: int = Field(alias="user_id", description="Creator ID")
+    data_size: int = Field(alias="data_size", description="Data size")
+    data_count: int = Field(alias="data_count", description="Number of records")
+    username: str = Field(description="Creator username")
+    dataset_name: str = Field(alias="dataset_name", description="Dataset name")
+
+
+class ListDatasetVersionResp(BaseModel):
+    """List-dataset-versions response"""
+    total: int = Field(description="Total count")
+    page_size: int = Field(alias="page_size", description="Page size")
+    page_num: int = Field(alias="page_num", description="Current page number")
+    data: List[ListDatasetVersionItem] = Field(description="Dataset version list")
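
For orientation, the new list models serialize into query-string parameters via their aliases. A minimal sketch of the intended round trip, assuming a Pydantic v2 runtime and the installed import path aihub.models.dataset_management (both are assumptions, not part of this diff):

# Hypothetical usage sketch; import path and Pydantic v2 behavior are assumed.
from aihub.models.dataset_management import ListDatasetReq

req = ListDatasetReq(name="faces", page_size=5)
# exclude_none drops the unset optional filters; by_alias emits the wire names.
print(req.model_dump(by_alias=True, exclude_none=True))
# {'page_size': 5, 'page_num': 1, 'name': 'faces', 'scope': 'all'}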
{intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/services/dataset_management.py
@@ -7,6 +7,8 @@
 - **Create a dataset and its versions** (supports both local upload and existing server files)
 - **Upload files to object storage** (large files are automatically chunked)
 - **Query dataset / dataset version details**
+- **List and search datasets** (with pagination and filtering)
+- **List dataset versions** (with pagination and filtering by dataset ID)
 - **Download dataset files by version name or ID**
 """
 
@@ -34,6 +36,10 @@ from ..models.dataset_management import (
     DatasetVersionDetail,
     UploadDatasetVersionResponse,
     FileUploadData,
+    ListDatasetReq,
+    ListDatasetResp,
+    ListDatasetVersionReq,
+    ListDatasetVersionResp,
 )
 from ..models.dataset_management import DatasetVersionStatus
 from ..utils.download import dataset_download, zip_dir
@@ -270,6 +276,66 @@ class DatasetManagementService:
             raise APIError("parquet_index_path is empty")
         dataset_download(detail.parquet_index_path, local_dir, worker)
 
+    def list_datasets(
+        self,
+        *,
+        page_size: int = 20,
+        page_num: int = 1,
+        name: str | None = None,
+        tags: str | None = None,
+        create_by: int | None = None,
+        scope: str = "all"
+    ) -> ListDatasetResp:
+        """List datasets.
+
+        Args:
+            page_size: Page size, default 20.
+            page_num: Page number, starting from 1, default 1.
+            name: Optional filter by dataset name.
+            tags: Optional filter by tags.
+            create_by: Optional filter by creator.
+            scope: Scope filter: created|shared|all, default "all".
+
+        Returns:
+            ListDatasetResp: Paginated response containing the dataset list.
+        """
+        payload = ListDatasetReq(
+            page_size=page_size,
+            page_num=page_num,
+            name=name,
+            tags=tags,
+            create_by=create_by,
+            scope=scope
+        )
+        return self._dataset.list_datasets(payload)
+
+    def list_dataset_versions(
+        self,
+        *,
+        page_size: int = 10000000,
+        page_num: int = 1,
+        dataset_id: int | None = None,
+        dataset_version_ids: str | None = None
+    ) -> ListDatasetVersionResp:
+        """List dataset versions.
+
+        Args:
+            page_size: Page size, default 10000000.
+            page_num: Page number, starting from 1, default 1.
+            dataset_id: Optional filter by dataset ID.
+            dataset_version_ids: Optional comma-separated dataset version IDs.
+
+        Returns:
+            ListDatasetVersionResp: Paginated response containing the dataset version list.
+        """
+        payload = ListDatasetVersionReq(
+            page_size=page_size,
+            page_num=page_num,
+            dataset_id=dataset_id,
+            dataset_version_ids=dataset_version_ids
+        )
+        return self._dataset_version.list_dataset_versions(payload)
+
 
 class _Dataset:
     def __init__(self, http: httpx.Client):
@@ -292,6 +358,15 @@ class _Dataset:
             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
         return wrapper.data
 
+    def list_datasets(self, payload: ListDatasetReq) -> ListDatasetResp:
+        """List datasets."""
+        params = payload.model_dump(by_alias=True, exclude_none=True)
+        resp = self._http.get(f"{_BASE}/datasets", params=params)
+        wrapper = APIWrapper[ListDatasetResp].model_validate(resp.json())
+        if wrapper.code != 0:
+            raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
+        return wrapper.data
+
 
 class _DatasetVersion:
     def __init__(self, http: httpx.Client):
@@ -333,6 +408,15 @@ class _DatasetVersion:
             raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
         return wrapper.data
 
+    def list_dataset_versions(self, payload: ListDatasetVersionReq) -> ListDatasetVersionResp:
+        """List dataset versions."""
+        params = payload.model_dump(by_alias=True, exclude_none=True)
+        resp = self._http.get(f"{_BASE}/dataset-versions", params=params)
+        wrapper = APIWrapper[ListDatasetVersionResp].model_validate(resp.json())
+        if wrapper.code != 0:
+            raise APIError(f"backend code {wrapper.code}: {wrapper.msg}")
+        return wrapper.data
+
 
 class _Upload:
     def __init__(self, http: httpx.Client):
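
Taken together, the two private helpers are surfaced through DatasetManagementService, which the client facade exposes as dataset_management (as the new tests below exercise). A minimal usage sketch, with a placeholder base URL and token and an assumed installed import path:

# Hypothetical usage sketch; endpoint and credentials are placeholders.
from aihub.client import Client  # import path assumed from the package layout

client = Client(base_url="http://aihub.example.internal", token="<token>")

# Page through datasets created by the caller.
page = client.dataset_management.list_datasets(page_size=20, page_num=1, scope="created")
for ds in page.data:
    print(ds.id, ds.name, ds.username)

# List every version of the first dataset; the very large default page_size
# effectively disables pagination.
if page.data:
    versions = client.dataset_management.list_dataset_versions(dataset_id=page.data[0].id)
    print(versions.total)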
{intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/aihub/utils/http.py
@@ -1,13 +1,14 @@
 from __future__ import annotations
 
-import httpx
 import os
 
+import requests
+
 
 def http_download_file(url: str, dst_path: str, chunk: int = 1 << 16) -> None:
     os.makedirs(os.path.dirname(dst_path), exist_ok=True)
-    with httpx.stream("GET", url, follow_redirects=True, timeout=None) as r:
+    with requests.get(url, timeout=None, stream=True) as r:
         r.raise_for_status()
         with open(dst_path, "wb") as f:
-            for block in r.iter_bytes(chunk):
+            for block in r.iter_content(chunk):
                 f.write(block)
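
The helper's call shape is unchanged by the switch to requests; note that requests follows redirects by default, so the explicit follow_redirects flag is no longer needed. A usage sketch with a placeholder URL and destination path (the installed import path is assumed):

# Hypothetical usage sketch.
from aihub.utils.http import http_download_file

# Streams the body in 64 KiB chunks (1 << 16) and creates the parent directory first.
http_download_file("https://files.example.com/sample.zip", "/tmp/aihub/sample.zip")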
{intellif_aihub-0.1.12 → intellif_aihub-0.1.14/src/intellif_aihub.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: intellif-aihub
-Version: 0.1.12
+Version: 0.1.14
 Summary: Intellif AI-hub SDK.
 Author-email: Platform Team <aihub@example.com>
 License-Expression: Apache-2.0
@@ -17,6 +17,7 @@ Requires-Dist: pyarrow>=21.0.0
 Requires-Dist: tqdm<5.0,>=4.66
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: minio>=7.2.7
+Requires-Dist: requests>=2.32.4
 Dynamic: license-file
 
 # Intellif AI-Hub SDK
{intellif_aihub-0.1.12 → intellif_aihub-0.1.14}/src/intellif_aihub.egg-info/requires.txt
@@ -5,3 +5,4 @@ pyarrow>=21.0.0
 tqdm<5.0,>=4.66
 loguru>=0.7.3
 minio>=7.2.7
+requests>=2.32.4
intellif_aihub-0.1.14/tests/test_dataset_management.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+import unittest
+import uuid
+
+from src.aihub.client import Client
+
+BASE_URL = "http://192.168.13.160:30021"
+TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQ5MDY2ODUwODAsImlhdCI6MTc1MzA4NTA4MCwidWlkIjoxMH0.89bQ66BJDGoCzwxuxugRRt9acPFKEVmgqXMZX7ApnhM"
+
+
+class TestDatasetManagement(unittest.TestCase):
+    def test_create_dataset_and_version(self):
+        client = Client(base_url=BASE_URL, token=TOKEN)
+        dataset_name = f"sdk_dataset_{uuid.uuid4().hex[:6]}"
+        dataset_id, dataset_version_id, version_tag = (
+            client.dataset_management.create_dataset_and_version(
+                dataset_name=dataset_name,
+                dataset_description="xxxxx",
+                is_local_upload=True,
+                local_file_path=r"C:\Users\admin\Desktop\hbase\images.zip",
+                server_file_path="",
+                version_description="yyyyy",
+            )
+        )
+        print("dataset_id:", dataset_id)
+        print("dataset_version_id:", dataset_version_id)
+        print("version_tag:", version_tag)
+
+    def test_run_download(self):
+        client = Client(base_url=BASE_URL, token=TOKEN)
+        client.dataset_management.run_download(
+            dataset_version_name="re/V12",
+            local_dir=r"C:\Users\admin\Downloads\ljn",
+            worker=4,
+        )
+        print("Done!")
+
+    def test_upload_dir(self):
+        client = Client(base_url=BASE_URL, token=TOKEN)
+        dataset_name = f"sdk_dataset_{uuid.uuid4().hex[:6]}"
+        dataset_id, dataset_version_id, version_tag = (
+            client.dataset_management.create_dataset_and_version(
+                dataset_name=dataset_name,
+                dataset_description="xxxxx",
+                is_local_upload=True,
+                local_file_path="./data",
+                server_file_path="",
+                version_description="yyyyy",
+            )
+        )
+        print("dataset_id:", dataset_id)
+        print("dataset_version_id:", dataset_version_id)
+        print("version_tag:", version_tag)
+
+    def test_list_datasets(self):
+        client = Client(base_url=BASE_URL, token=TOKEN)
+
+        # Test basic list with default parameters
+        datasets_resp = client.dataset_management.list_datasets()
+
+        print(f"Total datasets: {datasets_resp.total}")
+        print(f"Page size: {datasets_resp.page_size}")
+        print(f"Page number: {datasets_resp.page_num}")
+        print(f"Number of datasets in current page: {len(datasets_resp.data)}")
+
+        # Print first few datasets if any
+        for i, dataset in enumerate(datasets_resp.data[:3]):
+            print(f"Dataset {i+1}: ID={dataset.id}, Name={dataset.name}, Description={dataset.description}")
+
+        # Test with custom page size
+        datasets_resp_custom = client.dataset_management.list_datasets(page_size=5, page_num=1)
+        print(f"Custom page size test - Total: {datasets_resp_custom.total}, Page size: {datasets_resp_custom.page_size}")
+
+        # Test with name filter
+        if datasets_resp.data:
+            first_dataset_name = datasets_resp.data[0].name
+            datasets_resp_filtered = client.dataset_management.list_datasets(name=first_dataset_name)
+            print(f"Filtered by name '{first_dataset_name}': {len(datasets_resp_filtered.data)} results")
+
+    def test_list_dataset_versions(self):
+        client = Client(base_url=BASE_URL, token=TOKEN)
+
+        # First get list of datasets to find a valid dataset_id
+        datasets_resp = client.dataset_management.list_datasets(page_size=5)
+
+        if not datasets_resp.data:
+            print("No datasets found, creating one for testing...")
+            # Create a dataset for testing
+            dataset_name = f"test_list_versions_{uuid.uuid4().hex[:6]}"
+            dataset_id, _, _ = client.dataset_management.create_dataset_and_version(
+                dataset_name=dataset_name,
+                dataset_description="Test dataset for list versions",
+                is_local_upload=True,
+                local_file_path="./data",
+                version_description="Test version",
+            )
+        else:
+            dataset_id = datasets_resp.data[0].id
+            print(f"Using existing dataset ID: {dataset_id}")
+
+        # Test basic list without dataset_id filter (all versions)
+        versions_resp = client.dataset_management.list_dataset_versions()
+
+        print(f"Total dataset versions: {versions_resp.total}")
+        print(f"Page size: {versions_resp.page_size}")
+        print(f"Page number: {versions_resp.page_num}")
+        print(f"Number of versions in current page: {len(versions_resp.data)}")
+
+        # Print first few versions if any
+        for i, version in enumerate(versions_resp.data[:3]):
+            print(f"Version {i+1}: ID={version.id}, Dataset={version.dataset_name}, Version={version.version}, Status={version.status}")
+
+        # Test with dataset_id filter
+        versions_resp_filtered = client.dataset_management.list_dataset_versions(dataset_id=dataset_id)
+        print(f"Versions for dataset {dataset_id}: {len(versions_resp_filtered.data)} results")
+
+        if versions_resp_filtered.data:
+            for version in versions_resp_filtered.data:
+                print(f"  Version: ID={version.id}, Version={version.version}, Status={version.status}")
+
+        # Test with custom page size
+        versions_resp_custom = client.dataset_management.list_dataset_versions(page_size=10, page_num=1)
+        print(f"Custom page size test - Total: {versions_resp_custom.total}, Page size: {versions_resp_custom.page_size}")
intellif_aihub-0.1.12/src/aihub/__init__.py
@@ -1 +0,0 @@
-__version__ = "0.1.12"
intellif_aihub-0.1.12/tests/test_dataset_management.py
@@ -1,54 +0,0 @@
-from __future__ import annotations
-
-import unittest
-import uuid
-
-from src.aihub.client import Client
-
-BASE_URL = "http://192.168.13.160:30021"
-TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQ5MDY2ODUwODAsImlhdCI6MTc1MzA4NTA4MCwidWlkIjoxMH0.89bQ66BJDGoCzwxuxugRRt9acPFKEVmgqXMZX7ApnhM"
-
-
-class TestDatasetManagement(unittest.TestCase):
-    def test_create_dataset_and_version(self):
-        client = Client(base_url=BASE_URL, token=TOKEN)
-        dataset_name = f"sdk_dataset_{uuid.uuid4().hex[:6]}"
-        dataset_id, dataset_version_id, version_tag = (
-            client.dataset_management.create_dataset_and_version(
-                dataset_name=dataset_name,
-                dataset_description="xxxxx",
-                is_local_upload=True,
-                local_file_path=r"C:\Users\admin\Desktop\hbase\images.zip",
-                server_file_path="",
-                version_description="yyyyy",
-            )
-        )
-        print("dataset_id:", dataset_id)
-        print("dataset_version_id:", dataset_version_id)
-        print("version_tag:", version_tag)
-
-    def test_run_download(self):
-        client = Client(base_url=BASE_URL, token=TOKEN)
-        client.dataset_management.run_download(
-            dataset_version_name="re/V12",
-            local_dir=r"C:\Users\admin\Downloads\ljn",
-            worker=4,
-        )
-        print("Done!")
-
-    def test_upload_dir(self):
-        client = Client(base_url=BASE_URL, token=TOKEN)
-        dataset_name = f"sdk_dataset_{uuid.uuid4().hex[:6]}"
-        dataset_id, dataset_version_id, version_tag = (
-            client.dataset_management.create_dataset_and_version(
-                dataset_name=dataset_name,
-                dataset_description="xxxxx",
-                is_local_upload=True,
-                local_file_path="./data",
-                server_file_path="",
-                version_description="yyyyy",
-            )
-        )
-        print("dataset_id:", dataset_id)
-        print("dataset_version_id:", dataset_version_id)
-        print("version_tag:", version_tag)