unitlab 2.1.2.tar.gz → 2.1.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {unitlab-2.1.2/src/unitlab.egg-info → unitlab-2.1.4}/PKG-INFO +1 -1
- {unitlab-2.1.2 → unitlab-2.1.4}/setup.py +1 -1
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/client.py +57 -31
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/dataset.py +15 -35
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/main.py +16 -5
- {unitlab-2.1.2 → unitlab-2.1.4/src/unitlab.egg-info}/PKG-INFO +1 -1
- {unitlab-2.1.2 → unitlab-2.1.4}/LICENSE.md +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/README.md +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/setup.cfg +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/__init__.py +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/__main__.py +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/exceptions.py +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab/utils.py +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab.egg-info/SOURCES.txt +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab.egg-info/dependency_links.txt +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab.egg-info/entry_points.txt +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab.egg-info/requires.txt +0 -0
- {unitlab-2.1.2 → unitlab-2.1.4}/src/unitlab.egg-info/top_level.txt +0 -0
src/unitlab/client.py

@@ -244,38 +244,13 @@ class UnitlabClient:
 
         asyncio.run(main())
 
-    def create_dataset(self, name, annotation_type, categories, license_id=None):
-        response = self._post(
-            "/api/sdk/datasets/create/",
-            data={
-                "name": name,
-                "annotation_type": annotation_type,
-                "classes": [
-                    {"name": category["name"], "value": category["id"]}
-                    for category in categories
-                ],
-                "license": license_id,
-            },
-        )
-        return response["pk"]
-
-    def finalize_dataset(self, dataset_id):
+    def _finalize_dataset(self, dataset_id):
         return self._post(f"/api/sdk/datasets/{dataset_id}/finalize/")
 
-    def dataset_upload(
-        self,
-        name,
-        annotation_type,
-        annotation_path,
-        data_path,
-        license_id=None,
-        batch_size=15,
+    def _dataset_data_upload(
+        self, dataset_id, upload_handler: DatasetUploadHandler, batch_size=15
     ):
-        handler = DatasetUploadHandler(annotation_type, annotation_path, data_path)
-        dataset_id = self.create_dataset(
-            name, annotation_type, handler.categories, license_id=license_id
-        )
-        image_ids = handler.getImgIds()
+        image_ids = upload_handler.getImgIds()
         url = urllib.parse.urljoin(
             self.api_url, f"/api/sdk/datasets/{dataset_id}/upload/"
         )
@@ -294,7 +269,7 @@ class UnitlabClient:
                     )
                 ]:
                     tasks.append(
-                        handler.upload_image(session, url, image_id)
+                        upload_handler.upload_image(session, url, image_id)
                     )
                 for f in asyncio.as_completed(tasks):
                     try:
@@ -305,4 +280,55 @@ class UnitlabClient:
                         raise e
 
         asyncio.run(main())
-
+
+    def dataset_upload(
+        self,
+        name,
+        annotation_type,
+        annotation_path,
+        data_path,
+        license_id=None,
+        batch_size=15,
+    ):
+        upload_handler = DatasetUploadHandler(
+            annotation_type, annotation_path, data_path
+        )
+        dataset_id = self._post(
+            "/api/sdk/datasets/create/",
+            data={
+                "name": name,
+                "annotation_type": annotation_type,
+                "classes": [
+                    {"name": category["name"], "value": category["id"]}
+                    for category in upload_handler.categories
+                ],
+                "license": license_id,
+            },
+        )["pk"]
+        self._dataset_data_upload(dataset_id, upload_handler, batch_size=batch_size)
+        self._finalize_dataset(dataset_id)
+
+    def dataset_update(self, pk, annotation_path, data_path, batch_size=15):
+        dataset = self._get(f"api/sdk/datasets/{pk}/")
+        upload_handler = DatasetUploadHandler(
+            dataset["annotation_type"], annotation_path, data_path
+        )
+        new_dataset = self._post(
+            f"/api/sdk/datasets/{pk}/update/",
+            data={
+                "classes": [
+                    {"name": category["name"], "value": category["id"]}
+                    for category in sorted(
+                        upload_handler.loadCats(upload_handler.getCatIds()),
+                        key=lambda x: x["id"],
+                    )
+                ]
+            },
+        )
+        upload_handler.original_category_referecences = {
+            int(k): v for k, v in new_dataset["original_category_referecences"].items()
+        }
+        self._dataset_data_upload(
+            new_dataset["pk"], upload_handler, batch_size=batch_size
+        )
+        self._finalize_dataset(new_dataset["pk"])
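Taken together, the client.py changes fold dataset creation, upload, and finalization into a single public dataset_upload call, and add a public dataset_update for pushing new data into an existing dataset. A minimal usage sketch, assuming UnitlabClient is exported at the package root and takes the API key as a constructor argument (neither is confirmed by the diff; the method signatures are):

    from unitlab import UnitlabClient  # import path and constructor are assumptions

    client = UnitlabClient(api_key="...")

    # 2.1.4 creates, uploads, and finalizes the dataset in one call
    client.dataset_upload(
        name="my-dataset",
        annotation_type="img_bbox",
        annotation_path="annotations.json",  # COCO-style JSON, per the CLI help text
        data_path="images/",
    )

    # New in 2.1.4: append data to an existing dataset by primary key
    client.dataset_update("<dataset-uuid>", "annotations.json", "images/")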
src/unitlab/dataset.py

@@ -1,4 +1,5 @@
 import asyncio
+import copy
 import itertools
 import json
 import logging
@@ -39,18 +40,6 @@ class COCO:
                 self.data_path
             )
         )
-        if self.annotation_type not in [
-            "img_bbox",
-            "img_semantic_segmentation",
-            "img_instance_segmentation",
-            "img_polygon",
-            "img_keypoints",
-        ]:
-            raise ValueError(
-                "Invalid annotation type '{}'. Supported types are: ['img_bbox', 'img_semantic_segmentation', 'img_polygon', 'img_keypoints']".format(
-                    self.annotation_type
-                )
-            )
         for required_key in ["images", "annotations", "categories"]:
             if required_key not in self.dataset.keys():
                 raise KeyError(
@@ -91,7 +80,9 @@ class COCO:
         self.catToImgs = catToImgs
         self.imgs = imgs
         self.cats = cats
-        self.categories = sorted(self.loadCats(self.getCatIds()), key=lambda x: x["id"])
+        self.categories = sorted(
+            copy.deepcopy(self.loadCats(self.getCatIds())), key=lambda x: x["id"]
+        )
         self.classes = [cat["name"] for cat in self.categories]
         self.original_category_referecences = dict()
         for i, category in enumerate(self.categories):
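The copy.deepcopy added here reads as an aliasing guard: loadCats presumably returns the same dict objects held in self.cats, and the loop below renumbers self.categories to build original_category_referecences, so without a deep copy that renumbering would silently rewrite self.cats too. A standalone illustration of the hazard (the renumbering step is an assumption based on the surrounding code):

    import copy

    cats = {7: {"id": 7, "name": "car"}}
    categories = sorted(cats.values(), key=lambda c: c["id"])  # aliases the dicts in cats
    categories[0]["id"] = 0
    print(cats[7]["id"])   # 0 -- the source mapping was mutated

    cats = {7: {"id": 7, "name": "car"}}
    categories = sorted(copy.deepcopy(list(cats.values())), key=lambda c: c["id"])
    categories[0]["id"] = 0
    print(cats[7]["id"])   # 7 -- the deep copy keeps cats intact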
@@ -284,30 +275,16 @@ class DatasetUploadHandler(COCO):
         )
 
     def get_img_instance_segmentation_payload(self, anns):
-        predicted_classes = set()
-        annotations = []
-        for ann in anns:
-            annotations.append(
-                {
-                    "segmentation": ann["segmentation"],
-                    "category_id": self.original_category_referecences.get(
-                        ann["category_id"]
-                    ),
-                }
-            )
-            predicted_classes.add(
-                self.original_category_referecences.get(ann["category_id"])
-            )
-        return json.dumps(
-            {
-                "annotations": annotations,
-                "predicted_classes": list(predicted_classes),
-                "classes": self.classes,
-            }
-        )
+        return self.get_img_semantic_segmentation_payload(anns)
 
     def get_img_polygon_payload(self, anns):
-        return self.get_img_instance_segmentation_payload(anns)
+        return self.get_img_semantic_segmentation_payload(anns)
+
+    def get_img_line_payload(self, anns):
+        return self.get_img_semantic_segmentation_payload(anns)
+
+    def get_img_point_payload(self, anns):
+        return self.get_img_semantic_segmentation_payload(anns)
 
     def get_img_skeleton_payload(self, anns):
         logger.warning("Not implemented yet")
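After this change every vector annotation type (instance segmentation, polygon, and the new line and point types) funnels through get_img_semantic_segmentation_payload, so the per-type methods are one-line delegations. Judging from the removed implementation, the shared payload presumably keeps the same shape; a sketch of that shape as a standalone function (illustrative only, not the library's API):

    import json

    def build_payload(anns, category_refs, classes):
        # Shape of the upload payload, inferred from the removed code.
        annotations = [
            {
                "segmentation": ann["segmentation"],
                "category_id": category_refs.get(ann["category_id"]),
            }
            for ann in anns
        ]
        predicted = {a["category_id"] for a in annotations}
        return json.dumps(
            {
                "annotations": annotations,
                "predicted_classes": list(predicted),
                "classes": classes,
            }
        )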
@@ -347,6 +324,9 @@ class DatasetUploadHandler(COCO):
                 raise SubscriptionError(
                     "You have reached the maximum number of datasources for your subscription."
                 )
+            elif response.status == 400:
+                logger.error(await response.text())
+                return 0
             response.raise_for_status()
             return 1
         except SubscriptionError as e:
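The new 400 branch makes a single rejected upload non-fatal: the server's error body is logged and the coroutine returns 0 (skipped) instead of raising, while other error statuses still abort via raise_for_status. A sketch of the pattern in isolation, assuming the aiohttp-style session and response that upload_image appears to use:

    import logging

    async def upload_one(session, url, payload):
        async with session.post(url, data=payload) as response:
            if response.status == 400:
                logging.error(await response.text())  # surface the validation error
                return 0                              # skip this item, keep the batch going
            response.raise_for_status()               # other errors still raise
            return 1                                  # uploaded successfully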
src/unitlab/main.py

@@ -32,9 +32,11 @@ class DownloadType(str, Enum):
 
 class AnnotationType(str, Enum):
     IMG_BBOX = "img_bbox"
-    IMG_POLYGON = "img_polygon"
     IMG_SEMANTIC_SEGMENTATION = "img_semantic_segmentation"
     IMG_INSTANCE_SEGMENTATION = "img_instance_segmentation"
+    IMG_POLYGON = "img_polygon"
+    IMG_LINE = "img_line"
+    IMG_POINT = "img_point"
     IMG_SKELETON = "img_skeleton"
 
 
@@ -104,10 +106,7 @@ def dataset_upload(
     help_prompt = ", ".join(
         f"{idx}: {license['name']}" for idx, license in enumerate(licenses)
     )
-    chosen_license = typer.prompt(
-        f"Select license {help_prompt}",
-        type=LicenseEnum,
-    )
+    chosen_license = typer.prompt(f"Select license {help_prompt}", type=LicenseEnum)
     client.dataset_upload(
         name,
         annotation_type.value,
@@ -117,6 +116,18 @@ def dataset_upload(
     )
 
 
+@dataset_app.command(name="update", help="Update dataset")
+def dataset_update(
+    pk: UUID,
+    api_key: API_KEY,
+    annotation_path: Annotated[Path, typer.Option(help="Path to the COCO json file")],
+    data_path: Annotated[
+        Path, typer.Option(help="Directory containing the data to be uploaded")
+    ],
+):
+    get_client(api_key).dataset_update(pk, annotation_path, data_path)
+
+
 @dataset_app.command(name="download", help="Download dataset")
 def dataset_download(
     pk: UUID,
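With the new command registered, updating a dataset from the shell presumably looks like the following (the console-script name unitlab and the option spellings are assumptions based on typer's defaults, not confirmed by the diff):

    unitlab dataset update <dataset-uuid> \
        --api-key <your-api-key> \
        --annotation-path annotations.json \
        --data-path images/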