unitlab-2.1.9.tar.gz → unitlab-2.3.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {unitlab-2.1.9/src/unitlab.egg-info → unitlab-2.3.0}/PKG-INFO +11 -4
- {unitlab-2.1.9 → unitlab-2.3.0}/setup.py +2 -3
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/client.py +3 -97
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/main.py +0 -43
- {unitlab-2.1.9 → unitlab-2.3.0/src/unitlab.egg-info}/PKG-INFO +11 -4
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab.egg-info/SOURCES.txt +0 -1
- unitlab-2.1.9/src/unitlab/dataset.py +0 -333
- {unitlab-2.1.9 → unitlab-2.3.0}/LICENSE.md +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/README.md +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/setup.cfg +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/__init__.py +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/__main__.py +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/exceptions.py +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/utils.py +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab.egg-info/dependency_links.txt +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab.egg-info/entry_points.txt +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab.egg-info/requires.txt +0 -0
- {unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab.egg-info/top_level.txt +0 -0
{unitlab-2.1.9/src/unitlab.egg-info → unitlab-2.3.0}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: unitlab
-Version: 2.1.9
+Version: 2.3.0
 Home-page: https://github.com/teamunitlab/unitlab-sdk
 Author: Unitlab Inc.
 Author-email: team@unitlab.ai
@@ -9,12 +9,11 @@ Keywords: unitlab-sdk
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 License-File: LICENSE.md
 Requires-Dist: aiohttp
 Requires-Dist: aiofiles
@@ -22,3 +21,11 @@ Requires-Dist: requests
 Requires-Dist: tqdm
 Requires-Dist: typer
 Requires-Dist: validators
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: requires-dist
```
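The new `Dynamic:` entries arrive with the Metadata-Version bump: under the newer metadata spec they flag fields the build backend computes at build time rather than reading statically. A minimal sketch, using only the standard library, of how to confirm what an installed build reports:

```python
# Inspect the installed unitlab metadata; importlib.metadata ships with
# Python 3.8+, so no third-party dependencies are needed.
from importlib.metadata import metadata

md = metadata("unitlab")
print(md["Metadata-Version"])  # expected "2.4" for the 2.3.0 build
print(md.get_all("Dynamic"))   # e.g. ["author", "author-email", ...]
```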
{unitlab-2.1.9 → unitlab-2.3.0}/setup.py

```diff
@@ -2,7 +2,7 @@ from setuptools import find_packages, setup
 
 setup(
     name="unitlab",
-    version="2.1.9",
+    version="2.3.0",
     license="MIT",
     author="Unitlab Inc.",
     author_email="team@unitlab.ai",
@@ -13,12 +13,11 @@ setup(
         "Development Status :: 4 - Beta",
         "Intended Audience :: Developers",
         "License :: OSI Approved :: MIT License",
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         "Programming Language :: Python :: 3.11",
         "Programming Language :: Python :: 3.12",
+        "Programming Language :: Python :: 3.13",
     ],
     package_dir={"": "src"},
     url="https://github.com/teamunitlab/unitlab-sdk",
```
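The classifier change drops advertised support for Python 3.8 and adds 3.13. The diff shows no `python_requires` constraint, so the classifiers are informational only; downstream code that wants to fail early on an old interpreter would need its own guard. An advisory sketch, with the 3.9 floor inferred from the classifiers rather than enforced by the package:

```python
import sys

# Advisory guard mirroring the advertised 3.9+ support; nothing in
# setup.py enforces this, so pip will not block older interpreters.
if sys.version_info < (3, 9):
    raise RuntimeError("unitlab 2.3.0 advertises support for Python 3.9+ only")
```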
{unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/client.py

```diff
@@ -9,9 +9,7 @@ import aiohttp
 import requests
 import tqdm
 
-from . import exceptions
-from .dataset import DatasetUploadHandler
-from .utils import handle_exceptions
+from .utils import get_api_url, handle_exceptions
 
 logger = logging.getLogger(__name__)
 
@@ -49,9 +47,9 @@ class UnitlabClient:
     :exc:`~unitlab.exceptions.AuthenticationError`: If an invalid API key is used or (when not passing the API key directly) if ``UNITLAB_API_KEY`` is not found in your environment.
     """
 
-    def __init__(self, api_key, api_url
+    def __init__(self, api_key, api_url=None):
         self.api_key = api_key
-        self.api_url = api_url
+        self.api_url = api_url or get_api_url()
         self.api_session = requests.Session()
         adapter = requests.adapters.HTTPAdapter(max_retries=3)
         self.api_session.mount("http://", adapter)
@@ -182,9 +180,6 @@ class UnitlabClient:
     def datasets(self, pretty=0):
         return self._get(f"/api/sdk/datasets/?pretty={pretty}")
 
-    def licenses(self):
-        return self._get("/api/sdk/licenses/")
-
     def dataset_download(self, dataset_id, export_type):
         response = self._post(
             f"/api/sdk/datasets/{dataset_id}/",
@@ -239,92 +234,3 @@ class UnitlabClient:
                     pbar.update(await f)
 
         asyncio.run(main())
-
-    def _finalize_dataset(self, dataset_id):
-        return self._post(f"/api/sdk/datasets/{dataset_id}/finalize/")
-
-    def _dataset_data_upload(
-        self, dataset_id, upload_handler: DatasetUploadHandler, batch_size=15
-    ):
-        image_ids = upload_handler.getImgIds()
-        url = urllib.parse.urljoin(
-            self.api_url, f"/api/sdk/datasets/{dataset_id}/upload/"
-        )
-
-        async def main():
-            with tqdm.tqdm(total=len(image_ids), ncols=80) as pbar:
-                async with aiohttp.ClientSession(
-                    headers=self._get_headers()
-                ) as session:
-                    try:
-                        for i in range((len(image_ids) + batch_size - 1) // batch_size):
-                            tasks = []
-                            for image_id in image_ids[
-                                i * batch_size : min(
-                                    (i + 1) * batch_size, len(image_ids)
-                                )
-                            ]:
-                                tasks.append(
-                                    upload_handler.upload_image(session, url, image_id)
-                                )
-                            for f in asyncio.as_completed(tasks):
-                                try:
-                                    pbar.update(await f)
-                                except exceptions.SubscriptionError as e:
-                                    raise e
-                    except exceptions.SubscriptionError as e:
-                        raise e
-
-        asyncio.run(main())
-
-    def dataset_upload(
-        self,
-        name,
-        annotation_type,
-        annotation_path,
-        data_path,
-        license_id=None,
-        batch_size=15,
-    ):
-        upload_handler = DatasetUploadHandler(
-            annotation_type, annotation_path, data_path
-        )
-        dataset_id = self._post(
-            "/api/sdk/datasets/create/",
-            data={
-                "name": name,
-                "annotation_type": annotation_type,
-                "classes": [
-                    {"name": category["name"], "value": category["id"]}
-                    for category in upload_handler.categories
-                ],
-                "license": license_id,
-            },
-        )["pk"]
-        self._dataset_data_upload(dataset_id, upload_handler, batch_size=batch_size)
-        self._finalize_dataset(dataset_id)
-
-    def dataset_update(self, pk, annotation_path, data_path, batch_size=15):
-        dataset = self._get(f"api/sdk/datasets/{pk}/")
-        upload_handler = DatasetUploadHandler(
-            dataset["annotation_type"], annotation_path, data_path
-        )
-        new_dataset = self._post(
-            f"/api/sdk/datasets/{pk}/update/",
-            data={
-                "classes": [
-                    {"name": category["name"], "value": category["id"]}
-                    for category in sorted(
-                        upload_handler.loadCats(upload_handler.getCatIds()),
-                        key=lambda x: x["id"],
-                    )
-                ]
-            },
-        )
-        upload_handler.original_category_referecences = {
-            int(k): v for k, v in new_dataset["original_category_referecences"].items()
-        }
-        self._dataset_data_upload(
-            new_dataset["pk"], upload_handler, batch_size=batch_size
-        )
-        self._finalize_dataset(new_dataset["pk"])
```
{unitlab-2.1.9 → unitlab-2.3.0}/src/unitlab/main.py

```diff
@@ -84,49 +84,6 @@ def dataset_list(api_key: API_KEY):
     print(get_client(api_key).datasets(pretty=1))
 
 
-@dataset_app.command(name="upload", help="Upload dataset")
-def dataset_upload(
-    api_key: API_KEY,
-    name: Annotated[str, typer.Option(help="Name of the dataset")],
-    annotation_type: Annotated[AnnotationType, typer.Option(help="Annotation format")],
-    annotation_path: Annotated[Path, typer.Option(help="Path to the COCO json file")],
-    data_path: Annotated[
-        Path, typer.Option(help="Directory containing the data to be uploaded")
-    ],
-):
-    client = get_client(api_key)
-    licenses = client.licenses()
-    chosen_license = None
-    if licenses:
-        LicenseEnum = Enum(
-            "LicenseEnum",
-            {license["pk"]: str(idx) for idx, license in enumerate(licenses)},
-        )
-        help_prompt = ", ".join(
-            f"{idx}: {license['name']}" for idx, license in enumerate(licenses)
-        )
-        chosen_license = typer.prompt(f"Select license {help_prompt}", type=LicenseEnum)
-    client.dataset_upload(
-        name,
-        annotation_type.value,
-        annotation_path,
-        data_path,
-        license_id=chosen_license.name if chosen_license else None,
-    )
-
-
-@dataset_app.command(name="update", help="Update dataset")
-def dataset_update(
-    pk: UUID,
-    api_key: API_KEY,
-    annotation_path: Annotated[Path, typer.Option(help="Path to the COCO json file")],
-    data_path: Annotated[
-        Path, typer.Option(help="Directory containing the data to be uploaded")
-    ],
-):
-    get_client(api_key).dataset_update(pk, annotation_path, data_path)
-
-
 @dataset_app.command(name="download", help="Download dataset")
 def dataset_download(
     pk: UUID,
```
{unitlab-2.1.9 → unitlab-2.3.0/src/unitlab.egg-info}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: unitlab
-Version: 2.1.9
+Version: 2.3.0
 Home-page: https://github.com/teamunitlab/unitlab-sdk
 Author: Unitlab Inc.
 Author-email: team@unitlab.ai
@@ -9,12 +9,11 @@ Keywords: unitlab-sdk
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 License-File: LICENSE.md
 Requires-Dist: aiohttp
 Requires-Dist: aiofiles
@@ -22,3 +21,11 @@ Requires-Dist: requests
 Requires-Dist: tqdm
 Requires-Dist: typer
 Requires-Dist: validators
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: requires-dist
```
unitlab-2.1.9/src/unitlab/dataset.py (deleted)

```diff
@@ -1,333 +0,0 @@
-import asyncio
-import copy
-import itertools
-import json
-import logging
-import os
-from collections import defaultdict
-
-import aiofiles
-import aiohttp
-
-from .exceptions import SubscriptionError
-
-logger = logging.getLogger(__name__)
-
-
-class COCO:
-    def __init__(self, annotation_type, annotation_path, data_path):
-        """
-        :param annotation_type (str): one of ['img_bbox', 'img_semantic_segmentation', 'img_polygon', 'img_keypoints']
-        :param annotation_path (str): location of annotation file
-        :param data_path (str): directory containing the images
-        :return:
-        """
-        self.annotation_type = annotation_type
-        self.annotation_path = annotation_path
-        self.data_path = data_path
-        self.anns, self.cats, self.imgs = dict(), dict(), dict()
-        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
-        self._load_dataset()
-
-    @staticmethod
-    def _is_array_like(obj):
-        return hasattr(obj, "__iter__") and hasattr(obj, "__len__")
-
-    def _validate(self):
-        if not os.path.isdir(self.data_path):
-            raise ValueError(
-                "Data path '{}' does not exist or is not a directory".format(
-                    self.data_path
-                )
-            )
-        for required_key in ["images", "annotations", "categories"]:
-            if required_key not in self.dataset.keys():
-                raise KeyError(
-                    "Required key '{}' not found in the COCO dataset".format(
-                        required_key
-                    )
-                )
-            if len(self.dataset[required_key]) == 0:
-                raise ValueError(
-                    "Required key '{}' does not contain values".format(required_key)
-                )
-
-    def _load_dataset(self):
-        with open(self.annotation_path, "r") as f:
-            self.dataset = json.load(f)
-        self._validate()
-        self.createIndex()
-
-    def createIndex(self):
-        anns, cats, imgs = {}, {}, {}
-        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
-        for ann in self.dataset["annotations"]:
-            imgToAnns[ann["image_id"]].append(ann)
-            anns[ann["id"]] = ann
-
-        for img in self.dataset["images"]:
-            imgs[img["id"]] = img
-
-        for cat in self.dataset["categories"]:
-            cats[cat["id"]] = cat
-
-        for ann in self.dataset["annotations"]:
-            catToImgs[ann["category_id"]].append(ann["image_id"])
-
-        # create class members
-        self.anns = anns
-        self.imgToAnns = imgToAnns
-        self.catToImgs = catToImgs
-        self.imgs = imgs
-        self.cats = cats
-        self.categories = sorted(
-            copy.deepcopy(self.loadCats(self.getCatIds())), key=lambda x: x["id"]
-        )
-        self.classes = [cat["name"] for cat in self.categories]
-        self.original_category_referecences = dict()
-        for i, category in enumerate(self.categories):
-            self.original_category_referecences[category["id"]] = i
-            category["id"] = i
-
-    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
-        """
-        Get ann ids that satisfy given filter conditions. default skips that filter
-        :param imgIds (int array) : get anns for given imgs
-               catIds (int array) : get anns for given cats
-               areaRng (float array) : get anns for given area range (e.g. [0 inf])
-               iscrowd (boolean) : get anns for given crowd label (False or True)
-        :return: ids (int array) : integer array of ann ids
-        """
-        imgIds = imgIds if self._is_array_like(imgIds) else [imgIds]
-        catIds = catIds if self._is_array_like(catIds) else [catIds]
-
-        if len(imgIds) == len(catIds) == len(areaRng) == 0:
-            anns = self.dataset["annotations"]
-        else:
-            if not len(imgIds) == 0:
-                lists = [
-                    self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns
-                ]
-                anns = list(itertools.chain.from_iterable(lists))
-            else:
-                anns = self.dataset["annotations"]
-            anns = (
-                anns
-                if len(catIds) == 0
-                else [ann for ann in anns if ann["category_id"] in catIds]
-            )
-            anns = (
-                anns
-                if len(areaRng) == 0
-                else [
-                    ann
-                    for ann in anns
-                    if ann["area"] > areaRng[0] and ann["area"] < areaRng[1]
-                ]
-            )
-        if iscrowd:
-            ids = [ann["id"] for ann in anns if ann["iscrowd"] == iscrowd]
-        else:
-            ids = [ann["id"] for ann in anns]
-        return ids
-
-    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
-        """
-        filtering parameters. default skips that filter.
-        :param catNms (str array) : get cats for given cat names
-        :param supNms (str array) : get cats for given supercategory names
-        :param catIds (int array) : get cats for given cat ids
-        :return: ids (int array) : integer array of cat ids
-        """
-        catNms = catNms if self._is_array_like(catNms) else [catNms]
-        supNms = supNms if self._is_array_like(supNms) else [supNms]
-        catIds = catIds if self._is_array_like(catIds) else [catIds]
-
-        if len(catNms) == len(supNms) == len(catIds) == 0:
-            cats = self.dataset["categories"]
-        else:
-            cats = self.dataset["categories"]
-            cats = (
-                cats
-                if len(catNms) == 0
-                else [cat for cat in cats if cat["name"] in catNms]
-            )
-            cats = (
-                cats
-                if len(supNms) == 0
-                else [cat for cat in cats if cat["supercategory"] in supNms]
-            )
-            cats = (
-                cats
-                if len(catIds) == 0
-                else [cat for cat in cats if cat["id"] in catIds]
-            )
-        ids = [cat["id"] for cat in cats]
-        return ids
-
-    def getImgIds(self, imgIds=[], catIds=[]):
-        """
-        Get img ids that satisfy given filter conditions.
-        :param imgIds (int array) : get imgs for given ids
-        :param catIds (int array) : get imgs with all given cats
-        :return: ids (int array) : integer array of img ids
-        """
-        imgIds = imgIds if self._is_array_like(imgIds) else [imgIds]
-        catIds = catIds if self._is_array_like(catIds) else [catIds]
-
-        if len(imgIds) == len(catIds) == 0:
-            ids = self.imgs.keys()
-        else:
-            ids = set(imgIds)
-            for i, catId in enumerate(catIds):
-                if i == 0 and len(ids) == 0:
-                    ids = set(self.catToImgs[catId])
-                else:
-                    ids &= set(self.catToImgs[catId])
-        return list(ids)
-
-    def loadAnns(self, ids=[]):
-        """
-        Load anns with the specified ids.
-        :param ids (int array) : integer ids specifying anns
-        :return: anns (object array) : loaded ann objects
-        """
-        if self._is_array_like(ids):
-            return [self.anns[id] for id in ids]
-        elif isinstance(ids, int):
-            return [self.anns[ids]]
-
-    def loadCats(self, ids=[]):
-        """
-        Load cats with the specified ids.
-        :param ids (int array) : integer ids specifying cats
-        :return: cats (object array) : loaded cat objects
-        """
-        if self._is_array_like(ids):
-            return [self.cats[id] for id in ids]
-        elif isinstance(ids, int):
-            return [self.cats[ids]]
-
-    def loadImgs(self, ids=[]):
-        """
-        Load anns with the specified ids.
-        :param ids (int array) : integer ids specifying img
-        :return: imgs (object array) : loaded img objects
-        """
-        if self._is_array_like(ids):
-            return [self.imgs[id] for id in ids]
-        elif isinstance(ids, int):
-            return [self.imgs[ids]]
-
-
-class DatasetUploadHandler(COCO):
-    def get_img_bbox_payload(self, anns):
-        predicted_classes = set()
-        bboxes = []
-        for ann in anns:
-            bbox = ann["bbox"]
-            bboxes.append(
-                {
-                    "point": [
-                        [bbox[0], bbox[1]],
-                        [bbox[0] + bbox[2], bbox[1]],
-                        [bbox[0] + bbox[2], bbox[1] + bbox[3]],
-                        [bbox[0], bbox[1] + bbox[3]],
-                    ],
-                    "class": self.original_category_referecences.get(
-                        ann["category_id"]
-                    ),
-                    "recognition": ann.get("recognition", ""),
-                }
-            )
-            predicted_classes.add(
-                self.original_category_referecences.get(ann["category_id"])
-            )
-        return json.dumps(
-            {
-                "bboxes": [bboxes],
-                "predicted_classes": list(predicted_classes),
-                "classes": self.classes,
-            }
-        )
-
-    def get_img_semantic_segmentation_payload(self, anns):
-        predicted_classes = set()
-        annotations = []
-        for ann in anns:
-            annotations.append(
-                {
-                    "segmentation": ann["segmentation"],
-                    "category_id": self.original_category_referecences.get(
-                        ann["category_id"]
-                    ),
-                }
-            )
-            predicted_classes.add(
-                self.original_category_referecences.get(ann["category_id"])
-            )
-        return json.dumps(
-            {
-                "annotations": annotations,
-                "predicted_classes": list(predicted_classes),
-                "classes": self.classes,
-            }
-        )
-
-    def get_img_instance_segmentation_payload(self, anns):
-        return self.get_img_semantic_segmentation_payload(anns)
-
-    def get_img_polygon_payload(self, anns):
-        return self.get_img_semantic_segmentation_payload(anns)
-
-    def get_img_line_payload(self, anns):
-        return self.get_img_semantic_segmentation_payload(anns)
-
-    def get_img_point_payload(self, anns):
-        return self.get_img_semantic_segmentation_payload(anns)
-
-    def get_payload(self, img_id):
-        image = self.imgs[img_id]
-        ann_ids = self.getAnnIds(imgIds=img_id)
-        anns = self.loadAnns(ann_ids)
-        if not os.path.isfile(os.path.join(self.data_path, image["file_name"])):
-            logger.warning(
-                "Image file not found: {}".format(
-                    os.path.join(self.data_path, image["file_name"])
-                )
-            )
-            return
-        if len(anns) == 0:
-            logger.warning("No annotations found for image: {}".format(img_id))
-            return
-        return getattr(self, f"get_{self.annotation_type}_payload")(anns)
-
-    async def upload_image(self, session, url, image_id):
-        image = self.loadImgs(image_id)[0]
-        file_name = image["file_name"]
-        payload = self.get_payload(image_id)
-        if payload:
-            async with aiofiles.open(
-                os.path.join(self.data_path, file_name), "rb"
-            ) as f:
-                form_data = aiohttp.FormData()
-                form_data.add_field("file", await f.read(), filename=file_name)
-                form_data.add_field("result", self.get_payload(image_id))
-            try:
-                # rate limiting
-                await asyncio.sleep(0.1)
-                async with session.post(url, data=form_data) as response:
-                    if response.status == 403:
-                        raise SubscriptionError(
-                            "You have reached the maximum number of datasources for your subscription."
-                        )
-                    elif response.status == 400:
-                        logger.error(await response.text())
-                        return 0
-                    response.raise_for_status()
-                    return 1
-            except SubscriptionError as e:
-                raise e
-            except Exception as e:
-                logger.error(f"Error uploading file {file_name} - {e}")
-                return 0
```
|