supervisely 6.73.461__py3-none-any.whl → 6.73.470__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of supervisely might be problematic.
- supervisely/api/dataset_api.py +74 -12
- supervisely/app/widgets/__init__.py +1 -0
- supervisely/app/widgets/fast_table/fast_table.py +164 -74
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +523 -0
- supervisely/app/widgets/heatmap/script.js +378 -0
- supervisely/app/widgets/heatmap/style.css +227 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +10 -2
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
- supervisely/nn/inference/inference.py +266 -9
- supervisely/nn/inference/inference_request.py +3 -9
- supervisely/nn/inference/predict_app/gui/input_selector.py +53 -27
- supervisely/nn/inference/session.py +43 -35
- supervisely/video/sampling.py +41 -21
- supervisely/video/video.py +25 -10
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/METADATA +1 -1
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/RECORD +25 -20
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/LICENSE +0 -0
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/WHEEL +0 -0
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/top_level.txt +0 -0
supervisely/app/widgets/table/table.py +68 -13

```diff
@@ -1,3 +1,5 @@
+# isort: skip_file
+
 import copy
 import io
 
@@ -54,9 +56,8 @@ class PackerUnpacker:
 
     @staticmethod
     def pandas_unpacker(data: pd.DataFrame):
-        data
-        #
-
+        # Keep None/NaN values in source data, don't replace them
+        # They will be converted to "" only when sending to frontend
         unpacked_data = {
             "columns": data.columns.to_list(),
             "data": data.values.tolist(),
@@ -169,9 +170,35 @@ class Table(Widget):
 
         super().__init__(widget_id=widget_id, file_path=__file__)
 
+    def _prepare_data_for_frontend(self, data_dict):
+        """Convert None and NaN values to empty strings for frontend display.
+        This preserves the original None/NaN values in _parsed_data.
+        """
+        import math
+
+        display_data = copy.deepcopy(data_dict)
+
+        # Convert None/NaN in data rows
+        for row in display_data.get("data", []):
+            for i in range(len(row)):
+                value = row[i]
+                # Check for None or NaN (NaN is a float that doesn't equal itself)
+                if value is None or (isinstance(value, float) and math.isnan(value)):
+                    row[i] = ""
+
+        # Convert None/NaN in summary row if present
+        if "summaryRow" in display_data and display_data["summaryRow"] is not None:
+            summary_row = display_data["summaryRow"]
+            for i in range(len(summary_row)):
+                value = summary_row[i]
+                if value is None or (isinstance(value, float) and math.isnan(value)):
+                    summary_row[i] = ""
+
+        return display_data
+
     def get_json_data(self):
         return {
-            "table_data": self._parsed_data,
+            "table_data": self._prepare_data_for_frontend(self._parsed_data),
             "table_options": {
                 "perPage": self._per_page,
                 "pageSizes": self._page_sizes,
@@ -255,13 +282,17 @@ class Table(Widget):
 
     def read_json(self, value: dict) -> None:
         self._update_table_data(input_data=value)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
         self.clear_selection()
 
     def read_pandas(self, value: pd.DataFrame) -> None:
         self._update_table_data(input_data=value)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
         self.clear_selection()
 
@@ -272,7 +303,9 @@ class Table(Widget):
         index = len(table_data) if index > len(table_data) or index < 0 else index
 
         self._parsed_data["data"].insert(index, data)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def pop_row(self, index=-1):
@@ -284,7 +317,9 @@ class Table(Widget):
 
         if len(self._parsed_data["data"]) != 0:
             popped_row = self._parsed_data["data"].pop(index)
-            DataJson()[self.widget_id]["table_data"] = self._parsed_data
+            DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+                self._parsed_data
+            )
             DataJson().send_changes()
             return popped_row
 
@@ -382,11 +417,27 @@ class Table(Widget):
         StateJson()[self.widget_id]["selected_row"] = {}
         StateJson().send_changes()
 
+    @staticmethod
+    def _values_equal(val1, val2):
+        """Compare two values, handling NaN specially."""
+        import math
+
+        # Check if both are NaN
+        is_nan1 = isinstance(val1, float) and math.isnan(val1)
+        is_nan2 = isinstance(val2, float) and math.isnan(val2)
+        if is_nan1 and is_nan2:
+            return True
+        # Check if both are None
+        if val1 is None and val2 is None:
+            return True
+        # Regular comparison
+        return val1 == val2
+
     def delete_row(self, key_column_name, key_cell_value):
         col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[col_index] == key_cell_value:
+            if self._values_equal(row[col_index], key_cell_value):
                 row_indices.append(idx)
         if len(row_indices) == 0:
             raise ValueError('Column "{key_column_name}" does not have value "{key_cell_value}"')
@@ -400,7 +451,7 @@ class Table(Widget):
         key_col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[key_col_index] == key_cell_value:
+            if self._values_equal(row[key_col_index], key_cell_value):
                 row_indices.append(idx)
         if len(row_indices) == 0:
             raise ValueError('Column "{key_column_name}" does not have value "{key_cell_value}"')
@@ -411,20 +462,24 @@ class Table(Widget):
 
         col_index = self._parsed_data["columns"].index(column_name)
         self._parsed_data["data"][row_indices[0]][col_index] = new_value
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def update_matching_cells(self, key_column_name, key_cell_value, column_name, new_value):
         key_col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[key_col_index] == key_cell_value:
+            if self._values_equal(row[key_col_index], key_cell_value):
                 row_indices.append(idx)
 
         col_index = self._parsed_data["columns"].index(column_name)
        for row_idx in row_indices:
            self._parsed_data["data"][row_idx][col_index] = new_value
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def sort(self, column_id: int = None, direction: Optional[Literal["asc", "desc"]] = None):
```
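
The Table widget change above keeps None/NaN values intact in the widget's internal `_parsed_data` and only blanks them out in the copy sent to the frontend, while `_values_equal` makes row lookups NaN-aware (NaN never equals itself under `==`). A minimal standalone sketch of the same pattern, using plain pandas and no Supervisely imports:

```python
import copy
import math

import pandas as pd


def values_equal(val1, val2):
    # NaN != NaN under ==, so detect NaN explicitly before the regular comparison
    is_nan1 = isinstance(val1, float) and math.isnan(val1)
    is_nan2 = isinstance(val2, float) and math.isnan(val2)
    if is_nan1 and is_nan2:
        return True
    if val1 is None and val2 is None:
        return True
    return val1 == val2


def prepare_for_frontend(table_data):
    # Deep copy so the original rows keep their None/NaN values
    display = copy.deepcopy(table_data)
    for row in display["data"]:
        for i, value in enumerate(row):
            if value is None or (isinstance(value, float) and math.isnan(value)):
                row[i] = ""
    return display


df = pd.DataFrame({"name": ["cat", "dog"], "score": [0.9, float("nan")]})
parsed = {"columns": df.columns.to_list(), "data": df.values.tolist()}

print(prepare_for_frontend(parsed)["data"])      # [['cat', 0.9], ['dog', '']]
print(parsed["data"][1][1])                      # nan -- the source data is untouched
print(values_equal(float("nan"), float("nan")))  # True
```
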
supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16

```diff
@@ -1,9 +1,10 @@
 import os
+import uuid
 from typing import Dict, List, Optional
 
 import supervisely.convert.pointcloud_episodes.nuscenes_conv.nuscenes_helper as helpers
 import supervisely.io.fs as fs
-from supervisely import PointcloudAnnotation, PointcloudObject
+from supervisely import KeyIdMap, PointcloudAnnotation, PointcloudObject
 from supervisely._utils import is_development
 from supervisely.annotation.obj_class import ObjClass
 from supervisely.annotation.tag_meta import TagMeta, TagValueType
@@ -30,19 +31,6 @@ from supervisely.sly_logger import logger
 class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
     """Converter for NuScenes pointcloud format."""
 
-    def __init__(
-        self,
-        input_data: str,
-        labeling_interface: str,
-        upload_as_links: bool,
-        remote_files_map: Optional[Dict[str, str]] = None,
-    ):
-        super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
-        self._nuscenes = None
-
-    def __str__(self) -> str:
-        return AvailablePointcloudConverters.NUSCENES
-
     def to_supervisely(
         self,
         scene_sample: helpers.Sample,
@@ -69,14 +57,21 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
         return PointcloudAnnotation(PointcloudObjectCollection(objs), figures)
 
     def upload_dataset(self, api: Api, dataset_id: int, batch_size: int = 1, log_progress=True):
-        nuscenes = self._nuscenes
+        from nuscenes.nuscenes import NuScenes  # pylint: disable=import-error
+
+        nuscenes: NuScenes = self._nuscenes
+
+        key_id_map = KeyIdMap()
 
         tag_metas = [TagMeta(attr["name"], TagValueType.NONE) for attr in nuscenes.attribute]
         obj_classes = []
+        classes_token_map = {}
         for category in nuscenes.category:
             color = nuscenes.colormap[category["name"]]
             description = helpers.trim_description(category["description"])
             obj_classes.append(ObjClass(category["name"], Cuboid3d, color, description=description))
+            classes_token_map[category["token"]] = category["name"]
+        self._custom_data["classes_token_map"] = classes_token_map
 
         self._meta = ProjectMeta(obj_classes, tag_metas)
         meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)
@@ -108,6 +103,7 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
         else:
             progress_cb = None
 
+        self._custom_data["frame_token_map"] = {}
        for scene in nuscenes.scene:
            current_dataset_id = scene_name_to_dataset[scene["name"]].id
 
@@ -116,8 +112,10 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
 
            # * Extract scene's samples
            scene_samples: List[helpers.Sample] = []
+            frame_token_map = {}
            for i in range(scene["nbr_samples"]):
                sample = nuscenes.get("sample", sample_token)
+                frame_token_map[sample["token"]] = i
                lidar_path, boxes, _ = nuscenes.get_sample_data(sample["data"]["LIDAR_TOP"])
                if not os.path.exists(lidar_path):
                    logger.warning(f'Scene "{scene["name"]}" has no LIDAR data.')
@@ -137,9 +135,11 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
                    ]
                    visibility = nuscenes.get("visibility", ann["visibility_token"])["level"]
 
+                    ann_uuid = uuid.UUID(ann["token"])
                    ann = helpers.AnnotationObject(
                        name=name,
                        bbox=box,
+                        token=ann_uuid,
                        instance_token=current_instance_token,
                        parent_token=parent_token,
                        category=category,
@@ -162,6 +162,7 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
                ]
                scene_samples.append(helpers.Sample(timestamp, lidar_path, anns, camera_data))
                sample_token = sample["next"]
+            self._custom_data["frame_token_map"][current_dataset_id] = frame_token_map
 
            # * Convert and upload pointclouds w/ annotations
            for idx, sample in enumerate(scene_samples):
@@ -182,7 +183,7 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
                pcd_id = info.id
                # * Upload pointcloud annotation
                try:
-                    api.pointcloud.annotation.append(pcd_id, pcd_ann)
+                    api.pointcloud.annotation.append(pcd_id, pcd_ann, key_id_map)
                except Exception as e:
                    error_msg = getattr(getattr(e, "response", e), "text", str(e))
                    logger.warning(
@@ -213,6 +214,16 @@ class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
 
            logger.info(f"Dataset ID:{current_dataset_id} has been successfully uploaded.")
 
+        key_id_map = key_id_map.to_dict()
+        key_id_map.pop("tags")
+        key_id_map.pop("videos")
+        self._custom_data["key_id_map"] = key_id_map
+
+        project_id = dataset_info.project_id
+        current_custom_data = api.project.get_custom_data(project_id)
+        current_custom_data.update(self._custom_data)
+        api.project.update_custom_data(project_id, current_custom_data)
+
        if log_progress:
            if is_development():
                progress.close()
```
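
Both NuScenes converters now collect a `KeyIdMap` during upload and persist it, together with `classes_token_map` and a per-dataset `frame_token_map`, into the project's custom data via `api.project.get_custom_data` / `api.project.update_custom_data`. A hedged sketch of how a downstream script could read that bookkeeping back after import (the project ID below is a placeholder):

```python
import supervisely as sly

api = sly.Api.from_env()  # assumes SERVER_ADDRESS and API_TOKEN env vars are set
project_id = 123          # placeholder: the project the NuScenes scenes were uploaded into

custom_data = api.project.get_custom_data(project_id)

# NuScenes category token -> Supervisely class name, written by the converter
classes_token_map = custom_data.get("classes_token_map", {})

# dataset id -> {sample token -> frame index} for the pointcloud converter,
# or {frame index -> sample token} for the episodes converter
frame_token_map = custom_data.get("frame_token_map", {})

# KeyIdMap dump with the unused "tags"/"videos" sections stripped before saving
key_id_map = custom_data.get("key_id_map", {})

print(f"{len(classes_token_map)} categories mapped, {len(frame_token_map)} datasets covered")
```
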
supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22

```diff
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
 import os
+import uuid
 from os import path as osp
 from pathlib import Path
 from typing import Dict, List, Optional
@@ -37,19 +40,14 @@ from supervisely.pointcloud_annotation.pointcloud_figure import PointcloudFigure
 from supervisely.project.project_meta import ProjectMeta
 from supervisely.sly_logger import logger
 from supervisely.tiny_timer import TinyTimer
+from supervisely.video_annotation.key_id_map import KeyIdMap
 
 
 class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
     """Converter for NuScenes pointcloud episodes format."""
 
-    def __init__(
-        self,
-        input_data: str,
-        labeling_interface: str,
-        upload_as_links: bool,
-        remote_files_map: Optional[Dict[str, str]] = None,
-    ):
-        super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
+    _nuscenes: "NuScenes" = None  # type: ignore
+    _custom_data: Dict = {}
 
     def __str__(self) -> str:
         return AvailablePointcloudConverters.NUSCENES
@@ -61,8 +59,10 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
            logger.warning("Please, run 'pip install nuscenes-devkit' to import NuScenes data.")
            return False
 
-
-
+        table_json_filenames = [f"{name}.json" for name in helpers.TABLE_NAMES]
+
+        def _contains_tables(dir_path: str) -> bool:
+            return all(fs.file_exists(osp.join(dir_path, table)) for table in table_json_filenames)
 
        def _filter_fn(path):
            has_tables = False
@@ -89,11 +89,13 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
            logger.debug(f"Failed to initialize NuScenes: {e}")
            return False
 
+        self._custom_data["nuscenes_version"] = version
+        self._custom_data["dataroot"] = input_path
        return True
 
     def to_supervisely(
         self,
-        scene_samples: List[helpers.Sample],
+        scene_samples: Dict[str, helpers.Sample],
         meta: ProjectMeta,
         renamed_classes: dict = {},
         renamed_tags: dict = {},
@@ -101,9 +103,13 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
        token_to_obj = {}
        frames = []
        tags = []
-        for sample_i, sample in enumerate(scene_samples):
+        frame_idx_to_scene_sample_token = {}
+        if "frame_token_map" not in self._custom_data:
+            self._custom_data["frame_token_map"] = {}
+        for sample_i, (token, sample) in enumerate(scene_samples.items()):
            figures = []
            for obj in sample.anns:
+                ann_token = uuid.UUID(obj.token)
                instance_token = obj.instance_token
                class_name = obj.category
                parent_obj_token = obj.parent_token
@@ -113,7 +119,9 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
                obj_class_name = renamed_classes.get(class_name, class_name)
                obj_class = meta.get_obj_class(obj_class_name)
                obj_tags = None  # ! TODO: fix tags
-                pcd_ep_obj = PointcloudEpisodeObject(obj_class, obj_tags)
+                pcd_ep_obj = PointcloudEpisodeObject(
+                    obj_class, obj_tags, uuid.UUID(instance_token)
+                )
                # * Assign the object to the starting token
                token_to_obj[instance_token] = pcd_ep_obj
                parent_object = pcd_ep_obj
@@ -122,29 +130,41 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
                    token_to_obj[instance_token] = token_to_obj[parent_obj_token]
                    parent_object = token_to_obj[parent_obj_token]
                geom = obj.to_supervisely()
-                pcd_figure = PointcloudFigure(parent_object, geom, sample_i)
+                pcd_figure = PointcloudFigure(parent_object, geom, sample_i, ann_token)
                figures.append(pcd_figure)
+            frame_idx_to_scene_sample_token[sample_i] = token
            frame = PointcloudEpisodeFrame(sample_i, figures)
            frames.append(frame)
        tag_collection = PointcloudEpisodeTagCollection(tags) if len(tags) > 0 else None
+        self._custom_data["frame_token_map"][self._current_ds_id] = frame_idx_to_scene_sample_token
+        key_uuid = uuid.UUID(token)
        return PointcloudEpisodeAnnotation(
            len(frames),
            PointcloudEpisodeObjectCollection(list(set(token_to_obj.values()))),
            PointcloudEpisodeFrameCollection(frames),
            tag_collection,
+            key=key_uuid,
        )
 
     def upload_dataset(self, api: Api, dataset_id: int, batch_size: int = 1, log_progress=True):
-        nuscenes = self._nuscenes
+        from nuscenes import NuScenes  # pylint: disable=import-error
+
+        nuscenes: NuScenes = self._nuscenes
+        key_id_map = KeyIdMap()
 
        tag_metas = [TagMeta(attr["name"], TagValueType.NONE) for attr in nuscenes.attribute]
-        obj_classes = []
+        obj_classes = {}
        for category in nuscenes.category:
            color = nuscenes.colormap[category["name"]]
            description = helpers.trim_description(category.get("description", ""))
-            obj_classes.append(ObjClass(category["name"], Cuboid3d, color, description=description))
+            token = category["token"]
+            obj_classes[token] = ObjClass(
+                category["name"], Cuboid3d, color, description=description
+            )
+
+        self._custom_data["classes_token_map"] = {k: v.name for k, v in obj_classes.items()}
 
-        self._meta = ProjectMeta(obj_classes, tag_metas)
+        self._meta = ProjectMeta(list(obj_classes.values()), tag_metas)
        meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)
 
        dataset_info = api.dataset.get_info_by_id(dataset_id)
@@ -178,12 +198,13 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
 
        for scene in nuscenes.scene:
            current_dataset_id = scene_name_to_dataset[scene["name"]].id
+            self._current_ds_id = current_dataset_id
 
            log = nuscenes.get("log", scene["log_token"])
            sample_token = scene["first_sample_token"]
 
            # * Extract scene's samples
-            scene_samples: List[helpers.Sample] = []
+            scene_samples: Dict[str, helpers.Sample] = {}
            for i in range(scene["nbr_samples"]):
                sample = nuscenes.get("sample", sample_token)
                lidar_path, boxes, _ = nuscenes.get_sample_data(sample["data"]["LIDAR_TOP"])
@@ -203,10 +224,12 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
                        nuscenes.get("attribute", attr)["name"] for attr in ann["attribute_tokens"]
                    ]
                    visibility = nuscenes.get("visibility", ann["visibility_token"])["level"]
+                    ann_token = ann["token"]
 
                    ann = helpers.AnnotationObject(
                        name=name,
                        bbox=box,
+                        token=ann_token,
                        instance_token=current_instance_token,
                        parent_token=parent_token,
                        category=category,
@@ -227,12 +250,15 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
                    for sensor, token in sample["data"].items()
                    if sensor.startswith("CAM")
                ]
-                scene_samples.append(helpers.Sample(timestamp, lidar_path, anns, camera_data))
+                sample_token = sample["token"]
+                scene_samples[sample_token] = helpers.Sample(
+                    timestamp, lidar_path, anns, camera_data
+                )
                sample_token = sample["next"]
 
            # * Convert and upload pointclouds
            frame_to_pointcloud_ids = {}
-            for idx, sample in enumerate(scene_samples):
+            for idx, sample in enumerate(scene_samples.values()):
                pcd_path = sample.convert_lidar_to_supervisely()
 
                pcd_name = fs.get_file_name_with_ext(pcd_path)
@@ -275,9 +301,10 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
 
            # * Convert and upload annotations
            pcd_ann = self.to_supervisely(scene_samples, meta, renamed_classes, renamed_tags)
+
            try:
                api.pointcloud_episode.annotation.append(
-                    current_dataset_id, pcd_ann, frame_to_pointcloud_ids
+                    current_dataset_id, pcd_ann, frame_to_pointcloud_ids, key_id_map=key_id_map
                )
                logger.info(f"Dataset ID:{current_dataset_id} has been successfully uploaded.")
            except Exception as e:
@@ -285,6 +312,15 @@ class NuscenesEpisodesConverter(PointcloudEpisodeConverter):
                logger.warning(
                    f"Failed to upload annotation for scene: {scene['name']}. Message: {error_msg}"
                )
+        key_id_map = key_id_map.to_dict()
+        key_id_map.pop("tags")
+        key_id_map.pop("videos")
+        self._custom_data["key_id_map"] = key_id_map
+
+        project_id = dataset_info.project_id
+        current_custom_data = api.project.get_custom_data(project_id)
+        current_custom_data.update(self._custom_data)
+        api.project.update_custom_data(project_id, current_custom_data)
 
        if log_progress:
            if is_development():
```
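
One detail behind the episode changes: NuScenes tokens are 32-character hex strings, so `uuid.UUID(token)` turns them directly into stable keys for episode objects, figures, and the annotation itself, which is what lets the saved `key_id_map` be joined back to NuScenes records later. A small illustration of that idea (the token below is made up):

```python
import uuid

# NuScenes tokens are 32 hex characters, which is exactly a UUID without dashes
sample_token = "0af0feb5b1394b928dd13d648de898f5"  # hypothetical token

key = uuid.UUID(sample_token)
print(key)                      # 0af0feb5-b139-4b92-8dd1-3d648de898f5
print(key.hex == sample_token)  # True -- the mapping is reversible
```
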
supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47

```diff
@@ -1,3 +1,4 @@
+from dataclasses import dataclass, field
 from datetime import datetime
 from os import path as osp
 from pathlib import Path
@@ -52,6 +53,7 @@ def trim_description(description: str, max_length: int = 255) -> str:
     return description
 
 
+@dataclass
 class AnnotationObject:
     """
     A class to represent an annotation object in the NuScenes dataset.
@@ -60,6 +62,8 @@ class AnnotationObject:
     :type name: str
     :param bbox: The bounding box coordinates in NuScenes format
     :type bbox: np.ndarray
+    :param token: The unique token identifying the annotation object
+    :type token: str
     :param instance_token: The instance token associated with the annotation object
     :type instance_token: str
     :param parent_token: The token of instance preceding the current object instance
@@ -71,25 +75,14 @@ class AnnotationObject:
     :param visibility: The visibility level of the annotation object
     :type visibility: str
     """
-
-    def __init__(
-        self,
-        name: str,
-        bbox: np.ndarray,
-        instance_token: str,
-        parent_token: str,
-        category: str,
-        attributes: List[str],
-        visibility: str,
-    ):
-        self.name = name
-        self.bbox = bbox
-        self.instance_token = instance_token
-        self.parent_token = parent_token
-
-        self.category = category
-        self.attributes = attributes
-        self.visibility = visibility
+    name: str
+    bbox: np.ndarray
+    token: str
+    instance_token: str
+    parent_token: str
+    category: str
+    attributes: List[str]
+    visibility: str
 
     def to_supervisely(self) -> Cuboid3d:
         box = self.convert_nuscenes_to_BEVBox3D()
@@ -213,38 +206,19 @@ class CamData:
         return (sly_path_img, img_info)
 
 
+@dataclass
 class Sample:
     """
     A class to represent a sample from the NuScenes dataset.
     """
-
-    def __init__(
-        self,
-        timestamp: float,
-        lidar_path: str,
-        anns: List[AnnotationObject],
-        cam_data: List[CamData],
-    ):
-        self._timestamp = datetime.utcfromtimestamp(timestamp / 1e6).isoformat()
-        self._lidar_path = lidar_path
-        self._anns = anns
-        self._cam_data = cam_data
-
-    @property
-    def timestamp(self) -> str:
-        return self._timestamp
-
-    @property
-    def lidar_path(self) -> str:
-        return self._lidar_path
-
-    @property
-    def anns(self) -> List[AnnotationObject]:
-        return self._anns
-
-    @property
-    def cam_data(self) -> List[CamData]:
-        return self._cam_data
+    timestamp_us: float
+    lidar_path: str
+    anns: List[AnnotationObject]
+    cam_data: List[CamData]
+    timestamp: str = field(init=False)
+
+    def __post_init__(self):
+        self.timestamp = datetime.utcfromtimestamp(self.timestamp_us / 1e6).isoformat()
 
     @staticmethod
     def generate_boxes(nuscenes, boxes: List) -> Generator:
```
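
The helper refactor replaces the hand-written `__init__` methods of `AnnotationObject` and `Sample` with dataclasses; `Sample` keeps the raw microsecond timestamp as a field and derives the ISO string in `__post_init__` via `field(init=False)`. A self-contained illustration of that pattern (field names mirror the diff, the timestamp value and path are arbitrary):

```python
from dataclasses import dataclass, field
from datetime import datetime


@dataclass
class Sample:
    timestamp_us: float                 # raw NuScenes timestamp in microseconds
    lidar_path: str
    timestamp: str = field(init=False)  # derived value, excluded from __init__

    def __post_init__(self):
        # Convert microseconds since epoch to an ISO-8601 string
        self.timestamp = datetime.utcfromtimestamp(self.timestamp_us / 1e6).isoformat()


sample = Sample(timestamp_us=1532402927647951, lidar_path="sweeps/LIDAR_TOP/example.pcd.bin")
print(sample.timestamp)  # 2018-07-24T03:28:47.647951
```
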