supervisely 6.73.438__py3-none-any.whl → 6.73.513__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- supervisely/__init__.py +137 -1
- supervisely/_utils.py +81 -0
- supervisely/annotation/annotation.py +8 -2
- supervisely/annotation/json_geometries_map.py +14 -11
- supervisely/annotation/label.py +80 -3
- supervisely/api/annotation_api.py +14 -11
- supervisely/api/api.py +59 -38
- supervisely/api/app_api.py +11 -2
- supervisely/api/dataset_api.py +74 -12
- supervisely/api/entities_collection_api.py +10 -0
- supervisely/api/entity_annotation/figure_api.py +52 -4
- supervisely/api/entity_annotation/object_api.py +3 -3
- supervisely/api/entity_annotation/tag_api.py +63 -12
- supervisely/api/guides_api.py +210 -0
- supervisely/api/image_api.py +72 -1
- supervisely/api/labeling_job_api.py +83 -1
- supervisely/api/labeling_queue_api.py +33 -7
- supervisely/api/module_api.py +9 -0
- supervisely/api/project_api.py +71 -26
- supervisely/api/storage_api.py +3 -1
- supervisely/api/task_api.py +13 -2
- supervisely/api/team_api.py +4 -3
- supervisely/api/video/video_annotation_api.py +119 -3
- supervisely/api/video/video_api.py +65 -14
- supervisely/api/video/video_figure_api.py +24 -11
- supervisely/app/__init__.py +1 -1
- supervisely/app/content.py +23 -7
- supervisely/app/development/development.py +18 -2
- supervisely/app/fastapi/__init__.py +1 -0
- supervisely/app/fastapi/custom_static_files.py +1 -1
- supervisely/app/fastapi/multi_user.py +105 -0
- supervisely/app/fastapi/subapp.py +88 -42
- supervisely/app/fastapi/websocket.py +77 -9
- supervisely/app/singleton.py +21 -0
- supervisely/app/v1/app_service.py +18 -2
- supervisely/app/v1/constants.py +7 -1
- supervisely/app/widgets/__init__.py +6 -0
- supervisely/app/widgets/activity_feed/__init__.py +0 -0
- supervisely/app/widgets/activity_feed/activity_feed.py +239 -0
- supervisely/app/widgets/activity_feed/style.css +78 -0
- supervisely/app/widgets/activity_feed/template.html +22 -0
- supervisely/app/widgets/card/card.py +20 -0
- supervisely/app/widgets/classes_list_selector/classes_list_selector.py +121 -9
- supervisely/app/widgets/classes_list_selector/template.html +60 -93
- supervisely/app/widgets/classes_mapping/classes_mapping.py +13 -12
- supervisely/app/widgets/classes_table/classes_table.py +1 -0
- supervisely/app/widgets/deploy_model/deploy_model.py +56 -35
- supervisely/app/widgets/dialog/dialog.py +12 -0
- supervisely/app/widgets/dialog/template.html +2 -1
- supervisely/app/widgets/ecosystem_model_selector/ecosystem_model_selector.py +1 -1
- supervisely/app/widgets/experiment_selector/experiment_selector.py +8 -0
- supervisely/app/widgets/fast_table/fast_table.py +184 -60
- supervisely/app/widgets/fast_table/template.html +1 -1
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +564 -0
- supervisely/app/widgets/heatmap/script.js +533 -0
- supervisely/app/widgets/heatmap/style.css +233 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/modal/__init__.py +0 -0
- supervisely/app/widgets/modal/modal.py +198 -0
- supervisely/app/widgets/modal/template.html +10 -0
- supervisely/app/widgets/object_class_view/object_class_view.py +3 -0
- supervisely/app/widgets/radio_tabs/radio_tabs.py +18 -2
- supervisely/app/widgets/radio_tabs/template.html +1 -0
- supervisely/app/widgets/select/select.py +6 -3
- supervisely/app/widgets/select_class/__init__.py +0 -0
- supervisely/app/widgets/select_class/select_class.py +363 -0
- supervisely/app/widgets/select_class/template.html +50 -0
- supervisely/app/widgets/select_cuda/select_cuda.py +22 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +65 -7
- supervisely/app/widgets/select_tag/__init__.py +0 -0
- supervisely/app/widgets/select_tag/select_tag.py +352 -0
- supervisely/app/widgets/select_tag/template.html +64 -0
- supervisely/app/widgets/select_team/select_team.py +37 -4
- supervisely/app/widgets/select_team/template.html +4 -5
- supervisely/app/widgets/select_user/__init__.py +0 -0
- supervisely/app/widgets/select_user/select_user.py +270 -0
- supervisely/app/widgets/select_user/template.html +13 -0
- supervisely/app/widgets/select_workspace/select_workspace.py +59 -10
- supervisely/app/widgets/select_workspace/template.html +9 -12
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/app/widgets/tree_select/tree_select.py +2 -0
- supervisely/aug/aug.py +6 -2
- supervisely/convert/base_converter.py +1 -0
- supervisely/convert/converter.py +2 -2
- supervisely/convert/image/csv/csv_converter.py +24 -15
- supervisely/convert/image/image_converter.py +3 -1
- supervisely/convert/image/image_helper.py +48 -4
- supervisely/convert/image/label_studio/label_studio_converter.py +2 -0
- supervisely/convert/image/medical2d/medical2d_helper.py +2 -24
- supervisely/convert/image/multispectral/multispectral_converter.py +6 -0
- supervisely/convert/image/pascal_voc/pascal_voc_converter.py +8 -5
- supervisely/convert/image/pascal_voc/pascal_voc_helper.py +7 -0
- supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py +33 -3
- supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py +12 -5
- supervisely/convert/pointcloud/las/las_converter.py +13 -1
- supervisely/convert/pointcloud/las/las_helper.py +110 -11
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
- supervisely/convert/pointcloud/pointcloud_converter.py +91 -3
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
- supervisely/convert/video/__init__.py +1 -0
- supervisely/convert/video/multi_view/__init__.py +0 -0
- supervisely/convert/video/multi_view/multi_view.py +543 -0
- supervisely/convert/video/sly/sly_video_converter.py +359 -3
- supervisely/convert/video/video_converter.py +24 -4
- supervisely/convert/volume/dicom/dicom_converter.py +13 -5
- supervisely/convert/volume/dicom/dicom_helper.py +30 -18
- supervisely/geometry/constants.py +1 -0
- supervisely/geometry/geometry.py +4 -0
- supervisely/geometry/helpers.py +5 -1
- supervisely/geometry/oriented_bbox.py +676 -0
- supervisely/geometry/polyline_3d.py +110 -0
- supervisely/geometry/rectangle.py +2 -1
- supervisely/io/env.py +76 -1
- supervisely/io/fs.py +21 -0
- supervisely/nn/benchmark/base_evaluator.py +104 -11
- supervisely/nn/benchmark/instance_segmentation/evaluator.py +1 -8
- supervisely/nn/benchmark/object_detection/evaluator.py +20 -4
- supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py +10 -5
- supervisely/nn/benchmark/semantic_segmentation/evaluator.py +34 -16
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/confusion_matrix.py +1 -1
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/frequently_confused.py +1 -1
- supervisely/nn/benchmark/semantic_segmentation/vis_metrics/overview.py +1 -1
- supervisely/nn/benchmark/visualization/evaluation_result.py +66 -4
- supervisely/nn/inference/cache.py +43 -18
- supervisely/nn/inference/gui/serving_gui_template.py +5 -2
- supervisely/nn/inference/inference.py +916 -222
- supervisely/nn/inference/inference_request.py +55 -10
- supervisely/nn/inference/predict_app/gui/classes_selector.py +83 -12
- supervisely/nn/inference/predict_app/gui/gui.py +676 -488
- supervisely/nn/inference/predict_app/gui/input_selector.py +205 -26
- supervisely/nn/inference/predict_app/gui/model_selector.py +2 -4
- supervisely/nn/inference/predict_app/gui/output_selector.py +46 -6
- supervisely/nn/inference/predict_app/gui/settings_selector.py +756 -59
- supervisely/nn/inference/predict_app/gui/tags_selector.py +1 -1
- supervisely/nn/inference/predict_app/gui/utils.py +236 -119
- supervisely/nn/inference/predict_app/predict_app.py +2 -2
- supervisely/nn/inference/session.py +43 -35
- supervisely/nn/inference/tracking/bbox_tracking.py +118 -35
- supervisely/nn/inference/tracking/point_tracking.py +5 -1
- supervisely/nn/inference/tracking/tracker_interface.py +10 -1
- supervisely/nn/inference/uploader.py +139 -12
- supervisely/nn/live_training/__init__.py +7 -0
- supervisely/nn/live_training/api_server.py +111 -0
- supervisely/nn/live_training/artifacts_utils.py +243 -0
- supervisely/nn/live_training/checkpoint_utils.py +229 -0
- supervisely/nn/live_training/dynamic_sampler.py +44 -0
- supervisely/nn/live_training/helpers.py +14 -0
- supervisely/nn/live_training/incremental_dataset.py +146 -0
- supervisely/nn/live_training/live_training.py +497 -0
- supervisely/nn/live_training/loss_plateau_detector.py +111 -0
- supervisely/nn/live_training/request_queue.py +52 -0
- supervisely/nn/model/model_api.py +9 -0
- supervisely/nn/model/prediction.py +2 -1
- supervisely/nn/model/prediction_session.py +26 -14
- supervisely/nn/prediction_dto.py +19 -1
- supervisely/nn/tracker/base_tracker.py +11 -1
- supervisely/nn/tracker/botsort/botsort_config.yaml +0 -1
- supervisely/nn/tracker/botsort/tracker/mc_bot_sort.py +7 -4
- supervisely/nn/tracker/botsort_tracker.py +94 -65
- supervisely/nn/tracker/utils.py +4 -5
- supervisely/nn/tracker/visualize.py +93 -93
- supervisely/nn/training/gui/classes_selector.py +16 -1
- supervisely/nn/training/gui/train_val_splits_selector.py +52 -31
- supervisely/nn/training/train_app.py +46 -31
- supervisely/project/data_version.py +115 -51
- supervisely/project/download.py +1 -1
- supervisely/project/pointcloud_episode_project.py +37 -8
- supervisely/project/pointcloud_project.py +30 -2
- supervisely/project/project.py +14 -2
- supervisely/project/project_meta.py +27 -1
- supervisely/project/project_settings.py +32 -18
- supervisely/project/versioning/__init__.py +1 -0
- supervisely/project/versioning/common.py +20 -0
- supervisely/project/versioning/schema_fields.py +35 -0
- supervisely/project/versioning/video_schema.py +221 -0
- supervisely/project/versioning/volume_schema.py +87 -0
- supervisely/project/video_project.py +717 -15
- supervisely/project/volume_project.py +623 -5
- supervisely/template/experiment/experiment.html.jinja +4 -4
- supervisely/template/experiment/experiment_generator.py +14 -21
- supervisely/template/live_training/__init__.py +0 -0
- supervisely/template/live_training/header.html.jinja +96 -0
- supervisely/template/live_training/live_training.html.jinja +51 -0
- supervisely/template/live_training/live_training_generator.py +464 -0
- supervisely/template/live_training/sly-style.css +402 -0
- supervisely/template/live_training/template.html.jinja +18 -0
- supervisely/versions.json +28 -26
- supervisely/video/sampling.py +39 -20
- supervisely/video/video.py +41 -12
- supervisely/video_annotation/video_figure.py +38 -4
- supervisely/video_annotation/video_object.py +29 -4
- supervisely/volume/stl_converter.py +2 -0
- supervisely/worker_api/agent_rpc.py +24 -1
- supervisely/worker_api/rpc_servicer.py +31 -7
- {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/METADATA +58 -40
- {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/RECORD +203 -155
- {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/WHEEL +1 -1
- supervisely_lib/__init__.py +6 -1
- {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info/licenses}/LICENSE +0 -0
- {supervisely-6.73.438.dist-info → supervisely-6.73.513.dist-info}/top_level.txt +0 -0
--- a/supervisely/convert/image/csv/csv_converter.py
+++ b/supervisely/convert/image/csv/csv_converter.py
@@ -24,6 +24,7 @@ from supervisely.io.fs import (
     get_file_name_with_ext,
     list_files_recursively,
 )
+from supervisely.io.env import team_id
 from supervisely.io.json import load_json_file
 from supervisely.project.project_settings import LabelingInterface

@@ -78,16 +79,16 @@ class CSVConverter(ImageConverter):
     }

     def __init__(
-
-
-
-
-
+        self,
+        input_data: str,
+        labeling_interface: Optional[Union[LabelingInterface, str]],
+        upload_as_links: bool,
+        remote_files_map: Optional[Dict[str, str]] = None,
     ):
         super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)

+        self._supports_links = True
         self._csv_reader = None
-        self._team_id = None

     def __str__(self):
         return AvailableImageConverters.CSV
@@ -121,6 +122,12 @@ class CSVConverter(ImageConverter):

         full_path = valid_files[0]

+        if self.upload_as_links and self._supports_links:
+            for local_path, remote_path in self._remote_files_map.items():
+                if local_path.endswith(full_path):
+                    self._api.storage.download(self._team_id, remote_path, local_path)
+                    break
+
         file_ext = get_file_ext(full_path)
         if file_ext in self.conversion_functions:
             csv_full_path = os.path.splitext(full_path)[0] + ".csv"
@@ -147,7 +154,7 @@ class CSVConverter(ImageConverter):
                     team_files = False
                     break
            if item_path is None:
-                logger.
+                logger.warning(f"Failed to find image path in row: {row}. Skipping.")
                continue
            ann_data = row.get("tag")
            item = CSVConverter.Item(
@@ -192,7 +199,7 @@ class CSVConverter(ImageConverter):
             ann_json = csv_helper.rename_in_json(ann_json, renamed_classes, renamed_tags)
             return Annotation.from_json(ann_json, meta)
         except Exception as e:
-            logger.
+            logger.warning(f"Failed to convert annotation: {repr(e)}")
             return item.create_empty_annotation()

     def process_remote_image(
@@ -209,19 +216,21 @@ class CSVConverter(ImageConverter):
        image_path = image_path.strip()
        if is_team_file:
            if not api.file.exists(team_id, image_path):
-                logger.
+                logger.warning(f"File {image_path} not found in Team Files. Skipping...")
                return None
            team_file_image_info = api.file.list(team_id, image_path)
            image_path = team_file_image_info[0]["fullStorageUrl"]
            if not image_path:
-                logger.
+                logger.warning(
+                    f"Failed to get full storage URL for file '{image_path}'. Skipping..."
+                )
                return None

        extension = os.path.splitext(image_path)[1]
        if not extension:
-            logger.
+            logger.warning(f"FYI: Image [{image_path}] doesn't have extension.")
        elif extension.lower() not in SUPPORTED_IMG_EXTS:
-            logger.
+            logger.warning(
                f"Image [{image_path}] has unsupported extension [{extension}]. Skipping..."
            )
            return None
@@ -234,7 +243,7 @@ class CSVConverter(ImageConverter):
                force_metadata_for_links=force_metadata,
            )
        except Exception:
-            logger.
+            logger.warning(f"Failed to link image {image_name}. Skipping...")
            return None
        if progress_cb is not None:
            progress_cb(1)
@@ -312,7 +321,7 @@ class CSVConverter(ImageConverter):
                success = False
                continue
            if item.name not in info.name:
-                logger.
+                logger.warning(
                    f"Batched image with name '{item.name}' doesn't match uploaded image name '{info.name}'"
                )
                success = False
@@ -339,4 +348,4 @@ class CSVConverter(ImageConverter):
        if success:
            logger.info(f"Dataset ID:'{dataset_id}' has been successfully uploaded.")
        else:
-            logger.
+            logger.warning(f"Dataset ID:'{dataset_id}' has been uploaded.")
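
The `upload_as_links` branch added above resolves the CSV file through `remote_files_map` (local path → remote Team Files path) and downloads only the matching entry via `api.storage.download`. A minimal standalone sketch of that lookup, with made-up paths and a hypothetical `find_remote_entry` helper that is not part of the SDK:

```python
from typing import Dict, Optional, Tuple

def find_remote_entry(remote_files_map: Dict[str, str], full_path: str) -> Optional[Tuple[str, str]]:
    """Return the (local_path, remote_path) pair whose local path ends with the CSV file, if any."""
    for local_path, remote_path in remote_files_map.items():
        if local_path.endswith(full_path):
            return local_path, remote_path
    return None

# Illustrative mapping only; real maps are built by the converter from the upload request.
mapping = {"/tmp/import/items.csv": "/import/csv/items.csv"}
print(find_remote_entry(mapping, "items.csv"))
# ('/tmp/import/items.csv', '/import/csv/items.csv')
```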
--- a/supervisely/convert/image/image_converter.py
+++ b/supervisely/convert/image/image_converter.py
@@ -136,10 +136,12 @@ class ImageConverter(BaseConverter):
        item_names = []
        item_paths = []
        item_metas = []
+        valid_batch_items = []
        for item in batch:
            item.path = self.validate_image(item.path)
            if item.path is None:
                continue  # image has failed validation
+            valid_batch_items.append(item)
            name = f"{get_file_name(item.path)}{get_file_ext(item.path).lower()}"

            item.name = generate_free_name(
@@ -180,7 +182,7 @@ class ImageConverter(BaseConverter):

        anns = []
        if not (self.upload_as_links and not self.supports_links):
-            for info, item in zip(img_infos,
+            for info, item in zip(img_infos, valid_batch_items):
                if self._force_shape_for_links:
                    item.set_shape((info.height, info.width))
                anns.append(self.to_supervisely(item, meta, renamed_classes, renamed_tags))
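
The `valid_batch_items` change above matters because image infos are only produced for items that passed validation; zipping them against the unfiltered batch can silently pair annotations with the wrong images. A toy illustration, with plain strings standing in for the converter's items and image infos:

```python
# Why the converter now zips uploaded infos with the *filtered* item list.
batch = ["a.jpg", "broken.jpg", "b.jpg"]
valid_batch_items = [p for p in batch if p != "broken.jpg"]   # validation filter
uploaded_infos = ["info(a.jpg)", "info(b.jpg)"]               # only valid items get uploaded

wrong = list(zip(uploaded_infos, batch))              # pairs info(b.jpg) with broken.jpg
right = list(zip(uploaded_infos, valid_batch_items))  # annotations stay aligned
print(wrong)
print(right)
```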
--- a/supervisely/convert/image/image_helper.py
+++ b/supervisely/convert/image/image_helper.py
@@ -1,12 +1,14 @@
 import mimetypes
+import re
 from pathlib import Path
+from typing import List, Union

 import magic
 import numpy as np
 from PIL import Image
-from typing import Union, List

-from supervisely import
+from supervisely import Label, Rectangle, logger
+from supervisely.geometry.oriented_bbox import OrientedBBox
 from supervisely.imaging.image import read, write
 from supervisely.io.fs import (
     get_file_ext,
@@ -88,7 +90,14 @@ def read_tiff_image(path: str) -> Union[np.ndarray, None]:
    import tifffile

    logger.debug(f"Found tiff file: {path}.")
-
+    try:
+        image = tifffile.imread(path)
+    except Exception as e:
+        logger.warning(
+            f"tifffile failed to read TIFF, trying Pillow fallback: {repr(e)}",
+            extra={"file_path": path},
+        )
+        image = _read_tiff_image_fallback(path)
    name = get_file_name_with_ext(path)
    if image is not None:
        tiff_shape = image.shape
@@ -100,11 +109,46 @@ def read_tiff_image(path: str) -> Union[np.ndarray, None]:
        return image


+def _read_tiff_image_fallback(path: str) -> Union[np.ndarray, None]:
+    """
+    Fallback method to read tiff image using Pillow.
+    """
+    from PIL import ImageSequence
+
+    try:
+        with Image.open(path) as pil_img:
+            frames = [np.asarray(frame) for frame in ImageSequence.Iterator(pil_img)]
+            if not frames:
+                return None
+            if len(frames) == 1:
+                return frames[0]
+
+            if all(frame.shape == frames[0].shape for frame in frames):
+                return np.stack(frames, axis=0)
+
+            logger.warning(
+                "TIFF has multiple pages with different shapes; using the first page only.",
+                extra={"file_path": path},
+            )
+            return frames[0]
+    except Exception as e:
+        logger.warning(
+            f"Pillow failed to read TIFF: {repr(e)}",
+            extra={"file_path": path},
+        )
+        return None
+
+
 def validate_image_bounds(labels: List[Label], img_rect: Rectangle) -> List[Label]:
    """
    Check if labels are localed inside the image canvas, print a warning and skip them if not.
    """
-    new_labels = [
+    new_labels = []
+    for label in labels:
+        if isinstance(label.geometry, OrientedBBox):
+            new_labels.append(label)
+        elif img_rect.contains(label.geometry.to_bbox()):
+            new_labels.append(label)
    if new_labels != labels:
        logger.warning(
            f"{len(labels) - len(new_labels)} annotation objects are out of image bounds. Skipping..."
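
For reference, the Pillow fallback introduced above stacks equally shaped TIFF pages into a single array and otherwise keeps only the first page. A self-contained sketch of that behaviour, assuming only Pillow and NumPy; the temporary file name and shapes are arbitrary:

```python
import os
import tempfile

import numpy as np
from PIL import Image, ImageSequence

# Write a throwaway two-page TIFF, then read it back the way the fallback does.
tmp_path = os.path.join(tempfile.gettempdir(), "example_two_pages.tif")
pages = [Image.fromarray(np.full((4, 5), v, dtype=np.uint8)) for v in (0, 255)]
pages[0].save(tmp_path, save_all=True, append_images=pages[1:])

with Image.open(tmp_path) as pil_img:
    frames = [np.asarray(frame) for frame in ImageSequence.Iterator(pil_img)]

same_shape = all(frame.shape == frames[0].shape for frame in frames)
stacked = np.stack(frames, axis=0) if same_shape else frames[0]
print(stacked.shape)  # (2, 4, 5)
```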
--- a/supervisely/convert/image/label_studio/label_studio_converter.py
+++ b/supervisely/convert/image/label_studio/label_studio_converter.py
@@ -31,6 +31,8 @@ class LabelStudioConverter(ImageConverter):
            return False
        if len(raw_ann) == 0:
            return False
+        if not all([isinstance(ann, dict) for ann in raw_ann]):
+            return False
        if not all([isinstance(ann.get("data"), dict) for ann in raw_ann]):
            return False
        anns = []
--- a/supervisely/convert/image/medical2d/medical2d_helper.py
+++ b/supervisely/convert/image/medical2d/medical2d_helper.py
@@ -8,10 +8,9 @@ import nrrd
 import numpy as np
 import pydicom
 from pydicom import FileDataset
-from tqdm import tqdm
-
 from supervisely import image, logger, volume
 from supervisely.annotation.tag import Tag
+from supervisely.convert.volume.dicom.dicom_helper import convert_to_monochrome2
 from supervisely.io.fs import (
     dir_exists,
     get_file_ext,
@@ -19,6 +18,7 @@ from supervisely.io.fs import (
     get_file_name_with_ext,
     mkdir,
 )
+from tqdm import tqdm

 _MEDICAL_DEFAULT_GROUP_TAG_NAMES = [
     "StudyInstanceUID",
@@ -135,28 +135,6 @@ def create_pixel_data_set(dcm: FileDataset, frame_axis: int) -> Tuple[List[np.nd
    list_of_images = np.split(pixel_array, int(dcm.NumberOfFrames), axis=frame_axis)
    return list_of_images, frame_axis

-def convert_to_monochrome2(dcm_path: str, dcm: FileDataset) -> FileDataset:
-    if getattr(dcm, "PhotometricInterpretation", None) == "YBR_FULL_422":
-        # * Convert dicom to monochrome
-        if len(dcm.pixel_array.shape) == 4 and dcm.pixel_array.shape[-1] == 3:
-            monochrome = dcm.pixel_array[..., 0].astype(np.uint8)
-        else:
-            logger.warn("Unexpected shape for YBR_FULL_422 data: " + str(dcm.pixel_array.shape))
-
-        try:
-            dcm.SamplesPerPixel = 1
-            dcm.PhotometricInterpretation = "MONOCHROME2"
-            dcm.PlanarConfiguration = 0
-            if len(monochrome.shape) == 3:
-                dcm.NumberOfFrames = str(monochrome.shape[0])
-                dcm.Rows, dcm.Columns = monochrome.shape[1:3]
-            dcm.PixelData = monochrome.tobytes()
-        except AttributeError as ae:
-            logger.error(f"Error occurred while converting dicom to monochrome: {ae}")
-
-        logger.info("Rewriting DICOM file with monochrome2 format")
-        dcm.save_as(dcm_path)
-    return dcm

 def convert_dcm_to_nrrd(
     image_path: str, converted_dir: str, group_tag_name: Optional[list] = None
--- a/supervisely/convert/image/multispectral/multispectral_converter.py
+++ b/supervisely/convert/image/multispectral/multispectral_converter.py
@@ -132,4 +132,10 @@ class MultiSpectralImageConverter(ImageConverter):
            logger.warning(f"Failed to read image {file_path}.")
            return

+        # Handle single-channel images (grayscale)
+        if len(image.shape) == 2:
+            logger.debug(f"Image {file_path} has single channel.")
+            return [image]
+
+        # Handle multi-channel images
        return [image[:, :, i] for i in range(image.shape[2])]
--- a/supervisely/convert/image/pascal_voc/pascal_voc_converter.py
+++ b/supervisely/convert/image/pascal_voc/pascal_voc_converter.py
@@ -180,11 +180,14 @@ class PascalVOCConverter(ImageConverter):
            if tag_meta is not None:
                continue
            if tag_name in pascal_voc_helper.DEFAULT_SUBCLASSES:
-                if
-
-
-                )
-
+                if tag_name == "pose":
+                    tag_meta = TagMeta(tag_name, TagValueType.ANY_STRING)
+                else:
+                    if values.difference({"0", "1"}):
+                        logger.warning(
+                            f"Tag '{tag_name}' has non-binary values.", extra={"values": values}
+                        )
+                    tag_meta = TagMeta(tag_name, TagValueType.NONE)
            elif tag_name in object_class_names:
                tag_meta = TagMeta(
                    tag_name,
--- a/supervisely/convert/image/pascal_voc/pascal_voc_helper.py
+++ b/supervisely/convert/image/pascal_voc/pascal_voc_helper.py
@@ -231,11 +231,18 @@ def xml_to_sly_labels(
        if tag_meta is None:
            logger.warning(f"Tag meta for '{field_name}' is not found in meta. Skipping.")
            continue
+
        if tag_meta.value_type == TagValueType.ANY_STRING:
            if not isinstance(value, str):
                value = str(value)
            tags.append(Tag(tag_meta, value))
        elif tag_meta.value_type == TagValueType.NONE:
+            # check if value is numeric
+            try:
+                int(value)
+            except ValueError:
+                logger.warning(f"Tag value is not numeric: '{value}'. Skipping.")
+                continue
            if int(value) == 1:
                tags.append(Tag(tag_meta))
            else:
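
A small sketch of the numeric guard added in `xml_to_sly_labels` above: for tag metas with value type NONE, a non-numeric XML value is now skipped with a warning instead of letting `int(value)` raise. The helper name and sample values below are invented for illustration:

```python
def keep_none_type_tag(value) -> bool:
    """Return True only for values that parse as the integer 1 (Pascal VOC-style flags)."""
    try:
        return int(value) == 1
    except ValueError:
        print(f"Tag value is not numeric: '{value}'. Skipping.")
        return False

print([keep_none_type_tag(v) for v in ("1", "0", "yes")])
# [True, False, False]  (with a warning printed for "yes")
```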
--- a/supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py
+++ b/supervisely/convert/pointcloud/kitti_3d/kitti_3d_converter.py
@@ -1,5 +1,6 @@
 import os
 from pathlib import Path
+from typing import Optional

 from supervisely import PointcloudAnnotation, ProjectMeta, is_development, logger
 from supervisely.api.api import Api, ApiField
@@ -28,12 +29,32 @@ class KITTI3DConverter(PointcloudConverter):
    def key_file_ext(self) -> str:
        return ".bin"

+    @property
+    def ann_ext(self) -> str:
+        return ".txt"
+
    def validate_format(self) -> bool:
+        def _calib_file_filter_fn(file_path):
+            return get_file_ext(file_path).lower() == self.ann_ext
+
        def _file_filter_fn(file_path):
            return get_file_ext(file_path).lower() == self.key_file_ext

        def _dir_filter_fn(path):
-            return all(
+            return all(
+                [_resolve_dir(path, name) is not None for name in kitti_3d_helper.FOLDER_NAMES]
+            )
+
+        def _resolve_dir(base_dir: str, expected_name: str) -> Optional[str]:
+            prefix = expected_name.split("_")[0]
+            exact_path = os.path.join(base_dir, expected_name)
+            if os.path.isdir(exact_path):
+                return exact_path
+            for entry in sorted(os.listdir(base_dir)):
+                candidate = os.path.join(base_dir, entry)
+                if entry.lower().startswith(prefix) and os.path.isdir(candidate):
+                    return candidate
+            return None

        input_paths = [d for d in dirs_filter(self._input_data, _dir_filter_fn)]
        if len(input_paths) == 0:
@@ -41,15 +62,22 @@ class KITTI3DConverter(PointcloudConverter):

        input_path = input_paths[0]
        velodyne_dir = os.path.join(input_path, "velodyne")
-        image_2_dir =
-        label_2_dir =
+        image_2_dir = _resolve_dir(input_path, "image_2")
+        label_2_dir = _resolve_dir(input_path, "label_2")
        calib_dir = os.path.join(input_path, "calib")

        self._items = []
+
        velodyne_files = list_files(velodyne_dir, filter_fn=_file_filter_fn)
        if len(velodyne_files) == 0:
            return False

+        calib_files = list_files(calib_dir, filter_fn=_calib_file_filter_fn)
+        if len(calib_files) == 0:
+            raise RuntimeError(
+                f"Calibration directory '{calib_dir}' does not contain any .txt files, which are required for KITTI 3D format."
+            )
+
        kitti_labels = []
        for velodyne_path in velodyne_files:
            file_name = get_file_name(velodyne_path)
@@ -67,6 +95,8 @@ class KITTI3DConverter(PointcloudConverter):
                continue

            label = kitti_3d_helper.read_kitti_label(label_path, calib_path)
+            if label is None:
+                continue
            kitti_labels.append(label)
            self._items.append(self.Item(velodyne_path, label, (image_path, calib_path)))

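
The `_resolve_dir` helper above tolerates KITTI-style naming variations by falling back to a prefix match on the folder name. A standalone restatement with a quick check; the `image_02` directory name is an invented example:

```python
import os
import tempfile
from typing import Optional

def resolve_dir(base_dir: str, expected_name: str) -> Optional[str]:
    """Prefer the exact folder name, else the first folder sharing its prefix."""
    prefix = expected_name.split("_")[0]
    exact_path = os.path.join(base_dir, expected_name)
    if os.path.isdir(exact_path):
        return exact_path
    for entry in sorted(os.listdir(base_dir)):
        candidate = os.path.join(base_dir, entry)
        if entry.lower().startswith(prefix) and os.path.isdir(candidate):
            return candidate
    return None

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "image_02"))
    print(resolve_dir(root, "image_2"))  # .../image_02
```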
--- a/supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py
+++ b/supervisely/convert/pointcloud/kitti_3d/kitti_3d_helper.py
@@ -1,6 +1,6 @@
 import numpy as np

-from supervisely import ObjClass, ObjClassCollection, ProjectMeta
+from supervisely import ObjClass, ObjClassCollection, ProjectMeta, logger
 from supervisely.geometry.cuboid_3d import Cuboid3d
 from supervisely.geometry.point_3d import Vector3d
 from supervisely.pointcloud_annotation.pointcloud_figure import PointcloudFigure
@@ -10,11 +10,18 @@ FOLDER_NAMES = ["velodyne", "image_2", "label_2", "calib"]


 def read_kitti_label(label_path, calib_path):
-
+    """
+    Read KITTI label file with calibration.
+    """

-
-
-
+    import open3d as o3d  # pylint: disable=import-error
+    try:
+        calib = o3d.ml.datasets.KITTI.read_calib(calib_path)
+        label = o3d.ml.datasets.KITTI.read_label(label_path, calib)
+        return label
+    except Exception as e:
+        logger.warning(f"Failed to read KITTI label or calibration: {e}")
+        return None


 def convert_labels_to_meta(labels):
--- a/supervisely/convert/pointcloud/las/las_converter.py
+++ b/supervisely/convert/pointcloud/las/las_converter.py
@@ -26,12 +26,24 @@ class LasConverter(PointcloudConverter):

        # create Items
        self._items = []
+
+        # Warning about coordinate shift
+        if len(las_list) > 0:
+            logger.info(
+                "⚠️ IMPORTANT: Coordinate shift will be applied to all LAS/LAZ files during conversion to PCD format. "
+                "This is necessary to avoid floating-point precision issues and visual artifacts. "
+                "The shift values (X, Y, Z offsets) will be logged for each file. "
+                "If you need to convert annotations back to original LAS coordinates or use them with original LAS files, "
+                "you MUST add these shift values back to the PCD/annotation coordinates. "
+                "Check the logs for 'Applied coordinate shift' messages for each file."
+            )
+
        for las_path in las_list:
            ext = get_file_ext(las_path)
            pcd_path = las_path.replace(ext, ".pcd")
            las_helper.las2pcd(las_path, pcd_path)
            if not os.path.exists(pcd_path):
-                logger.
+                logger.warning(f"Failed to convert LAS/LAZ to PCD. Skipping: {las_path}")
                continue
            item = self.Item(pcd_path)
            self._items.append(item)
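
A hedged sketch of what the warning above asks consumers to do: to return to original LAS coordinates, add the per-file shift logged as "Applied coordinate shift" back to the recentered PCD or annotation coordinates. The shift and points below are placeholder numbers:

```python
import numpy as np

# Values taken from the "Applied coordinate shift for <file>: X=..., Y=..., Z=..." log line.
shift = np.array([637_012.35, 4_210_887.10, 142.78])

# Recentered coordinates as stored in the generated PCD (or in annotation geometry).
pcd_points = np.array([[1.2, -0.5, 0.1],
                       [3.4,  2.2, -0.3]])

original_las_points = pcd_points + shift
print(original_las_points)
```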
--- a/supervisely/convert/pointcloud/las/las_helper.py
+++ b/supervisely/convert/pointcloud/las/las_helper.py
@@ -1,20 +1,39 @@
+import numpy as np
+
 from supervisely import logger
+from supervisely.io.fs import get_file_name_with_ext

-import numpy as np

+def las2pcd(input_path: str, output_path: str) -> None:
+    """
+    Convert a LAS/LAZ point cloud to PCD format.

-
-
-
+    The function reads a LAS/LAZ file, applies coordinate scaling and offsets,
+    recenters the point cloud to improve numerical stability, and writes
+    the result to a PCD file compatible with common point cloud viewers.

+    :param input_path: Path to the input LAS/LAZ file.
+    :type input_path: str
+    :param output_path: Path where the output PCD file will be written.
+    :type output_path: str
+    :return: None
+    """
+    import laspy  # pylint: disable=import-error
+    from pypcd4 import Encoding  # pylint: disable=import-error
+    from pypcd4 import PointCloud as pypcd4_pcd  # pylint: disable=import-error
+
+    # Read LAS file
    try:
+        input_file_name = get_file_name_with_ext(input_path)
+        logger.info(f"Start processing file: {input_file_name}")
        las = laspy.read(input_path)
    except Exception as e:
        if "buffer size must be a multiple of element size" in str(e):
-
-
-                "
+            logger.warning(
+                f"{input_file_name} file read failed due to buffer size mismatch with EXTRA_BYTES. "
+                "Retrying with EXTRA_BYTES disabled as a workaround..."
            )
+            from laspy.point.record import PackedPointRecord  # pylint: disable=import-error

            @classmethod
            def from_buffer_without_extra_bytes(cls, buffer, point_format, count=-1, offset=0):
@@ -23,10 +42,90 @@ def las2pcd(input_path, output_path):
                points_dtype = point_format.dtype()
                data = np.frombuffer(buffer, dtype=points_dtype, offset=offset, count=count)
                return cls(data, point_format)
+
            PackedPointRecord.from_buffer = from_buffer_without_extra_bytes
            las = laspy.read(input_path)
        else:
-
-
-
-
+            logger.error(f"Failed to read {input_file_name}: {e}")
+            return
+
+    try:
+        # Use scaled coordinates (scale and offset applied)
+        x = np.asarray(las.x, dtype=np.float64)
+        y = np.asarray(las.y, dtype=np.float64)
+        z = np.asarray(las.z, dtype=np.float64)
+
+        # Check for empty point cloud
+        if len(x) == 0:
+            logger.warning(f"{input_file_name} file is empty (0 points).")
+            return
+
+        # Recenter point cloud to reduce floating point precision issues
+        # Calculate shift for each axis independently (avoids creating intermediate pts array)
+        shift_x = x.mean()
+        shift_y = y.mean()
+        shift_z = z.mean()
+
+        logger.info(
+            f"Applied coordinate shift for {input_file_name}: "
+            f"X={shift_x}, Y={shift_y}, Z={shift_z}"
+        )
+
+        # Base PCD fields - apply shift and convert to float32 in one operation
+        data = {
+            "x": (x - shift_x).astype(np.float32),
+            "y": (y - shift_y).astype(np.float32),
+            "z": (z - shift_z).astype(np.float32),
+            "intensity": las.intensity.astype(np.float32),
+        }
+
+        # Handle RGB attributes if present
+        if hasattr(las, "red") and hasattr(las, "green") and hasattr(las, "blue"):
+            # Convert LAS colors to 8-bit.
+            # Some files store 0–255 values in 16-bit fields; detect this and only shift when needed.
+            r_raw = np.asarray(las.red)
+            g_raw = np.asarray(las.green)
+            b_raw = np.asarray(las.blue)
+
+            # Determine if the values are full 16-bit range (0–65535) or already 0–255.
+            max_rgb = max(
+                r_raw.max(initial=0),
+                g_raw.max(initial=0),
+                b_raw.max(initial=0),
+            )
+
+            if max_rgb > 255:
+                # Typical LAS case: 16-bit colors; downscale to 8-bit.
+                r = (r_raw >> 8).astype(np.uint32)
+                g = (g_raw >> 8).astype(np.uint32)
+                b = (b_raw >> 8).astype(np.uint32)
+            else:
+                # Values are already in 0–255 range; use as-is.
+                r = r_raw.astype(np.uint32)
+                g = g_raw.astype(np.uint32)
+                b = b_raw.astype(np.uint32)
+
+            # Pack RGB into a single float field (PCL-compatible)
+            rgb = (r << 16) | (g << 8) | b
+            data["rgb"] = rgb.view(np.float32)
+
+        # Write PCD file
+        # Create structured array for pypcd4
+        field_names = ["x", "y", "z", "intensity"]
+        types = [np.float32, np.float32, np.float32, np.float32]
+
+        if "rgb" in data:
+            field_names.append("rgb")
+            types.append(np.float32)
+
+        arrays = [data[field] for field in field_names]
+    except Exception as e:
+        logger.error(f"Error processing {input_file_name}: {e}")
+        return
+
+    try:
+        pd = pypcd4_pcd.from_points(arrays, field_names, types)
+        pd.save(output_path, encoding=Encoding.BINARY_COMPRESSED)
+    except Exception as e:
+        logger.error(f"Failed to write PCD file for {input_file_name}: {e}")
+        return