supervisely-6.73.274-py3-none-any.whl → supervisely-6.73.276-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (20):
  1. supervisely/app/widgets/custom_models_selector/custom_models_selector.py +54 -44
  2. supervisely/convert/__init__.py +4 -1
  3. supervisely/convert/base_converter.py +2 -9
  4. supervisely/convert/pointcloud/nuscenes_conv/__init__.py +0 -0
  5. supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +227 -0
  6. supervisely/convert/pointcloud/pointcloud_converter.py +52 -1
  7. supervisely/convert/pointcloud/sly/sly_pointcloud_converter.py +8 -21
  8. supervisely/convert/pointcloud/sly/sly_pointcloud_helper.py +4 -2
  9. supervisely/convert/pointcloud_episodes/lyft/lyft_converter.py +19 -20
  10. supervisely/convert/pointcloud_episodes/nuscenes_conv/__init__.py +0 -0
  11. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +305 -0
  12. supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +265 -0
  13. supervisely/convert/pointcloud_episodes/pointcloud_episodes_converter.py +82 -27
  14. supervisely/convert/pointcloud_episodes/sly/sly_pointcloud_episodes_converter.py +9 -8
  15. {supervisely-6.73.274.dist-info → supervisely-6.73.276.dist-info}/METADATA +1 -1
  16. {supervisely-6.73.274.dist-info → supervisely-6.73.276.dist-info}/RECORD +20 -15
  17. {supervisely-6.73.274.dist-info → supervisely-6.73.276.dist-info}/LICENSE +0 -0
  18. {supervisely-6.73.274.dist-info → supervisely-6.73.276.dist-info}/WHEEL +0 -0
  19. {supervisely-6.73.274.dist-info → supervisely-6.73.276.dist-info}/entry_points.txt +0 -0
  20. {supervisely-6.73.274.dist-info → supervisely-6.73.276.dist-info}/top_level.txt +0 -0

supervisely/app/widgets/custom_models_selector/custom_models_selector.py

@@ -25,6 +25,7 @@ from supervisely.app.widgets import (
 )
 from supervisely.io.fs import get_file_name_with_ext
 from supervisely.nn.artifacts.artifacts import TrainInfo
+import supervisely.io.env as sly_env
 
 WEIGHTS_DIR = "weights"
 
@@ -69,18 +70,34 @@ class CustomModelsSelector(Widget):
         # col 1 task
         self._task_id = task_id
         self._task_path = train_info.artifacts_folder
-        task_info = self._api.task.get_info_by_id(task_id)
-        self._task_date_iso = task_info["startedAt"]
-        self._task_date = self._normalize_date()
-        self._task_link = self._create_task_link()
+        try:
+            self._task_info = self._api.task.get_info_by_id(task_id)
+        except:
+            self._task_info = None
+
+        if self._task_info is not None:
+            self._task_date_iso = self._task_info["startedAt"]
+            self._task_date = self._normalize_date()
+            self._task_link = self._create_task_link()
+        else:
+            self._task_date_iso = None
+            self._task_date = None
+            self._task_link = None
         self._config_path = train_info.config_path
 
         # col 2 project
         self._training_project_name = train_info.project_name
-        project_info = self._api.project.get_info_by_name(
-            task_info["workspaceId"], self._training_project_name
+
+        workspace_id = (
+            self._task_info["workspaceId"]
+            if self._task_info
+            else sly_env.workspace_id(raise_not_found=False)
+        )
+        self._training_project_info = (
+            self._api.project.get_info_by_name(workspace_id, self._training_project_name)
+            if workspace_id
+            else None
         )
-        self._training_project_info = project_info
 
         # col 3 checkpoints
         self._checkpoints = train_info.checkpoints
@@ -175,30 +192,36 @@ class CustomModelsSelector(Widget):
         return ""
 
     def _create_task_widget(self) -> Flexbox:
-        task_widget = Container(
-            [
-                Text(
-                    f"<i class='zmdi zmdi-folder' style='color: #7f858e'></i> <a href='{self._task_link}'>{self._task_id}</a>",
-                    "text",
-                ),
-                Text(
-                    f"<span class='field-description text-muted' style='color: #7f858e'>{self._task_date}</span>",
-                    "text",
-                    font_size=13,
-                ),
-            ],
-            gap=0,
-        )
+        if self._task_info is not None:
+            task_widget = Container(
+                [
+                    Text(
+                        f"<i class='zmdi zmdi-folder' style='color: #7f858e'></i> <a href='{self._task_link}'>{self._task_id}</a>",
+                        "text",
+                    ),
+                    Text(
+                        f"<span class='field-description text-muted' style='color: #7f858e'>{self._task_date}</span>",
+                        "text",
+                        font_size=13,
+                    ),
+                ],
+                gap=0,
+            )
+        else:
+            task_widget = Text(
+                f"<span class='field-description text-muted' style='color: #7f858e'>Task was archived (ID: '{self._task_id}')</span>",
+                "text",
+            )
         return task_widget
 
     def _create_training_project_widget(self) -> Union[ProjectThumbnail, Text]:
-        if self.training_project_info is not None:
+        if self._training_project_info is not None:
             training_project_widget = ProjectThumbnail(
                 self._training_project_info, remove_margins=True
             )
         else:
             training_project_widget = Text(
-                f"<span class='field-description text-muted' style='color: #7f858e'>Project was deleted</span>",
+                f"<span class='field-description text-muted' style='color: #7f858e'>Project was archived</span>",
                 "text",
                 font_size=13,
             )
@@ -209,15 +232,11 @@ class CustomModelsSelector(Widget):
         for checkpoint_info in self._checkpoints:
             if isinstance(checkpoint_info, dict):
                 checkpoint_selector_items.append(
-                    Select.Item(
-                        value=checkpoint_info["path"], label=checkpoint_info["name"]
-                    )
+                    Select.Item(value=checkpoint_info["path"], label=checkpoint_info["name"])
                 )
             elif isinstance(checkpoint_info, FileInfo):
                 checkpoint_selector_items.append(
-                    Select.Item(
-                        value=checkpoint_info.path, label=checkpoint_info.name
-                    )
+                    Select.Item(value=checkpoint_info.path, label=checkpoint_info.name)
                 )
 
         checkpoint_selector = Select(items=checkpoint_selector_items)
@@ -282,9 +301,7 @@ class CustomModelsSelector(Widget):
         )
 
         file_api = FileApi(self._api)
-        self._model_path_input = Input(
-            placeholder="Path to model file in Team Files"
-        )
+        self._model_path_input = Input(placeholder="Path to model file in Team Files")
 
         @self._model_path_input.value_changed
         def change_folder(value):
@@ -322,9 +339,7 @@ class CustomModelsSelector(Widget):
 
         self.custom_tab_widgets.hide()
 
-        self.show_custom_checkpoint_path_checkbox = Checkbox(
-            "Use custom checkpoint", False
-        )
+        self.show_custom_checkpoint_path_checkbox = Checkbox("Use custom checkpoint", False)
 
         @self.show_custom_checkpoint_path_checkbox.value_changed
         def show_custom_checkpoint_path_checkbox_changed(is_checked):
@@ -399,9 +414,7 @@ class CustomModelsSelector(Widget):
         self.disable_table()
         super().disable()
 
-    def _generate_table_rows(
-        self, train_infos: List[TrainInfo]
-    ) -> Dict[str, List[ModelRow]]:
+    def _generate_table_rows(self, train_infos: List[TrainInfo]) -> Dict[str, List[ModelRow]]:
         """Method to generate table rows from remote path to training app save directory"""
 
         def process_train_info(train_info):
@@ -414,7 +427,7 @@ class CustomModelsSelector(Widget):
                 )
                 return train_info.task_type, model_row
             except Exception as e:
-                logger.warn(f"Failed to process train info: {train_info}")
+                logger.warning(f"Failed to process train info: {train_info}. Error: {repr(e)}")
                 return None, None
 
         table_rows = defaultdict(list)
@@ -448,8 +461,7 @@ class CustomModelsSelector(Widget):
         if "pose estimation" in task_types:
            sorted_tt.append("pose estimation")
         other_tasks = sorted(
-            set(task_types)
-            - set(["object detection", "instance segmentation", "pose estimation"])
+            set(task_types) - set(["object detection", "instance segmentation", "pose estimation"])
         )
         sorted_tt.extend(other_tasks)
         return sorted_tt
@@ -536,9 +548,7 @@ class CustomModelsSelector(Widget):
 
     def set_custom_checkpoint_task_type(self, task_type: str) -> None:
         if self.use_custom_checkpoint_path():
-            available_task_types = (
-                self.custom_checkpoint_task_type_selector.get_labels()
-            )
+            available_task_types = self.custom_checkpoint_task_type_selector.get_labels()
            if task_type not in available_task_types:
                raise ValueError(f'"{task_type}" is not available task type')
            self.custom_checkpoint_task_type_selector.set_value(task_type)
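
Note: the widget changes above make the selector tolerant of archived or missing training tasks. A minimal sketch of the same fallback pattern outside the widget (the `api` instance, task id, and project name below are hypothetical placeholders):

```python
import supervisely.io.env as sly_env
from supervisely.api.api import Api

api = Api.from_env()
task_id = 12345  # hypothetical task id

# Fetch task info defensively: an archived/removed task should not raise.
try:
    task_info = api.task.get_info_by_id(task_id)
except Exception:
    task_info = None

# Prefer the workspace recorded on the task; otherwise fall back to the environment.
workspace_id = (
    task_info["workspaceId"] if task_info else sly_env.workspace_id(raise_not_found=False)
)
project_info = (
    api.project.get_info_by_name(workspace_id, "my_training_project") if workspace_id else None
)
```
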

supervisely/convert/__init__.py

@@ -38,7 +38,7 @@ from supervisely.convert.pointcloud.las.las_converter import LasConverter
 from supervisely.convert.pointcloud.ply.ply_converter import PlyConverter
 from supervisely.convert.pointcloud.bag.bag_converter import BagConverter
 from supervisely.convert.pointcloud.lyft.lyft_converter import LyftConverter
-
+from supervisely.convert.pointcloud.nuscenes_conv.nuscenes_converter import NuscenesConverter
 
 # Pointcloud Episodes
 from supervisely.convert.pointcloud_episodes.sly.sly_pointcloud_episodes_converter import (
@@ -46,6 +46,9 @@ from supervisely.convert.pointcloud_episodes.sly.sly_pointcloud_episodes_convert
 )
 from supervisely.convert.pointcloud_episodes.bag.bag_converter import BagEpisodesConverter
 from supervisely.convert.pointcloud_episodes.lyft.lyft_converter import LyftEpisodesConverter
+from supervisely.convert.pointcloud_episodes.nuscenes_conv.nuscenes_converter import (
+    NuscenesEpisodesConverter,
+)
 
 # Video
 from supervisely.convert.video.mot.mot_converter import MOTConverter
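
With the registrations above, the new converters should be importable directly from the convert package (a minimal import check, assuming the `nuscenes_conv` subpackages ship in the wheel as listed in RECORD):

```python
from supervisely.convert import NuscenesConverter, NuscenesEpisodesConverter

print(NuscenesConverter, NuscenesEpisodesConverter)
```
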

supervisely/convert/base_converter.py

@@ -55,6 +55,7 @@ class AvailablePointcloudConverters:
     PLY = "ply"
     BAG = "rosbag"
     LYFT = "lyft"
+    NUSCENES = "nuscenes"
 
 
 class AvailablePointcloudEpisodesConverters:
@@ -301,23 +302,15 @@ class BaseConverter:
         return found_formats[0]
 
     def _collect_items_if_format_not_detected(self):
-        from supervisely.convert.pointcloud_episodes.pointcloud_episodes_converter import (
-            PointcloudEpisodeConverter,
-        )
-
         only_modality_items = True
         unsupported_exts = set()
         items = []
-        is_episode = isinstance(self, PointcloudEpisodeConverter)
         for root, _, files in os.walk(self._input_data):
            for file in files:
                full_path = os.path.join(root, file)
                ext = get_file_ext(full_path)
                if ext.lower() in self.allowed_exts:  # pylint: disable=no-member
-                    if is_episode:
-                        items.append(self.Item(full_path, len(items)))  # pylint: disable=no-member
-                    else:
-                        items.append(self.Item(full_path))  # pylint: disable=no-member
+                    items.append(self.Item(full_path))  # pylint: disable=no-member
                    continue
                only_modality_items = False
                if ext.lower() in self.unsupported_exts:

supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py (new file)

@@ -0,0 +1,227 @@
+import os
+from typing import Dict, Optional
+
+import supervisely.convert.pointcloud_episodes.nuscenes_conv.nuscenes_helper as helpers
+import supervisely.io.fs as fs
+from supervisely import PointcloudAnnotation, PointcloudObject
+from supervisely._utils import is_development
+from supervisely.annotation.obj_class import ObjClass
+from supervisely.annotation.tag_meta import TagMeta, TagValueType
+from supervisely.api.api import Api, ApiField
+from supervisely.convert.base_converter import AvailablePointcloudConverters
+from supervisely.convert.pointcloud.pointcloud_converter import PointcloudConverter
+from supervisely.convert.pointcloud_episodes.nuscenes_conv.nuscenes_converter import (
+    NuscenesEpisodesConverter,
+)
+from supervisely.geometry.cuboid_3d import Cuboid3d
+from supervisely.pointcloud_annotation.pointcloud_figure import PointcloudFigure
+from supervisely.pointcloud_annotation.pointcloud_object_collection import (
+    PointcloudObjectCollection,
+)
+from supervisely.pointcloud_annotation.pointcloud_tag import PointcloudTag
+from supervisely.pointcloud_annotation.pointcloud_tag_collection import (
+    PointcloudTagCollection,
+)
+from supervisely.project.project_meta import ProjectMeta
+from supervisely.sly_logger import logger
+
+
+class NuscenesConverter(NuscenesEpisodesConverter, PointcloudConverter):
+    """Converter for NuScenes pointcloud format."""
+
+    def __init__(
+        self,
+        input_data: str,
+        labeling_interface: str,
+        upload_as_links: bool,
+        remote_files_map: Optional[Dict[str, str]] = None,
+    ):
+        super().__init__(input_data, labeling_interface, upload_as_links, remote_files_map)
+        self._nuscenes = None
+
+    def __str__(self) -> str:
+        return AvailablePointcloudConverters.NUSCENES
+
+    def to_supervisely(
+        self,
+        scene_sample,
+        meta: ProjectMeta,
+        renamed_classes: dict = {},
+        renamed_tags: dict = {},
+    ) -> PointcloudAnnotation:
+        bevbox_objs = [obj.convert_nuscenes_to_BEVBox3D() for obj in scene_sample.anns]
+        geoms = [obj.to_supervisely() for obj in scene_sample.anns]
+        attrs = [obj.attributes for obj in scene_sample.anns]
+
+        figures = []
+        objs = []
+        for label, geom, attributes in zip(bevbox_objs, geoms, attrs):
+            class_name = renamed_classes.get(label.label_class, label.label_class)
+            tag_col = None
+            if len(attributes) > 0 and all([tag_name is not None for tag_name in attributes]):
+                tag_meta_names = [renamed_tags.get(name, name) for name in attributes]
+                tag_metas = [meta.get_tag_meta(tag_meta_name) for tag_meta_name in tag_meta_names]
+                tag_col = PointcloudTagCollection([PointcloudTag(meta, None) for meta in tag_metas])
+            pcobj = PointcloudObject(meta.get_obj_class(class_name), tag_col)
+            figures.append(PointcloudFigure(pcobj, geom))
+            objs.append(pcobj)
+        return PointcloudAnnotation(PointcloudObjectCollection(objs), figures)
+
+    def upload_dataset(self, api: Api, dataset_id: int, batch_size: int = 1, log_progress=True):
+        nuscenes = self._nuscenes
+
+        tag_metas = [TagMeta(attr["name"], TagValueType.NONE) for attr in nuscenes.attribute]
+        obj_classes = []
+        for category in nuscenes.category:
+            color = nuscenes.colormap[category["name"]]
+            description = category["description"]
+            if len(description) > 255:
+                # * Trim description to fit into 255 characters limit
+                sentences = description.split(".")
+                trimmed_description = ""
+                for sentence in sentences:
+                    if len(trimmed_description) + len(sentence) + 1 > 255:
+                        break
+                    trimmed_description += sentence + "."
+                description = trimmed_description.strip()
+            obj_classes.append(ObjClass(category["name"], Cuboid3d, color, description=description))
+
+        self._meta = ProjectMeta(obj_classes, tag_metas)
+        meta, renamed_classes, renamed_tags = self.merge_metas_with_conflicts(api, dataset_id)
+
+        dataset_info = api.dataset.get_info_by_id(dataset_id)
+        scene_name_to_dataset = {}
+
+        scene_names = [scene["name"] for scene in nuscenes.scene]
+        scene_cnt = len(scene_names)
+        total_sample_cnt = sum([scene["nbr_samples"] for scene in nuscenes.scene])
+
+        multiple_scenes = len(scene_names) > 1
+        if multiple_scenes:
+            logger.info(f"Found {scene_cnt} scenes ({total_sample_cnt} samples) in the input data.")
+            # * Create a nested dataset for each scene
+            for name in scene_names:
+                ds = api.dataset.create(
+                    dataset_info.project_id,
+                    name,
+                    change_name_if_conflict=True,
+                    parent_id=dataset_id,
+                )
+                scene_name_to_dataset[name] = ds
+        else:
+            scene_name_to_dataset[scene_names[0]] = dataset_info
+
+        if log_progress:
+            progress, progress_cb = self.get_progress(total_sample_cnt, "Converting pointclouds...")
+        else:
+            progress_cb = None
+
+        for scene in nuscenes.scene:
+            current_dataset_id = scene_name_to_dataset[scene["name"]].id
+
+            log = nuscenes.get("log", scene["log_token"])
+            sample_token = scene["first_sample_token"]
+
+            # * Extract scene's samples
+            scene_samples = []
+            for i in range(scene["nbr_samples"]):
+                sample = nuscenes.get("sample", sample_token)
+                lidar_path, boxes, _ = nuscenes.get_sample_data(sample["data"]["LIDAR_TOP"])
+                if not os.path.exists(lidar_path):
+                    logger.warning(f'Scene "{scene["name"]}" has no LIDAR data.')
+                    continue
+
+                timestamp = sample["timestamp"]
+                anns = []
+                for box, name, inst_token in helpers.Sample.generate_boxes(nuscenes, boxes):
+                    current_instance_token = inst_token["token"]
+                    parent_token = inst_token["prev"]
+
+                    # get category, attributes and visibility
+                    ann = nuscenes.get("sample_annotation", current_instance_token)
+                    category = ann["category_name"]
+                    attributes = [
+                        nuscenes.get("attribute", attr)["name"] for attr in ann["attribute_tokens"]
+                    ]
+                    visibility = nuscenes.get("visibility", ann["visibility_token"])["level"]
+
+                    anns.append(
+                        helpers.AnnotationObject(
+                            name,
+                            box,
+                            current_instance_token,
+                            parent_token,
+                            category,
+                            attributes,
+                            visibility,
+                        )
+                    )
+
+                # get camera data
+                sample_data = nuscenes.get("sample_data", sample["data"]["LIDAR_TOP"])
+                cal_sensor = nuscenes.get(
+                    "calibrated_sensor", sample_data["calibrated_sensor_token"]
+                )
+                ego_pose = nuscenes.get("ego_pose", sample_data["ego_pose_token"])
+
+                camera_data = [
+                    helpers.CamData(nuscenes, sensor, token, cal_sensor, ego_pose)
+                    for sensor, token in sample["data"].items()
+                    if sensor.startswith("CAM")
+                ]
+                scene_samples.append(helpers.Sample(timestamp, lidar_path, anns, camera_data))
+                sample_token = sample["next"]
+
+            # * Convert and upload pointclouds w/ annotations
+            for idx, sample in enumerate(scene_samples):
+                pcd_ann = self.to_supervisely(sample, meta, renamed_classes, renamed_tags)
+
+                pcd_path = sample.convert_lidar_to_supervisely()
+                pcd_name = fs.get_file_name(pcd_path)
+                pcd_meta = {
+                    "frame": idx,
+                    "vehicle": log["vehicle"],
+                    "date": log["date_captured"],
+                    "location": log["location"],
+                    "description": scene["description"],
+                }
+                info = api.pointcloud.upload_path(current_dataset_id, pcd_name, pcd_path, pcd_meta)
+                fs.silent_remove(pcd_path)
+
+                pcd_id = info.id
+                # * Upload pointcloud annotation
+                try:
+                    api.pointcloud.annotation.append(pcd_id, pcd_ann)
+                except Exception as e:
+                    error_msg = getattr(getattr(e, "response", e), "text", str(e))
+                    logger.warning(
+                        f"Failed to upload annotation for scene: {scene['name']}. Message: {error_msg}"
+                    )
+
+                # * Upload related images
+                image_jsons = []
+                camera_names = []
+                for img_path, rimage_info in [
+                    data.get_info(sample.timestamp) for data in sample.cam_data
+                ]:
+                    img = api.pointcloud.upload_related_image(img_path)
+                    image_jsons.append(
+                        {
+                            ApiField.ENTITY_ID: pcd_id,
+                            ApiField.NAME: rimage_info[ApiField.NAME],
+                            ApiField.HASH: img,
+                            ApiField.META: rimage_info[ApiField.META],
+                        }
+                    )
+                    camera_names.append(rimage_info[ApiField.META]["deviceId"])
+                if len(image_jsons) > 0:
+                    api.pointcloud.add_related_images(image_jsons, camera_names)
+
+                if log_progress:
+                    progress_cb(1)
+
+            logger.info(f"Dataset ID:{current_dataset_id} has been successfully uploaded.")
+
+        if log_progress:
+            if is_development():
+                progress.close()
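
A hedged usage sketch for the new converter: this diff does not show where `self._nuscenes` is populated (presumably during format validation inherited from the episodes converter, which is not included here), so the example assumes `validate_format()` detects a NuScenes export at `input_data`; the path, labeling-interface value, and dataset id are placeholders:

```python
from supervisely.api.api import Api
from supervisely.convert.pointcloud.nuscenes_conv.nuscenes_converter import NuscenesConverter

api = Api.from_env()
converter = NuscenesConverter(
    input_data="/data/nuscenes",     # hypothetical local path to a NuScenes export
    labeling_interface="default",    # placeholder value
    upload_as_links=False,
)
# validate_format() is inherited behavior (not shown in this diff); it is assumed
# to populate converter._nuscenes before upload_dataset() is called.
if converter.validate_format():
    converter.upload_dataset(api, dataset_id=123, log_progress=True)
```
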

supervisely/convert/pointcloud/pointcloud_converter.py

@@ -1,5 +1,8 @@
-from typing import Optional, Tuple
+import imghdr
+import os
+from typing import List, Optional, Set, Tuple
 
+import supervisely.convert.pointcloud.sly.sly_pointcloud_helper as helpers
 from supervisely import (
     Api,
     PointcloudAnnotation,
@@ -10,8 +13,10 @@ from supervisely import (
 )
 from supervisely.api.module_api import ApiField
 from supervisely.convert.base_converter import BaseConverter
+from supervisely.io.fs import get_file_ext, get_file_name
 from supervisely.io.json import load_json_file
 from supervisely.pointcloud.pointcloud import ALLOWED_POINTCLOUD_EXTENSIONS
+from supervisely.pointcloud.pointcloud import validate_ext as validate_pcd_ext
 
 
 class PointcloudConverter(BaseConverter):
@@ -134,3 +139,49 @@ class PointcloudConverter(BaseConverter):
            if is_development():
                progress.close()
        logger.info(f"Dataset ID:{dataset_id} has been successfully uploaded.")
+
+    def _collect_items_if_format_not_detected(self) -> Tuple[List[Item], bool, Set[str]]:
+        only_modality_items = True
+        unsupported_exts = set()
+        pcd_list, rimg_dict, rimg_ann_dict = [], {}, {}
+        used_img_ext = set()
+        for root, _, files in os.walk(self._input_data):
+            for file in files:
+                full_path = os.path.join(root, file)
+                if file in ["key_id_map.json", "meta.json"]:
+                    continue
+
+                ext = get_file_ext(full_path)
+                if ext == ".json":
+                    dir_name = os.path.basename(root)
+                    parent_dir_name = os.path.basename(os.path.dirname(root))
+                    if any(
+                        p.replace("_", " ") in ["images", "related images", "photo context"]
+                        for p in [dir_name, parent_dir_name]
+                    ) or dir_name.endswith("_pcd"):
+                        rimg_ann_dict[file] = full_path
+                elif imghdr.what(full_path):
+                    rimg_dict[file] = full_path
+                    if ext not in used_img_ext:
+                        used_img_ext.add(ext)
+                elif ext.lower() in self.allowed_exts:
+                    try:
+                        validate_pcd_ext(ext)
+                        pcd_list.append(full_path)
+                    except:
+                        pass
+                else:
+                    only_modality_items = False
+                    unsupported_exts.add(ext)
+
+        # create Items
+        items = []
+        for pcd_path in pcd_list:
+            item = self.Item(pcd_path)
+            rimg, rimg_ann = helpers.find_related_items(
+                item.name, used_img_ext, rimg_dict, rimg_ann_dict
+            )
+            if rimg is not None and rimg_ann is not None:
+                item.set_related_images((rimg, rimg_ann))
+            items.append(item)
+        return items, only_modality_items, unsupported_exts
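
The new override above pairs point clouds with related images by looking at the directory a `.json` file lives in. A small self-contained sketch of that directory rule (the paths below are hypothetical):

```python
import os

def is_related_image_ann(json_path: str) -> bool:
    # Mirrors the condition in the method above: the file's directory (or its parent)
    # must be named "images", "related images"/"related_images",
    # "photo context"/"photo_context", or the directory name must end with "_pcd".
    root = os.path.dirname(json_path)
    dir_name = os.path.basename(root)
    parent_dir_name = os.path.basename(os.path.dirname(root))
    return any(
        p.replace("_", " ") in ["images", "related images", "photo context"]
        for p in [dir_name, parent_dir_name]
    ) or dir_name.endswith("_pcd")

print(is_related_image_ann("/data/pcd_project/related_images/000001.pcd/cam_front.png.json"))  # True
print(is_related_image_ann("/data/pcd_project/ann/000001.pcd.json"))  # False
```
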

supervisely/convert/pointcloud/sly/sly_pointcloud_converter.py

@@ -6,7 +6,7 @@ import supervisely.convert.pointcloud.sly.sly_pointcloud_helper as helpers
 from supervisely import PointcloudAnnotation, ProjectMeta, logger
 from supervisely.convert.base_converter import AvailablePointcloudConverters
 from supervisely.convert.pointcloud.pointcloud_converter import PointcloudConverter
-from supervisely.io.fs import JUNK_FILES, get_file_ext, get_file_name
+from supervisely.io.fs import get_file_ext, get_file_name
 from supervisely.io.json import load_json_file
 from supervisely.pointcloud.pointcloud import validate_ext as validate_pcd_ext
 
@@ -46,7 +46,6 @@ class SLYPointcloudConverter(PointcloudConverter):
         return False
 
     def validate_format(self) -> bool:
-        detected_ann_cnt = 0
         pcd_list, ann_dict, rimg_dict, rimg_ann_dict = [], {}, {}, {}
         used_img_ext = []
         for root, _, files in os.walk(self._input_data):
@@ -60,9 +59,7 @@ class SLYPointcloudConverter(PointcloudConverter):
                    continue
 
                ext = get_file_ext(full_path)
-                if file in JUNK_FILES:  # add better check
-                    continue
-                elif ext in self.ann_ext:
+                if ext in self.ann_ext:
                    dir_name = os.path.basename(root)
                    parent_dir_name = os.path.basename(os.path.dirname(root))
                    if any(
@@ -83,16 +80,11 @@ class SLYPointcloudConverter(PointcloudConverter):
                except:
                    continue
 
-        if self._meta is not None:
-            meta = self._meta
-        else:
-            meta = ProjectMeta()
-
         # create Items
         self._items = []
+        sly_ann_detected = False
         for pcd_path in pcd_list:
            name_noext = get_file_name(pcd_path)
-            ann_or_rimg_detected = False
            item = self.Item(pcd_path)
            ann_name = f"{item.name}.json"
            if ann_name not in ann_dict:
@@ -100,23 +92,18 @@ class SLYPointcloudConverter(PointcloudConverter):
            if ann_name in ann_dict:
                ann_path = ann_dict[ann_name]
                if self._meta is None:
-                    meta = self.generate_meta_from_annotation(ann_path, meta)
-                is_valid = self.validate_ann_file(ann_path, meta)
+                    self._meta = self.generate_meta_from_annotation(ann_path, self._meta)
+                is_valid = self.validate_ann_file(ann_path, self._meta)
                if is_valid:
                    item.ann_data = ann_path
-                    ann_or_rimg_detected = True
+                    sly_ann_detected = True
            rimg, rimg_ann = helpers.find_related_items(
                item.name, used_img_ext, rimg_dict, rimg_ann_dict
            )
            if rimg is not None and rimg_ann is not None:
                item.set_related_images((rimg, rimg_ann))
-                ann_or_rimg_detected = True
-
-            if ann_or_rimg_detected:
-                detected_ann_cnt += 1
            self._items.append(item)
-        self._meta = meta
-        return detected_ann_cnt > 0
+        return sly_ann_detected
 
     def to_supervisely(
         self,
@@ -140,5 +127,5 @@ class SLYPointcloudConverter(PointcloudConverter):
            ann_json = helpers.rename_in_json(ann_json, renamed_classes, renamed_tags)
            return PointcloudAnnotation.from_json(ann_json, meta)
        except Exception as e:
-            logger.warn(f"Failed to convert annotation: {repr(e)}")
+            logger.warning(f"Failed to convert annotation: {repr(e)}")
            return item.create_empty_annotation()

supervisely/convert/pointcloud/sly/sly_pointcloud_helper.py

@@ -1,4 +1,4 @@
-from typing import Dict, List
+from typing import Dict, List, Union
 
 from supervisely import (
     AnyGeometry,
@@ -16,7 +16,9 @@ from supervisely.io.json import load_json_file
 SLY_ANN_KEYS = ["figures", "objects", "tags"]
 
 
-def get_meta_from_annotation(ann_path: str, meta: ProjectMeta) -> ProjectMeta:
+def get_meta_from_annotation(ann_path: str, meta: Union[ProjectMeta, None]) -> ProjectMeta:
+    if meta is None:
+        meta = ProjectMeta()
     ann_json = load_json_file(ann_path)
     if "annotation" in ann_json:
         ann_json = ann_json["annotation"]
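
`get_meta_from_annotation` now tolerates `meta=None` by starting from an empty `ProjectMeta`, which lines up with the updated `validate_format` above passing a possibly-None `self._meta` down the call chain. A minimal call sketch (the annotation path is a hypothetical placeholder):

```python
from supervisely import ProjectMeta
from supervisely.convert.pointcloud.sly.sly_pointcloud_helper import get_meta_from_annotation

meta = None  # no project meta accumulated yet
meta = get_meta_from_annotation("/data/pcd_project/ann/000001.pcd.json", meta)
assert isinstance(meta, ProjectMeta)
```
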