supervisely-6.73.353-py3-none-any.whl → supervisely-6.73.354-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -58,12 +58,11 @@ def validate_mimetypes(name: str, path: str) -> list:
     if new_img_ext == ".bin" or new_img_ext is None:
         new_img_ext = ".jpeg"
     new_img_name = f"{get_file_name(name)}{new_img_ext}"
-    logger.info(
-        f"Image {name} with mimetype {mimetype} will be converted to {new_img_ext}"
-    )
+    logger.info(f"Image {name} with mimetype {mimetype} will be converted to {new_img_ext}")
 
     return new_img_name
 
+
 def convert_to_jpg(path) -> tuple:
     """Convert image to jpg."""
 
@@ -79,6 +78,7 @@ def convert_to_jpg(path) -> tuple:
     silent_remove(path)
     return new_path
 
+
 def read_tiff_image(path: str) -> Union[np.ndarray, None]:
     """
     Read tiff image.
@@ -95,9 +95,7 @@ def read_tiff_image(path: str) -> Union[np.ndarray, None]:
     if image.ndim == 3:
         if tiff_shape[0] < tiff_shape[1] and tiff_shape[0] < tiff_shape[2]:
             image = image.transpose(1, 2, 0)
-            logger.warning(
-                f"{name}: transposed shape from {tiff_shape} to {image.shape}"
-            )
+            logger.warning(f"{name}: transposed shape from {tiff_shape} to {image.shape}")
 
     return image
 
@@ -71,7 +71,7 @@ class NiiPlaneStructuredConverter(NiiConverter, VolumeConverter):
 
     def create_empty_annotation(self):
        return VolumeAnnotation(self.volume_meta)
-
+
     def __str__(self):
         return "nii_custom"
 
@@ -89,16 +89,21 @@ class NiiPlaneStructuredConverter(NiiConverter, VolumeConverter):
         for file in files:
             path = os.path.join(root, file)
             if is_nifti_file(path):
-                full_name = get_file_name(path)
-                if full_name.endswith(".nii"):
-                    full_name = get_file_name(full_name)
-                prefix = full_name.split("_")[0]
-                if prefix not in helper.PlanePrefix.values():
+                name_parts = helper.parse_name_parts(file)
+                if name_parts is None:
+                    logger.warning(
+                        "File recognized as NIfTI, but failed to parse plane identifier from name.",
+                        extra={"filename": file},
+                    )
                     continue
-                if any(label_name in full_name for label_name in helper.LABEL_NAME):
-                    ann_dict[prefix].append(path)
-                else:
-                    volumes_dict[prefix].append(path)
+
+                dict_to_use = ann_dict if name_parts.is_ann else volumes_dict
+                key = (
+                    name_parts.plane
+                    if name_parts.patient_uuid is None and name_parts.case_uuid is None
+                    else f"{name_parts.plane}_{name_parts.patient_uuid}_{name_parts.case_uuid}"
+                )
+                dict_to_use[key].append(path)
             ext = get_file_ext(path)
             if ext == ".txt":
                 cls_color_map = helper.read_cls_color_map(path)
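
A minimal standalone sketch of the two grouping-key shapes built above (all values hypothetical):

    plane, patient_uuid, case_uuid = "cor", None, None
    key = (
        plane
        if patient_uuid is None and case_uuid is None
        else f"{plane}_{patient_uuid}_{case_uuid}"
    )
    assert key == "cor"  # no UUIDs in the name: group by plane alone

    patient_uuid, case_uuid = "patient-uuid", "case-uuid"  # hypothetical stand-ins
    key = f"{plane}_{patient_uuid}_{case_uuid}"
    assert key == "cor_patient-uuid_case-uuid"  # UUID-prefixed names group per case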
@@ -106,22 +111,22 @@ class NiiPlaneStructuredConverter(NiiConverter, VolumeConverter):
                         logger.warning(f"Failed to read class color map from {path}.")
 
         self._items = []
-        for prefix, paths in volumes_dict.items():
+        for key, paths in volumes_dict.items():
             if len(paths) == 1:
                 item = self.Item(item_path=paths[0])
-                item.ann_data = ann_dict.get(prefix, [])
+                item.ann_data = ann_dict.get(key, [])
                 item.is_semantic = len(item.ann_data) == 1
                 if cls_color_map is not None:
                     item.custom_data["cls_color_map"] = cls_color_map
                 self._items.append(item)
             elif len(paths) > 1:
                 logger.info(
-                    f"Found {len(paths)} volumes with prefix {prefix}. Will try to match them by directories."
+                    f"Found {len(paths)} volumes with key {key}. Will try to match them by directories."
                 )
                 for path in paths:
                     item = self.Item(item_path=path)
                     possible_ann_paths = []
-                    for ann_path in ann_dict.get(prefix, []):
+                    for ann_path in ann_dict.get(key, []):
                         if Path(ann_path).parent == Path(path).parent:
                             possible_ann_paths.append(ann_path)
                     item.ann_data = possible_ann_paths
@@ -129,6 +134,7 @@ class NiiPlaneStructuredConverter(NiiConverter, VolumeConverter):
                     if cls_color_map is not None:
                         item.custom_data["cls_color_map"] = cls_color_map
                     self._items.append(item)
+
         self._meta = ProjectMeta()
         return self.items_count > 0
 
@@ -221,20 +227,22 @@ class NiiPlaneStructuredAnnotationConverter(NiiConverter, VolumeConverter):
         if json_map is not None:
             self._json_map = helper.read_json_map(json_map)
 
-        is_ann = lambda x: any(label_name in x for label_name in helper.LABEL_NAME)
+        is_nii = lambda x: any(x.endswith(ext) for ext in [".nii", ".nii.gz"])
         for root, _, files in os.walk(self._input_data):
             for file in files:
                 path = os.path.join(root, file)
-                if is_ann(file):
-                    prefix = get_file_name(path).split("_")[0]
-                    if prefix not in helper.PlanePrefix.values():
+                if is_nii(file):
+                    name_parts = helper.parse_name_parts(file)
+                    if name_parts is None or not name_parts.is_ann:
                         continue
                     try:
                         nii = load(path)
                     except filebasedimages.ImageFileError:
+                        logger.warning(f"Failed to load NIfTI file: {path}")
                         continue
                     item = self.Item(item_path=None, ann_data=path)
                     item.set_shape(nii.shape)
+                    item.custom_data["name_parts"] = name_parts
                     if cls_color_map is not None:
                         item.custom_data["cls_color_map"] = cls_color_map
                     self._items.append(item)
@@ -254,15 +262,11 @@ class NiiPlaneStructuredAnnotationConverter(NiiConverter, VolumeConverter):
         renamed_tags: dict = None,
     ) -> VolumeAnnotation:
         """Convert to Supervisely format."""
-        import re
         try:
             objs = []
             spatial_figures = []
             ann_path = item.ann_data
-            ann_idx = 0
-            match = re.search(r"_(\d+)(?:\.[^.]+)+$", ann_path)
-            if match:
-                ann_idx = int(match.group(1))
+            ann_idx = item.custom_data["name_parts"].ending_idx or 0
             for mask, pixel_id in helper.get_annotation_from_nii(ann_path):
                 class_id = pixel_id if item.is_semantic else ann_idx
                 class_name = f"Segment_{class_id}"
@@ -337,7 +341,6 @@ class NiiPlaneStructuredAnnotationConverter(NiiConverter, VolumeConverter):
             logger.info("Resulting dataset is empty. Removing it.")
             api.dataset.remove(dataset_id)
 
-
         if log_progress:
             if is_development():
                 progress.close()
@@ -81,9 +81,9 @@ class NiiConverter(VolumeConverter):
                 if name.endswith(".nii"):
                     name = get_file_name(name)
                 nifti_dict[name] = path
-                for prefix in planes_detected.keys():
-                    if name.startswith(prefix):
-                        planes_detected[prefix] = True
+                for plane in planes_detected.keys():
+                    if plane in name:
+                        planes_detected[plane] = True
 
         if any(planes_detected.values()):
             return False
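
The switch from `startswith` to a substring check matters once file names may carry UUID prefixes (see the `NameParts` helpers later in this diff). A small sketch of the behavioral difference, using a hypothetical UUID-prefixed name:

    # Hypothetical name: the plane token no longer leads the string.
    name = "0a1b2c3d-0000-0000-0000-000000000000_cor_anatomic"
    assert not name.startswith("cor")  # old check: plane not detected
    assert "cor" in name               # new check: plane detected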
@@ -72,6 +72,7 @@ def read_cls_color_map(path: str) -> dict:
         return None
     return cls_color_map
 
+
 def read_json_map(path: str) -> dict:
     import json
 
@@ -115,6 +116,7 @@ def get_annotation_from_nii(path: str) -> Generator[Mask3D, None, None]:
         mask = Mask3D(data == class_id)
         yield mask, class_id
 
+
 class AnnotationMatcher:
     def __init__(self, items, dataset_id):
         self._items = items
@@ -143,7 +145,9 @@ class AnnotationMatcher:
         if dataset_info.items_count > 0 and len(self._ann_paths.keys()) == 1:
             self._project_wide = False
         else:
-            datasets = {dsinfo.name: dsinfo for dsinfo in api.dataset.get_list(project_id, recursive=True)}
+            datasets = {
+                dsinfo.name: dsinfo for dsinfo in api.dataset.get_list(project_id, recursive=True)
+            }
             self._project_wide = True
 
         volumes = defaultdict(lambda: {})
@@ -164,86 +168,55 @@ class AnnotationMatcher:
         """Match annotation files with corresponding volumes using regex-based matching."""
         import re
 
-        def extract_prefix(ann_file):
-            import re
-            pattern = r'^(?P<prefix>cor|sag|axl).*?(?:' + "|".join(LABEL_NAME) + r')'
-            m = re.match(pattern, ann_file, re.IGNORECASE)
-            if m:
-                return m.group("prefix").lower()
-            return None
-
-        def is_volume_match(volume_name, prefix):
-            pattern = r'^' + re.escape(prefix) + r'.*?anatomic'
-            return re.match(pattern, volume_name, re.IGNORECASE) is not None
-
-        def find_best_volume_match(prefix, available_volumes):
-            candidates = {name: volume for name, volume in available_volumes.items() if is_volume_match(name, prefix)}
-            if not candidates:
-                return None, None
-
-            # Prefer an exact candidate
-            ann_name_no_ext = ann_file.split(".")[0]
-            exact_candidate = re.sub(r'(' + '|'.join(LABEL_NAME) + r')', 'anatomic', ann_name_no_ext, flags=re.IGNORECASE)
-            for name in candidates:
-                if re.fullmatch(re.escape(exact_candidate), name, re.IGNORECASE):
-                    return name, candidates[name]
-
-            # Otherwise, choose the candidate with the shortest name
-            best_match = sorted(candidates.keys(), key=len)[0]
-            return best_match, candidates[best_match]
-
         item_to_volume = {}
 
-        def process_annotation_file(ann_file, dataset_name, volumes):
-            prefix = extract_prefix(ann_file)
-            if prefix is None:
-                logger.warning(f"Failed to extract prefix from annotation file {ann_file}. Skipping.")
-                return
-
-            matched_name, matched_volume = find_best_volume_match(prefix, volumes)
-            if not matched_volume:
-                logger.warning(f"No matching volume found for annotation with prefix '{prefix}' in dataset {dataset_name}.")
-                return
-
-            # Retrieve the correct item based on matching mode.
-            item = (
-                self._item_by_path.get((dataset_name, ann_file))
-                if self._project_wide
-                else self._item_by_filename.get(ann_file)
-            )
-            if not item:
-                logger.warning(f"Item not found for annotation file {ann_file} in {'dataset ' + dataset_name if self._project_wide else 'single dataset mode'}.")
-                return
-
-            item_to_volume[item] = matched_volume
-            ann_file = ann_file.split(".")[0]
-            ann_supposed_match = re.sub(r'(' + '|'.join(LABEL_NAME) + r')', 'anatomic', ann_file, flags=re.IGNORECASE)
-            if matched_name.lower() != ann_supposed_match:
-                logger.debug(f"Fuzzy matched {ann_file} to volume {matched_name} using prefix '{prefix}'.")
-
         # Perform matching
         for dataset_name, volumes in self._volumes.items():
-            ann_files = self._ann_paths.get(dataset_name, []) if self._project_wide else list(self._ann_paths.values())[0]
+            volume_names = [parse_name_parts(name) for name in list(volumes.keys())]
+            _volume_names = [vol for vol in volume_names if vol is not None]
+            if len(_volume_names) == 0:
+                logger.warning(f"No valid volume names found in dataset {dataset_name}.")
+                continue
+            elif len(_volume_names) != len(volume_names):
+                logger.debug(f"Some volume names in dataset {dataset_name} could not be parsed.")
+            volume_names = _volume_names
+
+            ann_files = (
+                self._ann_paths.get(dataset_name, [])
+                if self._project_wide
+                else list(self._ann_paths.values())[0]
+            )
             for ann_file in ann_files:
-                process_annotation_file(ann_file, dataset_name, volumes)
+                ann_name = parse_name_parts(ann_file)
+                if ann_name is None:
+                    logger.warning(f"Failed to parse annotation name: {ann_file}")
+                    continue
+                match = find_best_volume_match_for_ann(ann_name, volume_names)
+                if match is not None:
+                    if match.plane != ann_name.plane:
+                        logger.warning(
+                            f"Plane mismatch: {match.plane} != {ann_name.plane} for {ann_file}. Skipping."
+                        )
                        continue
+                    item_to_volume[self._item_by_filename[ann_file]] = volumes[match.full_name]
 
         # Mark volumes having only one matching item as semantic and validate shape.
         volume_to_items = defaultdict(list)
         for item, volume in item_to_volume.items():
             volume_to_items[volume.id].append(item)
-
+
         for volume_id, items in volume_to_items.items():
             if len(items) == 1:
                 items[0].is_semantic = True
 
-        items_to_remove = []
+        # items_to_remove = []
         for item, volume in item_to_volume.items():
             volume_shape = tuple(volume.file_meta["sizes"])
             if item.shape != volume_shape:
                 logger.warning(f"Volume shape mismatch: {item.shape} != {volume_shape}")
                 # items_to_remove.append(item)
-        for item in items_to_remove:
-            del item_to_volume[item]
+        # for item in items_to_remove:
+        #     del item_to_volume[item]
 
         return item_to_volume
 
@@ -304,3 +277,148 @@ class AnnotationMatcher:
                 items[0].is_semantic = True
 
         return item_to_volume
+
+
+NameParts = namedtuple(
+    "NameParts",
+    [
+        "full_name",
+        "name_no_ext",
+        "type",
+        "plane",
+        "is_ann",
+        "patient_uuid",
+        "case_uuid",
+        "ending_idx",
+    ],
+)
+
+
+def parse_name_parts(full_name: str) -> NameParts:
+    from uuid import UUID
+
+    name = get_file_name(full_name)
+    if name.endswith(".nii"):
+        name = get_file_name(name)
+    name_no_ext = name
+
+    type = None
+    is_ann = False
+    if VOLUME_NAME in full_name:
+        type = "anatomic"
+    else:
+        type = next((part for part in LABEL_NAME if part in full_name), None)
+        is_ann = type is not None
+
+    if type is None:
+        return
+
+    plane = None
+    for part in PlanePrefix.values():
+        if part in name:
+            plane = part
+            break
+
+    if plane is None:
+        return
+
+    is_ann = any(part in name.lower() for part in LABEL_NAME)
+
+    patient_uuid = None
+    case_uuid = None
+
+    if len(name_no_ext) > 73:
+        try:
+            uuids = name_no_ext[:73].split("_")
+            if len(uuids) != 2:
+                raise ValueError("Invalid UUID format")
+            patient_uuid = UUID(name_no_ext[:36])
+            case_uuid = UUID(name_no_ext[37:73])
+        except ValueError:
+            logger.debug(
+                f"Failed to parse UUIDs from name: {name_no_ext}.",
+                extra={"full_name": full_name},
+            )
+            patient_uuid = None
+            case_uuid = None
+
+    try:
+        ending_idx = name_no_ext.split("_")[-1]
+        if ending_idx.isdigit():
+            ending_idx = int(ending_idx)
+        else:
+            ending_idx = None
+    except ValueError:
+        ending_idx = None
+        logger.debug(
+            f"Failed to parse ending index from name: {name_no_ext}.",
+            extra={"full_name": full_name},
+        )
+
+    return NameParts(
+        full_name=full_name,
+        name_no_ext=name_no_ext,
+        type=type,
+        plane=plane,
+        is_ann=is_ann,
+        patient_uuid=patient_uuid,
+        case_uuid=case_uuid,
+        ending_idx=ending_idx,
+    )
+
+
+def find_best_volume_match_for_ann(ann, volumes):
+    """
+    Finds the best matching NameParts object from `volumes` for the given annotation NameParts `ann`.
+    Prefers an exact match where all fields except `type` are the same, and `type` is 'anatomic'.
+    Returns the matched NameParts object or None if not found.
+    """
+    volume_names = [volume.full_name for volume in volumes]
+    ann_name = ann.full_name
+    # Prefer exact match except for type
+    for vol in volumes:
+        if vol.name_no_ext == ann.name_no_ext.replace(ann.type, "anatomic"):
+            logger.debug(
+                "Found exact match for annotation.",
+                extra={"ann": ann_name, "vol": vol.full_name},
+            )
+            return vol
+
+    logger.debug(
+        "Failed to find exact match, trying to find a fallback match by UUIDs.",
+        extra={"ann": ann_name, "volumes": volume_names},
+    )
+
+    # Fallback: match by plane and patient_uuid, type='anatomic'
+    for vol in volumes:
+        if (
+            vol.plane == ann.plane
+            and vol.patient_uuid == ann.patient_uuid
+            and vol.case_uuid == ann.case_uuid
+        ):
+            logger.debug(
+                "Found fallback match for annotation by UUIDs.",
+                extra={"ann": ann_name, "vol": vol.full_name},
+            )
+            return vol
+
+    logger.debug(
+        "Failed to find fallback match, trying to find a fallback match by plane.",
+        extra={"ann": ann_name, "volumes": volume_names},
+    )
+
+    # Fallback: match by plane and type='anatomic'
+    for vol in volumes:
+        if vol.plane == ann.plane:
+            logger.debug(
+                "Found fallback match for annotation by plane.",
+                extra={"ann": ann_name, "vol": vol.full_name},
+            )
+            return vol
+
+    logger.debug(
+        "Failed to find any match for annotation.",
+        extra={"ann": ann_name, "volumes": volume_names},
+    )
+
+    return None
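
Taken together, a hedged sketch of how the new helpers behave. The module path is taken from the RECORD section below; the `inference` label token is an assumption standing in for an actual entry of `LABEL_NAME`, and `VOLUME_NAME` is assumed to be `"anatomic"`:

    from supervisely.convert.volume.nii.nii_volume_helper import (
        find_best_volume_match_for_ann,
        parse_name_parts,
    )

    vol = parse_name_parts("axl_anatomic.nii.gz")
    ann = parse_name_parts("axl_inference.nii.gz")  # "inference" assumed to be in LABEL_NAME

    assert vol.plane == ann.plane == "axl"
    assert ann.is_ann and not vol.is_ann

    # Exact match: the annotation name with its label token swapped for "anatomic"
    # equals the volume name; otherwise matching falls back to UUIDs, then to plane.
    best = find_best_volume_match_for_ann(ann, [vol])
    assert best is vol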
@@ -322,6 +322,8 @@ def download_fast(
     :type download_blob_files: :class:`bool`, optional
     :param project_info: Project info object. To avoid additional API requests.
     :type project_info: :class:`ProjectInfo`, optional
+    :param skip_create_readme: Skip creating README.md file. Default is False.
+    :type skip_create_readme: bool, optional
     :return: None
     :rtype: NoneType
 
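
A hedged usage sketch of the new flag (project ID and path are hypothetical; `download_fast` is assumed to forward the kwarg the same way `_download_project` pops it below):

    import supervisely as sly
    from supervisely.project.download import download_fast

    api = sly.Api.from_env()
    # Skip README.md generation after the download completes.
    download_fast(api=api, project_id=123, dest_dir="/tmp/my_project", skip_create_readme=True)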
@@ -3323,6 +3323,8 @@ class Project:
         :param download_blob_files: Default is False. It will download images in classic way.
                                     If True, it will download blob files, if they are present in the project, to optimize download process.
         :type download_blob_files: bool, optional
+        :param skip_create_readme: Skip creating README.md file. Default is False.
+        :type skip_create_readme: bool, optional
         :return: None
         :rtype: NoneType
         :Usage example:
@@ -3926,6 +3928,8 @@ class Project:
         :type images_ids: :class:`list` [ :class:`int` ], optional
         :param resume_download: Resume download enables to download only missing files avoiding erase of existing files.
         :type resume_download: :class:`bool`, optional
+        :param skip_create_readme: Skip creating README.md file. Default is False.
+        :type skip_create_readme: bool, optional
         :return: None
         :rtype: NoneType
 
@@ -4217,6 +4221,7 @@ def _download_project(
     **kwargs,
 ):
     download_blob_files = kwargs.pop("download_blob_files", False)
+    skip_create_readme = kwargs.pop("skip_create_readme", False)
 
     dataset_ids = set(dataset_ids) if (dataset_ids is not None) else None
     project_fs = None
@@ -4456,14 +4461,15 @@ def _download_project(
             for item_name in dataset_fs.get_items_names():
                 if item_name not in items_names_set:
                     dataset_fs.delete_item(item_name)
-    try:
-        if download_blob_files:
-            project_info = api.project.get_info_by_id(project_id)
-            create_blob_readme(project_fs=project_fs, project_info=project_info)
-        else:
-            create_readme(dest_dir, project_id, api)
-    except Exception as e:
-        logger.info(f"There was an error while creating README: {e}")
+    if not skip_create_readme:
+        try:
+            if download_blob_files:
+                project_info = api.project.get_info_by_id(project_id)
+                create_blob_readme(project_fs=project_fs, project_info=project_info)
+            else:
+                create_readme(dest_dir, project_id, api)
+        except Exception as e:
+            logger.info(f"There was an error while creating README: {e}")
 
 
 def upload_project(
@@ -4722,6 +4728,8 @@ def download_project(
     :param download_blob_files: Default is False. It will download images in classic way.
                                 If True, it will download blob files, if they are present in the project, to optimize download process.
     :type download_blob_files: bool, optional
+    :param skip_create_readme: Skip creating README.md file. Default is False.
+    :type skip_create_readme: bool, optional
     :return: None.
     :rtype: NoneType
     :Usage example:
@@ -4806,6 +4814,9 @@ def _download_project_optimized(
     images_ids: List[int] = None,
     **kwargs,
 ):
+
+    skip_create_readme = kwargs.pop("skip_create_readme", False)
+
     project_info = api.project.get_info_by_id(project_id)
     project_id = project_info.id
     logger.info("Annotations are not cached (always download latest version from server)")
@@ -4851,11 +4862,11 @@ def _download_project_optimized(
             save_images=save_images,
             images_ids=images_ids,
         )
-
-    try:
-        create_readme(project_dir, project_id, api)
-    except Exception as e:
-        logger.info(f"There was an error while creating README: {e}")
+    if not skip_create_readme:
+        try:
+            create_readme(project_dir, project_id, api)
+        except Exception as e:
+            logger.info(f"There was an error while creating README: {e}")
 
 
 def _split_images_by_cache(images, cache):
@@ -5311,7 +5322,7 @@ def _project_info_md(project_info: sly.ProjectInfo) -> str:
 
 
 def _dataset_structure_md(
-    project_info: sly.ProjectInfo, api: sly.Api, entity_limit: Optional[int] = 10
+    project_info: sly.ProjectInfo, api: sly.Api, entity_limit: Optional[int] = 4
 ) -> str:
     """Creates a markdown string with the dataset structure of the project.
     Supports only images and videos projects.
@@ -5321,6 +5332,7 @@ def _dataset_structure_md(
     :param api: Supervisely API address and token.
     :type api: :class:`Api<supervisely.api.api.Api>`
     :param entity_limit: The maximum number of entities to display in the README.
+        This is the limit for top level datasets and items in the dataset at the same time.
     :type entity_limit: int, optional
     :return: Markdown string with the dataset structure of the project.
     :rtype: str
@@ -5347,28 +5359,76 @@ def _dataset_structure_md(
 
     result_md = f"🗂️ {project_info.name}<br>"
 
-    # if project_info
+    # Build a dataset hierarchy tree
+    dataset_tree = {}
+    root_datasets = []
 
     for parents, dataset_info in api.dataset.tree(project_info.id):
-        # The dataset path is needed to create a clickable link in the README.
-        dataset_path = Dataset._get_dataset_path(dataset_info.name, parents)
-        basic_indent = "┃ " * len(parents)
-        result_md += (
-            basic_indent + "┣ " + dataset_icon + f"[{dataset_info.name}]({dataset_path})" + "<br>"
+        level = len(parents)
+        parent_id = dataset_info.parent_id
+
+        if level == 0:  # Root dataset
+            root_datasets.append(dataset_info)
+
+        dataset_tree[dataset_info.id] = {
+            "info": dataset_info,
+            "path": Dataset._get_dataset_path(dataset_info.name, parents),
+            "level": level,
+            "parents": parents,
+            "children": [],
+        }
+
+    # Connect parents with children
+    for ds_id, ds_data in dataset_tree.items():
+        parent_id = ds_data["info"].parent_id
+        if parent_id in dataset_tree:
+            dataset_tree[parent_id]["children"].append(ds_id)
+
+    # Display only top entity_limit root datasets
+    if len(root_datasets) > entity_limit:
+        root_datasets = root_datasets[:entity_limit]
+        result_md += f"(Showing only {entity_limit} top-level datasets)<br>"
+
+    # Function to render a dataset and its children up to a certain depth
+    def render_dataset(ds_id, current_depth=0, max_depth=2):
+        if current_depth > max_depth:
+            return
+
+        ds_data = dataset_tree[ds_id]
+        ds_info = ds_data["info"]
+        basic_indent = "┃ " * current_depth
+
+        # Render the dataset
+        result_md.append(
+            basic_indent + "┣ " + dataset_icon + f"[{ds_info.name}]({ds_data['path']})" + "<br>"
         )
-        entity_infos = list_function(dataset_info.id)
+
+        # Render items in the dataset
+        entity_infos = list_function(ds_info.id)
         for idx, entity_info in enumerate(entity_infos):
             if idx == entity_limit:
-                result_md += (
+                result_md.append(
                     basic_indent + "┃ ┗ ... " + str(len(entity_infos) - entity_limit) + " more<br>"
                 )
                 break
             symbol = "┗" if idx == len(entity_infos) - 1 else "┣"
-            result_md += (
-                "┃ " * (len(parents) + 1) + symbol + entity_icon + entity_info.name + "<br>"
-            )
+            result_md.append(basic_indent + "┃ " + symbol + entity_icon + entity_info.name + "<br>")
 
-    return result_md
+        # Render children (limited to entity_limit)
+        children = ds_data["children"]
+        if len(children) > entity_limit:
+            children = children[:entity_limit]
+            result_md.append(basic_indent + f"┃ (Showing only {entity_limit} child datasets)<br>")
+
+        for child_id in children:
+            render_dataset(child_id, current_depth + 1, max_depth)
+
+    # Render each root dataset
+    result_md = [result_md]  # Convert to list for appending in the recursive function
+    for root_ds in root_datasets:
+        render_dataset(root_ds.id)
+
+    return "".join(result_md)
 
 
 async def _download_project_async(
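
The rewrite replaces flat string concatenation with an explicit tree plus a depth-limited, entity-limited recursive renderer. A standalone sketch of the same pattern (tree and names hypothetical, icons omitted):

    tree = {
        1: {"name": "ds0", "children": [2]},
        2: {"name": "nested", "children": []},
    }
    lines = []

    def render(ds_id, depth=0, max_depth=2, limit=4):
        # Stop past max_depth; truncate children to the entity limit.
        if depth > max_depth:
            return
        node = tree[ds_id]
        lines.append("┃ " * depth + "┣ " + node["name"] + "<br>")
        for child_id in node["children"][:limit]:
            render(child_id, depth + 1, max_depth, limit)

    render(1)
    print("".join(lines))  # ┣ ds0<br>┃ ┣ nested<br>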
@@ -5401,6 +5461,8 @@ async def _download_project_async(
     batch_size = kwargs.get("batch_size", 100)
     # control whether to download blob files
     download_blob_files = kwargs.get("download_blob_files", False)
+    # control whether to create README file
+    skip_create_readme = kwargs.get("skip_create_readme", False)
 
     if semaphore is None:
         semaphore = api.get_default_semaphore()
@@ -5631,15 +5693,15 @@ async def _download_project_async(
             for item_name in dataset_fs.get_items_names():
                 if item_name not in items_names_set:
                     dataset_fs.delete_item(item_name)
-
-    try:
-        if download_blob_files:
-            project_info = api.project.get_info_by_id(project_id)
-            create_blob_readme(project_fs=project_fs, project_info=project_info)
-        else:
-            create_readme(dest_dir, project_id, api)
-    except Exception as e:
-        logger.info(f"There was an error while creating README: {e}")
+    if not skip_create_readme:
+        try:
+            if download_blob_files:
+                project_info = api.project.get_info_by_id(project_id)
+                create_blob_readme(project_fs=project_fs, project_info=project_info)
+            else:
+                create_readme(dest_dir, project_id, api)
+        except Exception as e:
+            logger.info(f"There was an error while creating README: {e}")
 
 
 async def _download_project_item_async(
@@ -1182,6 +1182,7 @@ class VideoProject(Project):
         log_progress: bool = True,
         progress_cb: Optional[Union[tqdm, Callable]] = None,
         include_custom_data: bool = False,
+        **kwargs,
     ) -> None:
         """
         Download video project from Supervisely to the given directory asynchronously.
@@ -1224,7 +1225,7 @@ class VideoProject(Project):
 
             coroutine = sly.VideoProject.download_async(api, project_id, save_directory)
             run_coroutine(coroutine)
-
+
         """
         await download_video_project_async(
             api=api,
1237
1238
  log_progress=log_progress,
1238
1239
  progress_cb=progress_cb,
1239
1240
  include_custom_data=include_custom_data,
1241
+ **kwargs,
1240
1242
  )
1241
1243
 
1242
1244
 
@@ -1468,7 +1470,7 @@ def upload_video_project(
             parent_id = dataset_map.get(parent)
         else:
             parent = ""
-            parent_id = None
+            parent_id = None
         dataset = api.dataset.create(project.id, dataset_fs.short_name, parent_id=parent_id)
         dataset_map[os.path.join(parent, dataset.name)] = dataset.id
 
@@ -1547,6 +1549,7 @@ async def download_video_project_async(
     log_progress: bool = True,
     progress_cb: Optional[Union[tqdm, Callable]] = None,
     include_custom_data: Optional[bool] = False,
+    **kwargs,
 ) -> None:
     """
     Download video project to the local directory.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.353
+Version: 6.73.354
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
@@ -34,7 +34,7 @@ Requires-Dist: bidict<1.0.0,>=0.21.2
 Requires-Dist: varname<1.0.0,>=0.8.1
 Requires-Dist: python-dotenv<=1.0.0,>=0.19.2
 Requires-Dist: pynrrd<1.0.0,>=0.4.2
-Requires-Dist: SimpleITK<3.0.0.0,>=2.1.1.2
+Requires-Dist: SimpleITK<=2.4.1.0,>=2.1.1.2
 Requires-Dist: pydicom<3.0.0,>=2.3.0
 Requires-Dist: stringcase<2.0.0,>=1.2.0
 Requires-Dist: python-magic<1.0.0,>=0.4.25
@@ -570,7 +570,7 @@ supervisely/convert/base_converter.py,sha256=O2SP4I_Hd0aSn8kbOUocy8orkc_-iD-TQ-z
 supervisely/convert/converter.py,sha256=022I1UieyaPDVb8lOcKW20jSt1_1TcbIWhghSmieHAE,10885
 supervisely/convert/image/__init__.py,sha256=JEuyaBiiyiYmEUYqdn8Mog5FVXpz0H1zFubKkOOm73I,1395
 supervisely/convert/image/image_converter.py,sha256=8vak8ZoKTN1ye2ZmCTvCZ605-Rw1AFLIEo7bJMfnR68,10426
-supervisely/convert/image/image_helper.py,sha256=j1HqgmCiUIOBft43OkdPSOxQD8aB7zWXPmOXNvzP6j4,3697
+supervisely/convert/image/image_helper.py,sha256=VfFJmMEdMTOZ-G15lFEYUuFC0z62aHWrw8MLigxJS58,3647
 supervisely/convert/image/cityscapes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/image/cityscapes/cityscapes_converter.py,sha256=tnelQJHvGz_IGMXWe-EKWAkBhexRzmkv_0Kln5sN12E,8100
 supervisely/convert/image/cityscapes/cityscapes_helper.py,sha256=in5nR7__q_u5dCkVtZmynfZ_ZuvsIAHrTzyTG4EvNgU,2988
@@ -665,9 +665,9 @@ supervisely/convert/volume/dicom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRk
 supervisely/convert/volume/dicom/dicom_converter.py,sha256=Hw4RxU_qvllk6M26udZE6G-m1RWR8-VVPcEPwFlqrVg,3354
 supervisely/convert/volume/dicom/dicom_helper.py,sha256=OrKlyt1hA5BOXKhE1LF1WxBIv3b6t96xRras4OSAuNM,2891
 supervisely/convert/volume/nii/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-supervisely/convert/volume/nii/nii_planes_volume_converter.py,sha256=TrV7Mkczt8w2WpJizmOZwqeG9zlcLy-8p4D22B9nYyo,14344
-supervisely/convert/volume/nii/nii_volume_converter.py,sha256=n8HWRvwXUzugTQt4PKpbSacsuC4EQxoYHAWXcXC5KE8,8526
-supervisely/convert/volume/nii/nii_volume_helper.py,sha256=8cS1LCvDcgGuinBARTmbOm-lLQmJ___3gyemt26W_-Y,11572
+supervisely/convert/volume/nii/nii_planes_volume_converter.py,sha256=xxRc8qhBGS3Uz1Aepc5s3UqFHLm_zJeR1eo-UtxH1v8,14591
+supervisely/convert/volume/nii/nii_volume_converter.py,sha256=BAOKX96-bp6WfTFLrCQNrXk2YhKqIFSU5LJ-auKiAfc,8514
+supervisely/convert/volume/nii/nii_volume_helper.py,sha256=ME_2bgbKZg4IYDFOYqhGRdt7LbwigdF2p6oSgPgPWpw,14132
 supervisely/convert/volume/sly/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/convert/volume/sly/sly_volume_converter.py,sha256=XmSuxnRqxchG87b244f3h0UHvOt6IkajMquL1drWlCM,5595
 supervisely/convert/volume/sly/sly_volume_helper.py,sha256=gUY0GW3zDMlO2y-zQQG36uoXMrKkKz4-ErM1CDxFCxE,5620
@@ -1018,16 +1018,16 @@ supervisely/pointcloud_episodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm
 supervisely/pointcloud_episodes/pointcloud_episodes.py,sha256=cRXdtw7bMsbsdVQjxfWxFSESrO-LGiqqsZyyExl2Mbg,3430
 supervisely/project/__init__.py,sha256=hlzdj9Pgy53Q3qdP8LMtGTChvZHQuuShdtui2eRUQeE,2601
 supervisely/project/data_version.py,sha256=P5Lui6i64pYeJWmAdGJDv8GRXxjfpSSZ8zT_MxIrynE,19553
-supervisely/project/download.py,sha256=N6UEXY_eLtzjz61Y2SaJLg2-vv_Cvp9cXCUUM9R8-d8,27677
+supervisely/project/download.py,sha256=Veu1wNt1ho-flOPo5-z55nBm5OlkxxqzqOyccjq7dsg,27801
 supervisely/project/pointcloud_episode_project.py,sha256=yiWdNBQiI6f1O9sr1pg8JHW6O-w3XUB1rikJNn3Oung,41866
 supervisely/project/pointcloud_project.py,sha256=Kx1Vaes-krwG3BiRRtHRLQxb9G5m5bTHPN9IzRqmNWo,49399
-supervisely/project/project.py,sha256=OunVB11sVQSOvkqkjsEEkX1nq9OUXOXpHTdcLDjOFe0,233256
+supervisely/project/project.py,sha256=k0eE6Jy9eDYO-WUbDK0a-IVA34VVWYRzMBVkPY9XdGw,235812
 supervisely/project/project_meta.py,sha256=26s8IiHC5Pg8B1AQi6_CrsWteioJP2in00cRNe8QlW0,51423
 supervisely/project/project_settings.py,sha256=NLThzU_DCynOK6hkHhVdFyezwprn9UqlnrLDe_3qhkY,9347
 supervisely/project/project_type.py,sha256=7mQ7zg6r7Bm2oFn5aR8n_PeLqMmOaPZd6ph7Z8ZISTw,608
 supervisely/project/readme_template.md,sha256=NKYEoJubNWLV_HmhVmdB6L4dneLqDkvl2b71xy5fc54,9150
 supervisely/project/upload.py,sha256=AjgHYgVZwUE25ygC5pqvFjdAladbyB8T78mlet5Qpho,3750
-supervisely/project/video_project.py,sha256=bkQI3JTimQR-83iWAThjoz7bTcbCIqvvk039AG3BPXc,64081
+supervisely/project/video_project.py,sha256=_WsVdIdGZ_VFaXEVs6_Y46YYOzeJpvt82X4rOkt40nk,64117
 supervisely/project/volume_project.py,sha256=Kn9VEvWuKKZvL2nx6B6bjSvHuoZhAOxEc6DvPRexUco,22666
 supervisely/pyscripts_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/pyscripts_utils/utils.py,sha256=scEwHJvHRQa8NHIOn2eTwH6-Zc8CGdLoxM-WzH9jcRo,314
@@ -1083,9 +1083,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.353.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.353.dist-info/METADATA,sha256=U-dj-ZEJnbbIuHQrW8dU2-_iXFK54VhmQFno01bC9so,33597
-supervisely-6.73.353.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-supervisely-6.73.353.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.353.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.353.dist-info/RECORD,,
+supervisely-6.73.354.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.354.dist-info/METADATA,sha256=62vSdg9SmEuOuxLWTSNU7owo4bjUyH-3jFQ2zgwldeQ,33598
+supervisely-6.73.354.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+supervisely-6.73.354.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.354.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.354.dist-info/RECORD,,