supervisely 6.73.286__py3-none-any.whl → 6.73.288__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

Files changed (24)
  1. supervisely/annotation/annotation.py +10 -2
  2. supervisely/api/annotation_api.py +26 -30
  3. supervisely/api/api.py +8 -4
  4. supervisely/api/entity_annotation/figure_api.py +7 -7
  5. supervisely/api/image_api.py +19 -19
  6. supervisely/geometry/graph.py +7 -0
  7. supervisely/geometry/rectangle.py +3 -1
  8. supervisely/nn/benchmark/base_benchmark.py +3 -0
  9. supervisely/nn/benchmark/base_evaluator.py +1 -1
  10. supervisely/nn/benchmark/instance_segmentation/text_templates.py +3 -2
  11. supervisely/nn/benchmark/object_detection/metric_provider.py +38 -12
  12. supervisely/nn/benchmark/object_detection/text_templates.py +2 -1
  13. supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py +2 -2
  14. supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py +4 -4
  15. supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py +4 -5
  16. supervisely/nn/benchmark/object_detection/vis_metrics/overview.py +4 -1
  17. supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py +1 -2
  18. supervisely/project/project.py +38 -37
  19. {supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/METADATA +1 -1
  20. {supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/RECORD +24 -24
  21. {supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/LICENSE +0 -0
  22. {supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/WHEEL +0 -0
  23. {supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/entry_points.txt +0 -0
  24. {supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/top_level.txt +0 -0
supervisely/annotation/annotation.py CHANGED
@@ -543,7 +543,7 @@ class Annotation:
  image_id=take_with_default(image_id, self.image_id),
  )

- def _add_labels_impl(self, dest, labels):
+ def _add_labels_impl(self, dest: List, labels: List[Label]):
  """
  The function _add_labels_impl extend list of the labels of the current Annotation object
  :param dest: destination list of the Label class objects
@@ -554,7 +554,15 @@ class Annotation:
  if self.img_size.count(None) == 0:
  # image has resolution in DB
  canvas_rect = Rectangle.from_size(self.img_size)
- dest.extend(label.crop(canvas_rect))
+ try:
+ dest.extend(label.crop(canvas_rect))
+ except Exception:
+ logger.error(
+ f"Cannot crop label of '{label.obj_class.name}' class "
+ "when extend list of the labels of the current Annotation object",
+ exc_info=True,
+ )
+ raise
  else:
  # image was uploaded by link and does not have resolution in DB
  # add label without normalization and validation
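Note: the change above only wraps the existing crop call in a log-and-reraise guard. A minimal standalone sketch of that pattern, assuming the caller already has `dest`, `label`, and `canvas_rect` objects:

```python
# Sketch of the guarded-crop pattern from _add_labels_impl (illustration only).
from supervisely.sly_logger import logger


def extend_with_cropped_label(dest, label, canvas_rect):
    """Crop `label` to `canvas_rect` and extend `dest`; log context and re-raise on failure."""
    try:
        dest.extend(label.crop(canvas_rect))
    except Exception:
        # exc_info=True keeps the original traceback in the log; the exception still propagates.
        logger.error(
            f"Cannot crop label of '{label.obj_class.name}' class",
            exc_info=True,
        )
        raise
```
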
supervisely/api/annotation_api.py CHANGED
@@ -1403,37 +1403,33 @@ class AnnotationApi(ModuleApi):
  progress_cb(len(response.content))

  result = response.json()
- # Convert annotation to pixel coordinate system
- result[ApiField.ANNOTATION] = Annotation._to_pixel_coordinate_system_json(
- result[ApiField.ANNOTATION]
+ # Convert annotation to pixel coordinate system
+ result[ApiField.ANNOTATION] = Annotation._to_pixel_coordinate_system_json(
+ result[ApiField.ANNOTATION]
+ )
+ # check if there are any AlphaMask geometries in the batch
+ additonal_geometries = defaultdict(int)
+ labels = result[ApiField.ANNOTATION][AnnotationJsonFields.LABELS]
+ for idx, label in enumerate(labels):
+ if label[LabelJsonFields.GEOMETRY_TYPE] == AlphaMask.geometry_name():
+ figure_id = label[LabelJsonFields.ID]
+ additonal_geometries[figure_id] = idx
+
+ # if so, download them separately and update the annotation
+ if len(additonal_geometries) > 0:
+ figure_ids = list(additonal_geometries.keys())
+ figures = await self._api.image.figure.download_geometries_batch_async(
+ figure_ids,
+ (progress_cb if progress_cb is not None and progress_cb_type == "size" else None),
+ semaphore=semaphore,
  )
- # check if there are any AlphaMask geometries in the batch
- additonal_geometries = defaultdict(int)
- labels = result[ApiField.ANNOTATION][AnnotationJsonFields.LABELS]
- for idx, label in enumerate(labels):
- if label[LabelJsonFields.GEOMETRY_TYPE] == AlphaMask.geometry_name():
- figure_id = label[LabelJsonFields.ID]
- additonal_geometries[figure_id] = idx
-
- # if so, download them separately and update the annotation
- if len(additonal_geometries) > 0:
- figure_ids = list(additonal_geometries.keys())
- figures = await self._api.image.figure.download_geometries_batch_async(
- figure_ids,
- (
- progress_cb
- if progress_cb is not None and progress_cb_type == "size"
- else None
- ),
- semaphore=semaphore,
- )
- for figure_id, geometry in zip(figure_ids, figures):
- label_idx = additonal_geometries[figure_id]
- labels[label_idx].update({BITMAP: geometry})
- ann_info = self._convert_json_info(result)
- if progress_cb is not None and progress_cb_type == "number":
- progress_cb(1)
- return ann_info
+ for figure_id, geometry in zip(figure_ids, figures):
+ label_idx = additonal_geometries[figure_id]
+ labels[label_idx].update({BITMAP: geometry})
+ ann_info = self._convert_json_info(result)
+ if progress_cb is not None and progress_cb_type == "number":
+ progress_cb(1)
+ return ann_info

  async def download_batch_async(
  self,
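Note: a condensed sketch of the AlphaMask handling above — labels whose geometry is an AlphaMask are indexed by figure id so their geometries can be fetched in one extra request and written back. The JSON keys and geometry name below are assumed stand-ins for the SDK constants (`LabelJsonFields.*`, `AlphaMask.geometry_name()`):

```python
from collections import defaultdict

# Assumed stand-ins for the SDK constants used in the diff.
GEOMETRY_TYPE_KEY = "geometryType"
ID_KEY = "id"
ALPHA_MASK_NAME = "alpha_mask"


def collect_alpha_mask_figures(labels):
    """Map figure id -> index of the label whose geometry must be downloaded separately."""
    figure_to_label_idx = defaultdict(int)
    for idx, label in enumerate(labels):
        if label[GEOMETRY_TYPE_KEY] == ALPHA_MASK_NAME:
            figure_to_label_idx[label[ID_KEY]] = idx
    return figure_to_label_idx


# Downstream (as in the diff): download geometries for list(figure_to_label_idx) and
# update labels[figure_to_label_idx[figure_id]] with the returned bitmap.
```
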
supervisely/api/api.py CHANGED
@@ -66,10 +66,10 @@ import supervisely.io.env as sly_env
  from supervisely._utils import camel_to_snake, is_community, is_development
  from supervisely.api.module_api import ApiField
  from supervisely.io.network_exceptions import (
+ RetryableRequestException,
  process_requests_exception,
  process_requests_exception_async,
  process_unhandled_request,
- RetryableRequestException,
  )
  from supervisely.project.project_meta import ProjectMeta
  from supervisely.sly_logger import logger
@@ -380,6 +380,7 @@ class Api:
  self.async_httpx_client: httpx.AsyncClient = None
  self.httpx_client: httpx.Client = None
  self._semaphore = None
+ self._instance_version = None

  @classmethod
  def normalize_server_address(cls, server_address: str) -> str:
@@ -515,11 +516,14 @@ class Api:
  # '6.9.13'
  """
  try:
- version = self.post("instance.version", {}).json().get(ApiField.VERSION)
+ if self._instance_version is None:
+ self._instance_version = (
+ self.post("instance.version", {}).json().get(ApiField.VERSION)
+ )
  except Exception as e:
  logger.warning(f"Failed to get instance version from server: {e}")
- version = "unknown"
- return version
+ self._instance_version = "unknown"
+ return self._instance_version

  def is_version_supported(self, version: Optional[str] = None) -> Union[bool, None]:
  """Check if the given version is lower or equal to the current Supervisely instance version.
supervisely/api/entity_annotation/figure_api.py CHANGED
@@ -642,13 +642,13 @@ class FigureApi(RemoveableBulkModuleApi):
  response = await self._api.post_async(
  "figures.bulk.download.geometry", {ApiField.IDS: batch_ids}
  )
- decoder = MultipartDecoder.from_response(response)
- for part in decoder.parts:
- content_utf8 = part.headers[b"Content-Disposition"].decode("utf-8")
- # Find name="1245" preceded by a whitespace, semicolon or beginning of line.
- # The regex has 2 capture group: one for the prefix and one for the actual name value.
- figure_id = int(re.findall(r'(^|[\s;])name="(\d*)"', content_utf8)[0][1])
- yield figure_id, part.content
+ decoder = MultipartDecoder.from_response(response)
+ for part in decoder.parts:
+ content_utf8 = part.headers[b"Content-Disposition"].decode("utf-8")
+ # Find name="1245" preceded by a whitespace, semicolon or beginning of line.
+ # The regex has 2 capture group: one for the prefix and one for the actual name value.
+ figure_id = int(re.findall(r'(^|[\s;])name="(\d*)"', content_utf8)[0][1])
+ yield figure_id, part.content

  async def download_geometries_batch_async(
  self,
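Note: the hunk above only re-indents the multipart parsing; the regex itself can be checked in isolation (the header string below is a made-up example):

```python
import re

# Find name="1245" preceded by whitespace, a semicolon, or the beginning of the line.
# Group 1 captures the prefix, group 2 the numeric name value.
NAME_RE = re.compile(r'(^|[\s;])name="(\d*)"')

content_disposition = 'form-data; name="1245"; filename="figure.bin"'  # made-up header
figure_id = int(NAME_RE.findall(content_disposition)[0][1])
assert figure_id == 1245
```
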
supervisely/api/image_api.py CHANGED
@@ -4456,26 +4456,26 @@ class ImageApi(RemoveableBulkModuleApi):
  json=json_body,
  headers=headers,
  )
- decoder = MultipartDecoder.from_response(response)
- for part in decoder.parts:
- content_utf8 = part.headers[b"Content-Disposition"].decode("utf-8")
- # Find name="1245" preceded by a whitespace, semicolon or beginning of line.
- # The regex has 2 capture group: one for the prefix and one for the actual name value.
- img_id = int(re.findall(r'(^|[\s;])name="(\d*)"', content_utf8)[0][1])
- if check_hash:
- hhash = part.headers.get("x-content-checksum-sha256", None)
- if hhash is not None:
- downloaded_bytes_hash = get_bytes_hash(part)
- if hhash != downloaded_bytes_hash:
- raise RuntimeError(
- f"Downloaded hash of image with ID:{img_id} does not match the expected hash: {downloaded_bytes_hash} != {hhash}"
- )
- if progress_cb is not None and progress_cb_type == "number":
- progress_cb(1)
- elif progress_cb is not None and progress_cb_type == "size":
- progress_cb(len(part.content))
+ decoder = MultipartDecoder.from_response(response)
+ for part in decoder.parts:
+ content_utf8 = part.headers[b"Content-Disposition"].decode("utf-8")
+ # Find name="1245" preceded by a whitespace, semicolon or beginning of line.
+ # The regex has 2 capture group: one for the prefix and one for the actual name value.
+ img_id = int(re.findall(r'(^|[\s;])name="(\d*)"', content_utf8)[0][1])
+ if check_hash:
+ hhash = part.headers.get("x-content-checksum-sha256", None)
+ if hhash is not None:
+ downloaded_bytes_hash = get_bytes_hash(part)
+ if hhash != downloaded_bytes_hash:
+ raise RuntimeError(
+ f"Downloaded hash of image with ID:{img_id} does not match the expected hash: {downloaded_bytes_hash} != {hhash}"
+ )
+ if progress_cb is not None and progress_cb_type == "number":
+ progress_cb(1)
+ elif progress_cb is not None and progress_cb_type == "size":
+ progress_cb(len(part.content))

- yield img_id, part.content
+ yield img_id, part.content

  async def get_list_generator_async(
  self,
supervisely/geometry/graph.py CHANGED
@@ -9,6 +9,7 @@ from typing import Dict, List, Optional, Tuple, Union
  import cv2
  import numpy as np

+ from supervisely import logger
  from supervisely.geometry.constants import (
  CLASS_ID,
  CREATED_AT,
@@ -215,6 +216,8 @@ class GraphNodes(Geometry):
  updated_at=updated_at,
  created_at=created_at,
  )
+ if len(nodes) == 0:
+ raise ValueError("Empty list of nodes is not allowed for GraphNodes")
  self._nodes = nodes
  if isinstance(nodes, (list, tuple)):
  self._nodes = {}
@@ -593,6 +596,10 @@ class GraphNodes(Geometry):

  rectangle = figure.to_bbox()
  """
+ if self._nodes is None or len(self._nodes) == 0:
+ logger.warning(
+ f"Cannot create a bounding box from {self.name()} with empty nodes. Geometry ID: {self.sly_id} "
+ )
  return Rectangle.from_geometries_list(
  [Point.from_point_location(node.location) for node in self._nodes.values()]
  )
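Note: with the guards above, a graph geometry without nodes is expected to fail fast at construction instead of crashing later in `to_bbox()`. A hypothetical usage sketch (constructor arguments beyond `nodes` omitted):

```python
import supervisely as sly

try:
    figure = sly.GraphNodes({})  # empty node collection
except ValueError as e:
    # Expected after this change: "Empty list of nodes is not allowed for GraphNodes"
    print(e)
```
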
supervisely/geometry/rectangle.py CHANGED
@@ -476,7 +476,7 @@ class Rectangle(Geometry):
  return cls(0, 0, size[0] - 1, size[1] - 1)

  @classmethod
- def from_geometries_list(cls, geometries: List[sly.geometry.geometry]) -> Rectangle:
+ def from_geometries_list(cls, geometries: List[Geometry]) -> Rectangle:
  """
  Create Rectangle from given geometry objects.

@@ -494,6 +494,8 @@
  geom_objs = [sly.Point(100, 200), sly.Polyline([sly.PointLocation(730, 2104), sly.PointLocation(2479, 402)])]
  figure_from_geom_objs = sly.Rectangle.from_geometries_list(geom_objs)
  """
+ if geometries is None or len(geometries) == 0:
+ raise ValueError("No geometries provided to create a Rectangle.")
  bboxes = [g.to_bbox() for g in geometries]
  top = min(bbox.top for bbox in bboxes)
  left = min(bbox.left for bbox in bboxes)
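Note: `Rectangle.from_geometries_list` now rejects empty input up front instead of failing inside `min()`. A usage sketch based on the docstring example above:

```python
import supervisely as sly

geom_objs = [sly.Point(100, 200), sly.Point(730, 2104)]
bbox = sly.Rectangle.from_geometries_list(geom_objs)  # bounding box around both points

try:
    sly.Rectangle.from_geometries_list([])
except ValueError as e:
    print(e)  # "No geometries provided to create a Rectangle."
```
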
supervisely/nn/benchmark/base_benchmark.py CHANGED
@@ -620,6 +620,9 @@ class BaseBenchmark:
  def get_eval_result(self):
  if self._eval_results is None:
  self._eval_results = self.evaluator.get_eval_result()
+ if not self._eval_results.inference_info:
+ self._eval_results.inference_info["gt_project_id"] = self.gt_project_info.id
+ self._eval_results.inference_info["dt_project_id"] = self.dt_project_info.id
  return self._eval_results

  def get_diff_project_info(self):
supervisely/nn/benchmark/base_evaluator.py CHANGED
@@ -16,7 +16,7 @@ class BaseEvalResult:

  def __init__(self, directory: Optional[str] = None):
  self.directory = directory
- self.inference_info: Dict = None
+ self.inference_info: Dict = {}
  self.speedtest_info: Dict = None
  self.eval_data: Dict = None
  self.mp = None
supervisely/nn/benchmark/instance_segmentation/text_templates.py CHANGED
@@ -14,7 +14,7 @@ definitions = SimpleNamespace(
  )

  docs_url = (
- "https://docs.supervisely.com/neural-networks/model-evaluation-benchmark/instant-segmentation"
+ "https://docs.supervisely.com/neural-networks/model-evaluation-benchmark/instance-segmentation"
  )

  # <i class="zmdi zmdi-check-circle" style="color: #13ce66; margin-right: 5px"></i>
@@ -44,6 +44,7 @@ markdown_overview = """
  {}
  - **IoU threshold**: {}
  - **Optimal confidence threshold**: {} (calculated automatically), <a href="{}" target="_blank">learn more</a>.
+ - **Averaging across IoU thresholds:** {}, <a href="{}" target="_blank">learn more</a>.

  Learn more about Model Benchmark, implementation details, and how to use the charts in our <a href="{}" target="_blank">Technical Report</a>.
  """
@@ -60,7 +61,7 @@ Here, we comprehensively assess the model's performance by presenting a broad se
  - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
  """

- markdown_AP_custom_description = """> * AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
+ markdown_AP_custom_description = """> *AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""

  markdown_iou_per_class = """### IoU Threshold per Class

supervisely/nn/benchmark/object_detection/metric_provider.py CHANGED
@@ -90,13 +90,16 @@ class MetricProvider:
  self.iouThrs = params["iouThrs"]
  self.recThrs = params["recThrs"]

+ # Evaluation params
  eval_params = params.get("evaluation_params", {})
  self.iou_threshold = eval_params.get("iou_threshold", 0.5)
  self.iou_threshold_idx = np.where(np.isclose(self.iouThrs, self.iou_threshold))[0][0]
-
- # IoU per class (optional)
  self.iou_threshold_per_class = eval_params.get("iou_threshold_per_class")
  self.iou_idx_per_class = params.get("iou_idx_per_class") # {cat id: iou_idx}
+ if self.iou_threshold_per_class is not None:
+ # TODO: temporary solution
+ eval_params["average_across_iou_thresholds"] = False
+ self.average_across_iou_thresholds = eval_params.get("average_across_iou_thresholds", True)

  def calculate(self):
  self.m_full = _MetricProvider(
@@ -199,9 +202,10 @@ class MetricProvider:
  }

  def AP_per_class(self):
- s = self.coco_precision[:, :, :, 0, 2]
+ s = self.coco_precision[:, :, :, 0, 2].copy()
  s[s == -1] = np.nan
  ap = np.nanmean(s, axis=(0, 1))
+ ap = np.nan_to_num(ap, nan=0)
  return ap

  def AP_custom_per_class(self):
@@ -212,6 +216,7 @@ class MetricProvider:
  s[:, cat_id - 1] = self.coco_precision[iou_idx, :, cat_id - 1, 0, 2]
  s[s == -1] = np.nan
  ap = np.nanmean(s, axis=0)
+ ap = np.nan_to_num(ap, nan=0)
  return ap

  def AP_custom(self):
@@ -284,6 +289,14 @@ class _MetricProvider:
  self.fp_not_confused_matches = [m for m in self.fp_matches if not m["miss_cls"]]
  self.ious = np.array([m["iou"] for m in self.tp_matches])

+ # Evaluation params
+ self.params = params
+ self.iou_idx_per_class = np.array(
+ [params["iou_idx_per_class"][cat_id] for cat_id in self.cat_ids]
+ )[:, None]
+ eval_params = params.get("evaluation_params", {})
+ self.average_across_iou_thresholds = eval_params.get("average_across_iou_thresholds", True)
+
  def _init_counts(self):
  cat_ids = self.cat_ids
  iouThrs = self.iouThrs
@@ -328,14 +341,22 @@ class _MetricProvider:
  self.true_positives = true_positives
  self.false_negatives = false_negatives
  self.false_positives = false_positives
- self.TP_count = int(self.true_positives[:, 0].sum(0))
- self.FP_count = int(self.false_positives[:, 0].sum(0))
- self.FN_count = int(self.false_negatives[:, 0].sum(0))
+ self.TP_count = int(self._take_iou_thresholds(true_positives).sum())
+ self.FP_count = int(self._take_iou_thresholds(false_positives).sum())
+ self.FN_count = int(self._take_iou_thresholds(false_negatives).sum())

+ def _take_iou_thresholds(self, x):
+ return np.take_along_axis(x, self.iou_idx_per_class, axis=1)
+
  def base_metrics(self):
- tp = self.true_positives
- fp = self.false_positives
- fn = self.false_negatives
+ if self.average_across_iou_thresholds:
+ tp = self.true_positives
+ fp = self.false_positives
+ fn = self.false_negatives
+ else:
+ tp = self._take_iou_thresholds(self.true_positives)
+ fp = self._take_iou_thresholds(self.false_positives)
+ fn = self._take_iou_thresholds(self.false_negatives)
  confuse_count = len(self.confused_matches)

  mAP = self.coco_mAP
@@ -358,9 +379,14 @@ class _MetricProvider:
  }

  def per_class_metrics(self):
- tp = self.true_positives.mean(1)
- fp = self.false_positives.mean(1)
- fn = self.false_negatives.mean(1)
+ if self.average_across_iou_thresholds:
+ tp = self.true_positives.mean(1)
+ fp = self.false_positives.mean(1)
+ fn = self.false_negatives.mean(1)
+ else:
+ tp = self._take_iou_thresholds(self.true_positives).flatten()
+ fp = self._take_iou_thresholds(self.false_positives).flatten()
+ fn = self._take_iou_thresholds(self.false_negatives).flatten()
  pr = tp / (tp + fp)
  rc = tp / (tp + fn)
  f1 = 2 * pr * rc / (pr + rc)
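Note: `_take_iou_thresholds` above selects, for every class, the counts at that class's own IoU-threshold index. A self-contained illustration with made-up numbers:

```python
import numpy as np

# Hypothetical TP counts: rows are classes, columns are IoU thresholds (e.g. 0.50, 0.75, 0.95).
true_positives = np.array([
    [9, 8, 6],   # class 0
    [5, 5, 4],   # class 1
])
# Per-class IoU-threshold index, shaped (num_classes, 1) as in the diff.
iou_idx_per_class = np.array([[0], [2]])

per_class_tp = np.take_along_axis(true_positives, iou_idx_per_class, axis=1)
print(per_class_tp.flatten())   # [9 4] -> class 0 at IoU 0.50, class 1 at IoU 0.95
print(int(per_class_tp.sum()))  # 13   -> the new TP_count when averaging is disabled
```

The same hunk also copies the COCO precision slice before masking `-1` entries and maps residual NaNs to 0 via `np.nan_to_num`, so classes with no valid precision values report an AP of 0 instead of NaN.
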
supervisely/nn/benchmark/object_detection/text_templates.py CHANGED
@@ -44,6 +44,7 @@ markdown_overview = """
  {}
  - **IoU threshold**: {}
  - **Optimal confidence threshold**: {} (calculated automatically), <a href="{}" target="_blank">learn more</a>.
+ - **Averaging across IoU thresholds:** {}, <a href="{}" target="_blank">learn more</a>.

  Learn more about Model Benchmark, implementation details, and how to use the charts in our <a href="{}" target="_blank">Technical Report</a>.
  """
@@ -65,7 +66,7 @@ Here, we comprehensively assess the model's performance by presenting a broad se
  - **Calibration Score**: This score represents the consistency of predicted probabilities (or <abbr title="{}">confidence scores</abbr>) made by the model. We evaluate how well predicted probabilities align with actual outcomes. A well-calibrated model means that when it predicts an object with, say, 80% confidence, approximately 80% of those predictions should actually be correct.
  """

- markdown_AP_custom_description = """> * AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""
+ markdown_AP_custom_description = """> *AP_custom - Average Precision with different IoU thresholds for each class, that was set in evaluation params by the user."""

  markdown_iou_per_class = """### IoU Threshold per Class

supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py CHANGED
@@ -72,13 +72,13 @@ class ConfidenceScore(DetectionVisMetric):
  self.eval_result.dfsp_down,
  x="scores",
  y=["precision", "recall", "f1"],
- labels={"value": "Value", "variable": "Metric", "scores": "Confidence Score"},
+ labels={"value": "Metric", "variable": "Metric", "scores": "Confidence Score"},
  width=None,
  height=500,
  color_discrete_map=color_map,
  )
  fig.update_traces(
- hovertemplate="Confidence Score: %{x:.2f}<br>Value: %{y:.2f}<extra></extra>"
+ hovertemplate="Confidence score: %{x:.2f}<br>Metric: %{y:.2f}<extra></extra>"
  )
  fig.update_layout(yaxis=dict(range=[0, 1]), xaxis=dict(range=[0, 1], tick0=0, dtick=0.1))

supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py CHANGED
@@ -39,7 +39,7 @@ class F1ScoreAtDifferentIOU(DetectionVisMetric):
  np.concatenate([self.eval_result.dfsp_down["scores"].values[:, None], f1s_down.T], 1),
  columns=["scores"] + iou_names,
  )
- labels = {"value": "Value", "variable": "IoU threshold", "scores": "Confidence Score"}
+ labels = {"value": "F1-score", "variable": "IoU threshold", "scores": "Confidence Score"}

  fig = px.line(
  df,
@@ -51,19 +51,19 @@ class F1ScoreAtDifferentIOU(DetectionVisMetric):
  height=500,
  )
  fig.update_traces(
- hovertemplate="Confidence Score: %{x:.2f}<br>Value: %{y:.2f}<extra></extra>"
+ hovertemplate="Confidence Score: %{x:.2f}<br>F1-score: %{y:.2f}<extra></extra>"
  )
  fig.update_layout(yaxis=dict(range=[0, 1]), xaxis=dict(range=[0, 1], tick0=0, dtick=0.1))

  # add annotations for maximum F1-Score for each IoU threshold
  for i, iou in enumerate(iou_names):
- argmax_f1 = f1s[i].argmax()
+ argmax_f1 = np.nanargmax(f1s[i])
  max_f1 = f1s[i][argmax_f1]
  score = self.eval_result.mp.m_full.score_profile["scores"][argmax_f1]
  fig.add_annotation(
  x=score,
  y=max_f1,
- text=f"Best score: {score:.2f}",
+ text=f"Best conf: {score:.2f}",
  showarrow=True,
  arrowhead=1,
  arrowcolor="black",
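Note: the switch to `np.nanargmax` above matters when the F1 profile contains NaN entries (e.g. from 0/0 divisions); a toy comparison:

```python
import numpy as np

f1_profile = np.array([0.2, np.nan, 0.7, 0.5])
print(int(np.argmax(f1_profile)))     # 1 -- plain argmax lands on the NaN
print(int(np.nanargmax(f1_profile)))  # 2 -- NaNs ignored, the real maximum (0.7) is found
```
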
supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py CHANGED
@@ -86,11 +86,10 @@ class PerClassOutcomeCounts(DetectionVisMetric):
  import plotly.express as px # pylint: disable=import-error

  # Per-class Counts
- iou_thres = 0
-
- tp = self.eval_result.mp.true_positives[:, iou_thres]
- fp = self.eval_result.mp.false_positives[:, iou_thres]
- fn = self.eval_result.mp.false_negatives[:, iou_thres]
+ mp = self.eval_result.mp
+ tp = mp.m._take_iou_thresholds(mp.true_positives).flatten()
+ fp = mp.m._take_iou_thresholds(mp.false_positives).flatten()
+ fn = mp.m._take_iou_thresholds(mp.false_negatives).flatten()

  # normalize
  support = tp + fn
supervisely/nn/benchmark/object_detection/vis_metrics/overview.py CHANGED
@@ -20,7 +20,7 @@ class Overview(DetectionVisMetric):
  url = self.eval_result.inference_info.get("checkpoint_url")
  link_text = self.eval_result.inference_info.get("custom_checkpoint_path")
  if link_text is None:
- link_text = url
+ link_text = url or ""
  link_text = link_text.replace("_", "\_")

  model_name = self.eval_result.inference_info.get("model_name") or "Custom"
@@ -31,6 +31,7 @@ class Overview(DetectionVisMetric):

  # link to scroll to the optimal confidence section
  opt_conf_url = self.vis_texts.docs_url + "#f1-optimal-confidence-threshold"
+ average_url = self.vis_texts.docs_url + "#averaging-iou-thresholds"

  iou_threshold = self.eval_result.mp.iou_threshold
  if self.eval_result.different_iou_thresholds_per_class:
@@ -52,6 +53,8 @@ class Overview(DetectionVisMetric):
  iou_threshold,
  round(self.eval_result.mp.f1_optimal_conf, 4),
  opt_conf_url,
+ self.eval_result.mp.average_across_iou_thresholds,
+ average_url,
  self.vis_texts.docs_url,
  ]

supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py CHANGED
@@ -32,8 +32,7 @@ class PerClassAvgPrecision(DetectionVisMetric):
  import plotly.express as px # pylint: disable=import-error

  # AP per-class
- ap_per_class = self.eval_result.mp.coco_precision[:, :, :, 0, 2].mean(axis=(0, 1))
- ap_per_class[ap_per_class == -1] = 0 # -1 is a placeholder for no GT
+ ap_per_class = self.eval_result.mp.AP_per_class()
  labels = dict(r="Average Precision", theta="Class")
  fig = px.scatter_polar(
  r=ap_per_class,
supervisely/project/project.py CHANGED
@@ -4780,26 +4780,6 @@ async def _download_project_async(
  if semaphore is None:
  semaphore = api.get_default_semaphore()

- # number of workers
- num_workers = min(kwargs.get("num_workers", semaphore._value), 10)
-
- async def worker(queue: asyncio.Queue, stop_event: asyncio.Event):
- while not stop_event.is_set():
- task = await queue.get()
- if task is None:
- break
- try:
- await task
- except Exception as e:
- logger.error(f"Error in _download_project_async worker: {e}")
- stop_event.set()
- finally:
- queue.task_done()
-
- queue = asyncio.Queue()
- stop_event = asyncio.Event()
- workers = [asyncio.create_task(worker(queue, stop_event)) for _ in range(num_workers)]
-
  dataset_ids = set(dataset_ids) if (dataset_ids is not None) else None
  project_fs = None
  meta = ProjectMeta.from_json(api.project.get_meta(project_id, with_settings=True))
@@ -4883,11 +4863,25 @@
  ds_progress(1)
  return to_download

+ async def run_tasks_with_delay(tasks, delay=0.1):
+ created_tasks = []
+ for task in tasks:
+ created_task = asyncio.create_task(task)
+ created_tasks.append(created_task)
+ await asyncio.sleep(delay)
+ logger.debug(
+ f"{len(created_tasks)} tasks have been created for dataset ID: {dataset.id}, Name: {dataset.name}"
+ )
+ return created_tasks
+
+ tasks = []
  small_images = await check_items(small_images)
  large_images = await check_items(large_images)
+
  if len(small_images) == 1:
  large_images.append(small_images.pop())
  for images_batch in batched(small_images, batch_size=batch_size):
+
  task = _download_project_items_batch_async(
  api=api,
  dataset_id=dataset_id,
@@ -4901,7 +4895,7 @@
  only_image_tags=only_image_tags,
  progress_cb=ds_progress,
  )
- await queue.put(task)
+ tasks.append(task)
  for image in large_images:
  task = _download_project_item_async(
  api=api,
@@ -4915,9 +4909,10 @@
  only_image_tags=only_image_tags,
  progress_cb=ds_progress,
  )
- await queue.put(task)
+ tasks.append(task)

- await queue.join()
+ created_tasks = await run_tasks_with_delay(tasks)
+ await asyncio.gather(*created_tasks)

  if save_image_meta:
  meta_dir = dataset_fs.meta_dir
@@ -4934,13 +4929,6 @@
  if item_name not in items_names_set:
  dataset_fs.delete_item(item_name)

- for _ in range(num_workers):
- await queue.put(None)
- await asyncio.gather(*workers)
-
- if stop_event.is_set():
- raise RuntimeError("Download process was stopped due to an error in one of the workers.")
-
  try:
  create_readme(dest_dir, project_id, api)
  except Exception as e:
@@ -4964,7 +4952,7 @@ async def _download_project_item_async(
  """
  if save_images:
  logger.debug(
- f"Downloading 1 image in single mode: {img_info.name} with _download_project_item_async"
+ f"Downloading 1 image in single mode with _download_project_item_async. ID: {img_info.id}, Name: {img_info.name}"
  )
  img_bytes = await api.image.download_bytes_single_async(
  img_info.id, semaphore=semaphore, check_hash=True
@@ -4982,7 +4970,11 @@
  force_metadata_for_links=not save_images,
  )
  ann_json = ann_info.annotation
- tmp_ann = Annotation.from_json(ann_json, meta)
+ try:
+ tmp_ann = Annotation.from_json(ann_json, meta)
+ except Exception:
+ logger.error(f"Error while deserializing annotation for image with ID: {img_info.id}")
+ raise
  if None in tmp_ann.img_size:
  tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
  ann_json = tmp_ann.to_json()
@@ -5004,6 +4996,7 @@
  )
  if progress_cb is not None:
  progress_cb(1)
+ logger.debug(f"Single project item has been downloaded. Semaphore state: {semaphore._value}")


  async def _download_project_items_batch_async(
@@ -5056,12 +5049,18 @@
  semaphore=semaphore,
  force_metadata_for_links=not save_images,
  )
- tmps_anns = [Annotation.from_json(ann_info.annotation, meta) for ann_info in ann_infos]
  ann_jsons = []
- for tmp_ann in tmps_anns:
- if None in tmp_ann.img_size:
- tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
- ann_jsons.append(tmp_ann.to_json())
+ for img_info, ann_info in zip(img_infos, ann_infos):
+ try:
+ tmp_ann = Annotation.from_json(ann_info.annotation, meta)
+ if None in tmp_ann.img_size:
+ tmp_ann = tmp_ann.clone(img_size=(img_info.height, img_info.width))
+ ann_jsons.append(tmp_ann.to_json())
+ except Exception:
+ logger.error(
+ f"Error while deserializing annotation for image with ID: {img_info.id}"
+ )
+ raise
  else:
  ann_jsons = []
  for img_info in img_infos:
@@ -5083,5 +5082,7 @@
  if progress_cb is not None:
  progress_cb(1)

+ logger.debug(f"Batch of project items has been downloaded. Semaphore state: {semaphore._value}")
+

  DatasetDict = Project.DatasetDict
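Note: the removed worker/queue machinery is replaced by creating the download tasks directly, with a short pause between `asyncio.create_task` calls, and awaiting them with `asyncio.gather`. A self-contained sketch of that scheduling pattern (the coroutine below stands in for the real download coroutines):

```python
import asyncio


async def run_tasks_with_delay(coros, delay=0.1):
    """Start each coroutine as a task, sleeping `delay` seconds between creations."""
    created_tasks = []
    for coro in coros:
        created_tasks.append(asyncio.create_task(coro))
        await asyncio.sleep(delay)
    return created_tasks


async def fake_download(item_id):
    # Stand-in for _download_project_item_async / _download_project_items_batch_async.
    await asyncio.sleep(0.05)
    return item_id


async def main():
    tasks = await run_tasks_with_delay([fake_download(i) for i in range(5)])
    print(await asyncio.gather(*tasks))  # [0, 1, 2, 3, 4]


asyncio.run(main())
```
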
{supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: supervisely
- Version: 6.73.286
+ Version: 6.73.288
  Summary: Supervisely Python SDK.
  Home-page: https://github.com/supervisely/supervisely
  Author: Supervisely
{supervisely-6.73.286.dist-info → supervisely-6.73.288.dist-info}/RECORD CHANGED
@@ -5,7 +5,7 @@ supervisely/function_wrapper.py,sha256=R5YajTQ0GnRp2vtjwfC9hINkzQc0JiyGsu8TER373
  supervisely/sly_logger.py,sha256=LG1wTyyctyEKuCuKM2IKf_SMPH7BzkTsFdO-0tnorzg,6225
  supervisely/tiny_timer.py,sha256=hkpe_7FE6bsKL79blSs7WBaktuPavEVu67IpEPrfmjE,183
  supervisely/annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- supervisely/annotation/annotation.py,sha256=Kdn3HRpx7ie6vkDaQFXrg597nOidZ6FMN-oXpDk4nyI,114289
+ supervisely/annotation/annotation.py,sha256=5AG1AhebkmiYy2r7nKbz6TjdmCF4tuf9FtqUjLLs7aU,114659
  supervisely/annotation/annotation_transforms.py,sha256=TlVy_gUbM-XH6GbLpZPrAi6pMIGTr7Ow02iSKOSTa-I,9582
  supervisely/annotation/json_geometries_map.py,sha256=nL6AmMhFy02fw9ryBm75plKyOkDh61QdOToSuLAcz_Q,1659
  supervisely/annotation/label.py,sha256=NpHZ5o2H6dI4KiII22o2HpiLXG1yekh-bEy8WvI2Ljg,37498
@@ -21,14 +21,14 @@ supervisely/annotation/tag_meta_mapper.py,sha256=RWeTrxJ64syodyhXIRSH007bX6Hr3B4
  supervisely/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  supervisely/api/advanced_api.py,sha256=Nd5cCnHFWc3PSUrCtENxTGtDjS37_lCHXsgXvUI3Ti8,2054
  supervisely/api/agent_api.py,sha256=ShWAIlXcWXcyI9fqVuP5GZVCigCMJmjnvdGUfLspD6Y,8890
- supervisely/api/annotation_api.py,sha256=kB9l0NhQEkunGDC9fWjNzf5DdhqRF1tv-RRnIbkV2k0,64941
- supervisely/api/api.py,sha256=0dgPx_eizoCEFzfT8YH9uh1kq-OJwjrV5fBGD7uZ7E4,65840
+ supervisely/api/annotation_api.py,sha256=fVQJOg5SLcD_mRUmPaVsJIOVTGFhsabRXqve0LyUgrc,64743
+ supervisely/api/api.py,sha256=YBE6yi682H5dy3BBQtESmfC9hKZcbHyYRPNGLRldgSU,66014
  supervisely/api/app_api.py,sha256=RsbVej8WxWVn9cNo5s3Fqd1symsCdsfOaKVBKEUapRY,71927
  supervisely/api/dataset_api.py,sha256=GH7prDRJKyJlTv_7_Y-RkTwJN7ED4EkXNqqmi3iIdI4,41352
  supervisely/api/file_api.py,sha256=v2FsD3oljwNPqcDgEJRe8Bu5k0PYKzVhqmRb5QFaHAQ,83422
  supervisely/api/github_api.py,sha256=NIexNjEer9H5rf5sw2LEZd7C1WR-tK4t6IZzsgeAAwQ,623
  supervisely/api/image_annotation_tool_api.py,sha256=YcUo78jRDBJYvIjrd-Y6FJAasLta54nnxhyaGyanovA,5237
- supervisely/api/image_api.py,sha256=qZwTjeCo6bkEuXDuB8RhhP0g6PzlRuCXJkUfN9rsUZ4,190985
+ supervisely/api/image_api.py,sha256=bSal6vB2c7Ct2qDarXTaTmXy7x0X1VlV8oTuT6YpY2o,191061
  supervisely/api/import_storage_api.py,sha256=BDCgmR0Hv6OoiRHLCVPKt3iDxSVlQp1WrnKhAK_Zl84,460
  supervisely/api/issues_api.py,sha256=BqDJXmNoTzwc3xe6_-mA7FDFC5QQ-ahGbXk_HmpkSeQ,17925
  supervisely/api/labeling_job_api.py,sha256=odnzZjp29yM16Gq-FYkv-OA4WFMNJCLFo4qSikW2A7c,56280
@@ -49,7 +49,7 @@ supervisely/api/video_annotation_tool_api.py,sha256=3A9-U8WJzrTShP_n9T8U01M9FzGY
  supervisely/api/workspace_api.py,sha256=5KAxpI9DKBmgF_pyQaXHpGT30HZ9wRtR6DP3FoYFZtY,9228
  supervisely/api/entity_annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  supervisely/api/entity_annotation/entity_annotation_api.py,sha256=K79KdDyepQv4FiNQHBj9V4-zLIemxK9WG1ig1bfBKb8,3083
- supervisely/api/entity_annotation/figure_api.py,sha256=deYCZNG7JeDhxlYew51FyGvqY3dc7fkERtwmBPJmHcw,24503
+ supervisely/api/entity_annotation/figure_api.py,sha256=jNObHAjy2JdXvKLP5IeBWISDjrZn_Budxp9J3Odyhxo,24531
  supervisely/api/entity_annotation/object_api.py,sha256=gbcNvN_KY6G80Me8fHKQgryc2Co7VU_kfFd1GYILZ4E,8875
  supervisely/api/entity_annotation/tag_api.py,sha256=M-28m9h8R4k9Eqo6P1S0UH8_D5kqCwAvQLYY6_Yz4oM,11161
  supervisely/api/pointcloud/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -677,7 +677,7 @@ supervisely/geometry/cuboid.py,sha256=oxsRoTKuwTNxH4Vp6khyvw1TCrBagSWNV5HmQKJZHt
  supervisely/geometry/cuboid_2d.py,sha256=-oXeKiUS2gguQ4GyIZYp1cNPPhOLsGOFZl7uI71BfZM,13438
  supervisely/geometry/cuboid_3d.py,sha256=x472ZPHTZDIY5Dj8tKbLQG3BCukFPgSvPJlxfHdKi1w,4168
  supervisely/geometry/geometry.py,sha256=dbXnct8hrr7Wour6yCrtAef22KSJ2uYRm1F5GE10_MM,15287
- supervisely/geometry/graph.py,sha256=1_tX7FGmYkXuAx3P_w4zA4p8mRYnCN4iZ--2pMyxseI,24121
+ supervisely/geometry/graph.py,sha256=RDdZtN_P7TKAg4s_QXluCGzdmhD-IeonvK4Pix924kk,24474
  supervisely/geometry/helpers.py,sha256=2gdYMFWTAr836gVXcp-lkDQs9tdaV0ou33kj3mzJBQA,5132
  supervisely/geometry/image_rotator.py,sha256=wrU8cXEUfuNcmPms2myUV4BpZqz_2oDArsEUFeiTpxs,6888
  supervisely/geometry/main_tests.py,sha256=K3Olsz9igHDW2IfIA5JOpjoE8bZ3ex2PXvVR2ZCDrHU,27199
@@ -689,7 +689,7 @@ supervisely/geometry/point_location.py,sha256=vLu5pWdtAi-WVQUKgFO7skigTaR-mtWR0t
  supervisely/geometry/pointcloud.py,sha256=cc4P_UNLGx5dWah3caRJytW7_mAi8UnYsJOa20mUy8s,1472
  supervisely/geometry/polygon.py,sha256=cAgCR8ccdGtieJTnmDnupPALMEwerHIqMWx7k3OCzVQ,11594
  supervisely/geometry/polyline.py,sha256=LjjD-YGVDw1TQ84_IOHqnq43JFuSnsGdGMx404olYDs,8258
- supervisely/geometry/rectangle.py,sha256=f-Y6AnVYbMXXaAOLREyjqVJeb-l_tevQQHy9kiMKHhI,33749
+ supervisely/geometry/rectangle.py,sha256=QaBcSPeH87rcwsSft1TavEdCe4NpvfHZztZMEmzIxGk,33869
  supervisely/geometry/sliding_windows.py,sha256=VWtE3DS9AaIlS0ch0PY6wwtWU89J82icDRZ-F0LFrjM,1700
  supervisely/geometry/sliding_windows_fuzzy.py,sha256=InvJlH6MEW55DM1IdoMHP2MLFLieTDZfHrZZEINLQOc,3626
  supervisely/geometry/validation.py,sha256=G5vjtiXTCaTQvWegPIBiNw8pN_GiY86OUSRSsccdyLU,2139
@@ -744,8 +744,8 @@ supervisely/nn/artifacts/utils.py,sha256=C4EaMi95MAwtK5TOnhK4sQ1BWvgwYBxXyRStkhY
  supervisely/nn/artifacts/yolov5.py,sha256=slh05EpQsxqgKwB9KMClshdBxPBN3ZWZ6S4B80ECEt4,1724
  supervisely/nn/artifacts/yolov8.py,sha256=sFd9kU7Gdowq6WH1S3NdlQeoL9jjQKmRYb51fG_wbDk,1446
  supervisely/nn/benchmark/__init__.py,sha256=7jDezvavJFtO9mDeB2TqW8N4sD8TsHQBPpA9RESleIQ,610
- supervisely/nn/benchmark/base_benchmark.py,sha256=Xnb0jL0voBPC-s_eVYSYbYv-xVfLYtQf1tHLnJ9ktq8,25713
- supervisely/nn/benchmark/base_evaluator.py,sha256=sc8gNn3myGA8sGnP6EIiTp24JPXUQ9Ou-8BmTf-Dt7w,5248
+ supervisely/nn/benchmark/base_benchmark.py,sha256=2buF7mD58igPMBPiEAJqfWHRO-JdPPzzOVlbR7-nvoA,25956
+ supervisely/nn/benchmark/base_evaluator.py,sha256=MJeZnMcWr_cbeJ2r0GJ4SWgjWX5w33Y3pYVR6kCIQMQ,5246
  supervisely/nn/benchmark/base_visualizer.py,sha256=7woiYmztDzYZlbhL1hTfJnIi26RFi4obF2VLA519uxQ,10092
  supervisely/nn/benchmark/cv_tasks.py,sha256=ShoAbuNzfMYj0Se-KOnl_-dJnrmvN6Aukxa0eq28bFw,239
  supervisely/nn/benchmark/comparison/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -779,33 +779,33 @@ supervisely/nn/benchmark/instance_segmentation/__init__.py,sha256=47DEQpj8HBSa-_
  supervisely/nn/benchmark/instance_segmentation/benchmark.py,sha256=lTDzgKGpfeF5o_a2nS56wiAsUQPH1eubk37b9CaB2KI,1171
  supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
  supervisely/nn/benchmark/instance_segmentation/evaluator.py,sha256=mpCi8S6YNwlVvgcERQSHBOhC9PrSfQkQ55pPTcK6V9c,2811
- supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=_ZIU_3-xlUGKTcbEthxB4Ngt12azdC7pxpgqHHw7M3I,25780
+ supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=l2I1PbenuBzcCPu_h2J5JE5gQsJCr6lNnoIzk5BEuwc,25868
  supervisely/nn/benchmark/instance_segmentation/visualizer.py,sha256=8NscOKy7JK4AG-Czu3SM0qJQXLDfKD9URdG1d4nz89E,564
  supervisely/nn/benchmark/object_detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  supervisely/nn/benchmark/object_detection/base_vis_metric.py,sha256=XjUnFCnCMYLrpjojIOwiRNaSsSLYpozTHWfwLkaCd5U,1612
  supervisely/nn/benchmark/object_detection/benchmark.py,sha256=Wb4xlFXilIMVfsifNNQY25uE52NeEDLzQpnq8QPYq9U,1086
  supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=POzpiaxnxuwAPSNQOGgjoUPfsk6Lf5hb9GLHwltWY5Y,94
  supervisely/nn/benchmark/object_detection/evaluator.py,sha256=EOQQbmwQqjjvbRu3tY24SRA7K8nyqshR92gUcP1lcrY,7371
- supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=MLVRnSwMQ9lfrlgBt4ThIHTVKY-6zuuEWK5-yVsmaj0,21140
- supervisely/nn/benchmark/object_detection/text_templates.py,sha256=J5xUPCGY-QWxc5AEt_u9_2r5q0LBlIzsa007H0GgoeU,26026
+ supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=iV79hlyB-_wj-X25-JPjoXfwQWNxGrMVweha3JZA46M,22557
+ supervisely/nn/benchmark/object_detection/text_templates.py,sha256=ZjkcP91dj98_1xqxKSy5TGrU08puXaLhpjNC-c_41A0,26113
  supervisely/nn/benchmark/object_detection/visualizer.py,sha256=NpLKVW5fo6N0kYzgLsfY66wvCv38G3k-SNm4HImXt6g,32366
  supervisely/nn/benchmark/object_detection/vis_metrics/__init__.py,sha256=AXCLHEySEdR-B-5sfDoWBmmOLBVlyW2U_xr8Ta42sQI,2096
  supervisely/nn/benchmark/object_detection/vis_metrics/confidence_distribution.py,sha256=OlwkPgzEQ-RegcLZHVUVOL0n6I_2iayPVpAIie4y2O8,3615
- supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=r_saaZI4WB7C7ykNb1obmf8kEOkphLA4pInDoS6dXXU,4005
+ supervisely/nn/benchmark/object_detection/vis_metrics/confidence_score.py,sha256=kuOhQDNwAsBbtjuMU_7GajVzu6j6n3xDJTv_hNKX6o8,4007
  supervisely/nn/benchmark/object_detection/vis_metrics/confusion_matrix.py,sha256=2PJUt0-njRpzN7XBGjkSt9kkh5tDPuv_Sne-2v8DWHc,3731
  supervisely/nn/benchmark/object_detection/vis_metrics/explore_predictions.py,sha256=wIYfq3izM2XNJHr56h3j5XhuU8W8Y3wO_RKAwxntQs4,4855
- supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py,sha256=6y2Kx-R_t4SdJkdWNyZQ6TGjCC-u6KhXb4cCno4GuTk,2882
+ supervisely/nn/benchmark/object_detection/vis_metrics/f1_score_at_different_iou.py,sha256=Aewzu2QhxZoPT_k0QJt_G11B8M2DXLCGRjE0MlVYNko,2892
  supervisely/nn/benchmark/object_detection/vis_metrics/frequently_confused.py,sha256=7rObk7WNsfwK7xBWl3aOxcn0uD48njEc04fQIPHc3_4,4678
  supervisely/nn/benchmark/object_detection/vis_metrics/iou_distribution.py,sha256=lv4Bk8W4X8ZhvQKyMXI46d240PNlMFx1hdji_aoTS50,3601
  supervisely/nn/benchmark/object_detection/vis_metrics/key_metrics.py,sha256=byucJuHYWSXIZU8U1Dc44QDpG3lTlhoNdUfD1b-uriw,4721
  supervisely/nn/benchmark/object_detection/vis_metrics/model_predictions.py,sha256=gsGDsesiwOcqeFvHr33b4PSJNw6MoA5brO-qRydRtsA,5944
  supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts.py,sha256=HuTgisYmXCSUeF5WOahy-uaCdvRLsNzg28BDrZ-5hww,7161
- supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=GBq0KlPka5z4cxHcKCe2eVOI_h3qlWUqGCyhYs6mjrk,6825
- supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=M6E--Yd1ztP4VBjR6VDUVrj2hgs5mwJF-vhWIjgVGkw,6376
+ supervisely/nn/benchmark/object_detection/vis_metrics/outcome_counts_per_class.py,sha256=BKsb1XGVsg6-aOI5G6NibxvdD5lVzkMjHisI8T85Sns,6853
+ supervisely/nn/benchmark/object_detection/vis_metrics/overview.py,sha256=uBxHIz8t0ujS5MI-LASg1RrPx3YqeKKc7dWz3MozHIU,6546
  supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve.py,sha256=EeZmyNlTVQLQ-0wIDGdvFmRkahJBBiOKSmWiAJ8Bfks,3478
  supervisely/nn/benchmark/object_detection/vis_metrics/pr_curve_by_class.py,sha256=Bl_buVvH8SVqwsc4DcHnojMOqpwTnRgXFt9yw_Y1BR0,1607
  supervisely/nn/benchmark/object_detection/vis_metrics/precision.py,sha256=cAgMrp13uulHfM8xnPDZyR6PqS8nck1Fo7YPpvHPCbw,2708
- supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py,sha256=X-hyf7OP7hzQzI5Yb2yDU536hxYQLvxcnVyYa3x27XA,2076
+ supervisely/nn/benchmark/object_detection/vis_metrics/precision_avg_per_class.py,sha256=IetoRTE9xZHHMZlPwUmQyTnaD7cQpnIQ3zJmUUr6fgY,1965
  supervisely/nn/benchmark/object_detection/vis_metrics/recall.py,sha256=AAxg3eJDjWIZEarOMZNcdIcYpVDFFDlbc5bwF4X3GIo,2579
  supervisely/nn/benchmark/object_detection/vis_metrics/recall_vs_precision.py,sha256=u-00HH54XzlhFuzc7U-mk2-IhUSEPYzv23BIIz8STvk,1984
  supervisely/nn/benchmark/object_detection/vis_metrics/reliability_diagram.py,sha256=_8ie3dPfwOyNCg-YhqO5jFW6kubCfQC2Obn9QSZFVeQ,3615
@@ -1009,7 +1009,7 @@ supervisely/project/data_version.py,sha256=nknaWJSUCwoDyNG9_d1KA-GjzidhV9zd9Cn8c
  supervisely/project/download.py,sha256=zb8sb4XZ6Qi3CP7fmtLRUAYzaxs_W0WnOfe2x3ZVRMs,24639
  supervisely/project/pointcloud_episode_project.py,sha256=yiWdNBQiI6f1O9sr1pg8JHW6O-w3XUB1rikJNn3Oung,41866
  supervisely/project/pointcloud_project.py,sha256=Kx1Vaes-krwG3BiRRtHRLQxb9G5m5bTHPN9IzRqmNWo,49399
- supervisely/project/project.py,sha256=tvNPGyIZVs4p3iMz2eDU1tmtsPZWZOhQ9vBJCqCMxbs,202003
+ supervisely/project/project.py,sha256=34fAbYV4VdfVSqMs0a5ggAIwELd8nPb-uGoaC1F7h4I,202299
  supervisely/project/project_meta.py,sha256=26s8IiHC5Pg8B1AQi6_CrsWteioJP2in00cRNe8QlW0,51423
  supervisely/project/project_settings.py,sha256=NLThzU_DCynOK6hkHhVdFyezwprn9UqlnrLDe_3qhkY,9347
  supervisely/project/project_type.py,sha256=_3RqW2CnDBKFOvSIrQT1RJQaiHirs34_jiQS8CkwCpo,530
@@ -1071,9 +1071,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
  supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
  supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
- supervisely-6.73.286.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- supervisely-6.73.286.dist-info/METADATA,sha256=ulxDJ50Pdv9rflRKXgqgB8fpw1pOMrd4xzC1uDdIseU,33573
- supervisely-6.73.286.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- supervisely-6.73.286.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
- supervisely-6.73.286.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
- supervisely-6.73.286.dist-info/RECORD,,
+ supervisely-6.73.288.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ supervisely-6.73.288.dist-info/METADATA,sha256=bY0YA_vkWd0RNep6zrPSU0itBr5BQBWR8J6aHKBkzQw,33573
+ supervisely-6.73.288.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ supervisely-6.73.288.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+ supervisely-6.73.288.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+ supervisely-6.73.288.dist-info/RECORD,,