supervisely 6.73.307__py3-none-any.whl → 6.73.309__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -176,7 +176,7 @@ class TagMeta(KeyObject, JsonSerializable):
                     self._applicable_to, SUPPORTED_APPLICABLE_TO
                 )
             )
-
+
         if self._target_type not in SUPPORTED_TARGET_TYPES:
             raise ValueError(
                 "target_type = {!r} is unknown, should be one of {}".format(
@@ -362,7 +362,7 @@ class TagMeta(KeyObject, JsonSerializable):
         # Output: ['car', 'bicycle']
         """
         return self._applicable_classes
-
+
     @property
     def target_type(self) -> str:
         """
@@ -430,6 +430,12 @@ class TagMeta(KeyObject, JsonSerializable):
             TagMetaJsonFields.VALUE_TYPE: self.value_type,
             TagMetaJsonFields.COLOR: rgb2hex(self.color),
         }
+
+        #! fix for the issue with the default value of the target_type
+        #! while restoring Data Version with old class definitions
+        if not hasattr(self, "_target_type"):
+            self._target_type = TagTargetType.ALL
+
         if self.value_type == TagValueType.ONEOF_STRING:
             jdict[TagMetaJsonFields.VALUES] = self.possible_values
 
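
The guard added to to_json() above matters when a TagMeta restored from an old Data Version snapshot predates the _target_type attribute: without it, serializing such an object can fail. A minimal sketch of that scenario, assuming the public TagMeta API of this SDK (the delattr only mimics an instance created by an older release):

    from supervisely.annotation.tag_meta import TagMeta, TagValueType

    tm = TagMeta("season", TagValueType.ANY_STRING)
    delattr(tm, "_target_type")  # mimic an object pickled/serialized by an older SDK build
    print(tm.to_json())          # 6.73.309: falls back to TagTargetType.ALL instead of raising
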
@@ -1799,7 +1799,12 @@ class ProjectApi(CloneableModuleApi, UpdateableModule, RemoveableModuleApi):
         self.update_meta(id, meta)
 
     def _set_custom_grouping_settings(
-        self, id: int, group_images: bool, tag_name: str, sync: bool, label_group_tag_name: str = None
+        self,
+        id: int,
+        group_images: bool,
+        tag_name: str,
+        sync: bool,
+        label_group_tag_name: str = None,
     ) -> None:
         """Sets the project settings for custom grouping.
 
@@ -54,6 +54,22 @@ from supervisely.sly_logger import logger
 if TYPE_CHECKING:
     from supervisely.app.widgets import Widget
 
+import logging
+
+uvicorn_logger = logging.getLogger("uvicorn.access")
+
+
+class ReadyzFilter(logging.Filter):
+    def filter(self, record):
+        if "/readyz" in record.getMessage() or "/livez" in record.getMessage():
+            record.levelno = logging.DEBUG  # Change log level to DEBUG
+            record.levelname = "DEBUG"
+        return True
+
+
+# Apply the filter
+uvicorn_logger.addFilter(ReadyzFilter())
+
 
 class Event:
     class Brush:
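
The filter above relabels uvicorn access-log records for the /readyz and /livez health probes from INFO to DEBUG; it always returns True, so the filter itself never drops records, but any handler configured at INFO level will then skip them. A self-contained sketch of the same idea, with the filter re-declared locally for the demo:

    import logging

    class ReadyzFilter(logging.Filter):
        def filter(self, record):
            if "/readyz" in record.getMessage() or "/livez" in record.getMessage():
                record.levelno = logging.DEBUG
                record.levelname = "DEBUG"
            return True  # keep the record, just relabeled

    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))

    log = logging.getLogger("uvicorn.access")
    log.setLevel(logging.INFO)
    log.propagate = False
    log.addHandler(handler)
    log.addFilter(ReadyzFilter())

    log.info("GET /readyz HTTP/1.1 200")    # relabeled to DEBUG -> dropped by the INFO-level handler
    log.info("GET /api/info HTTP/1.1 200")  # printed: INFO GET /api/info HTTP/1.1 200
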
@@ -5,3 +5,6 @@ iou_threshold: 0.5
 # Confidence threshold.
 # Set 'auto' to calculate the optimal confidence threshold.
 confidence_threshold: auto
+
+# Maximum number of detections per image.
+max_detections: 100
@@ -5,3 +5,6 @@ iou_threshold: 0.5
 # Confidence threshold.
 # Set 'auto' to calculate the optimal confidence threshold.
 confidence_threshold: auto
+
+# Maximum number of detections per image.
+max_detections: 100
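
Per the RECORD changes further down, this identical hunk is applied to both the instance segmentation and the object detection evaluation_params.yaml. The new max_detections key is read by calculate_metrics() below with the same default of 100. A hedged sketch of how such a parameters file could be consumed (the yaml usage and file name are assumptions, not the SDK's own loader):

    import yaml

    with open("evaluation_params.yaml") as f:        # hypothetical local copy of the file
        evaluation_params = yaml.safe_load(f) or {}

    max_dets = evaluation_params.get("max_detections", 100)  # same default as calculate_metrics()
    print(max_dets)                                  # 100 for the file shipped in 6.73.309
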
@@ -3,9 +3,15 @@ from typing import Callable, List, Literal, Optional
 
 import numpy as np
 
+from supervisely.nn.benchmark.utils.detection.coco_eval import (
+    COCO,
+    SlyCOCOeval,
+    pycocotools_installed,
+)
+
 
 def set_cocoeval_params(
-    cocoeval,
+    cocoeval: SlyCOCOeval,
     parameters: dict,
 ):
     """
@@ -28,8 +34,8 @@ def set_cocoeval_params(
 
 
 def calculate_metrics(
-    cocoGt,
-    cocoDt,
+    cocoGt: COCO,
+    cocoDt: COCO,
     iouType: Literal["bbox", "segm"],
     progress_cb: Optional[Callable] = None,
     evaluation_params: Optional[dict] = None,
@@ -48,12 +54,14 @@ def calculate_metrics(
     :return: Results of the evaluation
     :rtype: dict
     """
-    from pycocotools.coco import COCO  # pylint: disable=import-error
-    from pycocotools.cocoeval import COCOeval  # pylint: disable=import-error
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
 
-    cocoGt: COCO = cocoGt
+    evaluation_params = evaluation_params or {}
+    max_dets = evaluation_params.get("max_detections", 100)
 
-    cocoEval = COCOeval(cocoGt, cocoDt, iouType=iouType)
+    cocoEval = SlyCOCOeval(cocoGt, cocoDt, iouType=iouType)
+    cocoEval.params.maxDets[-1] = max_dets
     cocoEval.evaluate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval.accumulate()
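
For orientation: pycocotools evaluates AP/AR at several detection caps held in params.maxDets, and only the last (largest) slot is overridden with the max_detections value here. A small sketch of that wiring; the [1, 10, 100] default is an assumption about pycocotools' Params, stated for illustration:

    # evaluation_params as it might arrive from evaluation_params.yaml
    evaluation_params = {"max_detections": 500}
    max_dets = evaluation_params.get("max_detections", 100)

    maxDets = [1, 10, 100]   # assumed pycocotools default Params().maxDets
    maxDets[-1] = max_dets
    print(maxDets)           # [1, 10, 500] -> AP/AR@500 replaces AP/AR@100
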
@@ -61,8 +69,9 @@ def calculate_metrics(
     cocoEval.summarize()
 
     # For classification metrics
-    cocoEval_cls = COCOeval(cocoGt, cocoDt, iouType=iouType)
+    cocoEval_cls = SlyCOCOeval(cocoGt, cocoDt, iouType=iouType)
     cocoEval_cls.params.useCats = 0
+    cocoEval_cls.params.maxDets[-1] = max_dets
     cocoEval_cls.evaluate()
     progress_cb(1) if progress_cb is not None else None
     cocoEval_cls.accumulate()
@@ -70,7 +79,6 @@ def calculate_metrics(
     cocoEval_cls.summarize()
 
     iouThrs = cocoEval.params.iouThrs
-    evaluation_params = evaluation_params or {}
     iou_threshold = evaluation_params.get("iou_threshold", 0.5)
     iou_threshold_per_class = evaluation_params.get("iou_threshold_per_class")
     if iou_threshold_per_class is not None:
@@ -86,7 +94,7 @@ def calculate_metrics(
     if iou_threshold_per_class is not None or iou_threshold != 0.5:
         average_across_iou_thresholds = False
     evaluation_params["average_across_iou_thresholds"] = average_across_iou_thresholds
-
+
     eval_img_dict = get_eval_img_dict(cocoEval)
     eval_img_dict_cls = get_eval_img_dict(cocoEval_cls)
     matches = get_matches(
@@ -116,7 +124,10 @@ def calculate_metrics(
     return eval_data
 
 
-def get_counts(eval_img_dict: dict, cocoEval_cls):
+def get_counts(eval_img_dict: dict, cocoEval_cls: SlyCOCOeval):
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
+
     cat_ids = cocoEval_cls.cocoGt.getCatIds()
     iouThrs = cocoEval_cls.params.iouThrs
     catId2idx = {cat_id: i for i, cat_id in enumerate(cat_ids)}
@@ -143,12 +154,12 @@ def get_counts(eval_img_dict: dict, cocoEval_cls):
     return true_positives.astype(int), false_positives.astype(int), false_negatives.astype(int)
 
 
-def get_counts_and_scores(cocoEval, cat_id: int, t: int):
-    """
-    tps, fps, scores, n_positives
+def get_counts_and_scores(cocoEval: SlyCOCOeval, cat_id: int, t: int):
+    """Returns tps, fps, scores, n_positives"""
+
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
 
-    type cocoEval: COCOeval
-    """
     aRng = cocoEval.params.areaRng[0]
     eval_imgs = [ev for ev in cocoEval.evalImgs if ev is not None and ev["aRng"] == aRng]
 
@@ -192,10 +203,10 @@ def get_counts_and_scores(cocoEval, cat_id: int, t: int):
     return tps, fps, scores, n_positives
 
 
-def get_eval_img_dict(cocoEval):
-    """
-    type cocoEval: COCOeval
-    """
+def get_eval_img_dict(cocoEval: SlyCOCOeval):
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
+
     aRng = cocoEval.params.areaRng[0]
     eval_img_dict = defaultdict(list)  # img_id : dt/gt
     for i, eval_img in enumerate(cocoEval.evalImgs):
@@ -211,7 +222,10 @@ def get_eval_img_dict(cocoEval):
     return eval_img_dict
 
 
-def _get_missclassified_match(eval_img_cls, dt_id, gtIds_orig, dtIds_orig, iou_t):
+def _get_missclassified_match(eval_img_cls: SlyCOCOeval, dt_id, gtIds_orig, dtIds_orig, iou_t):
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
+
     # Correction on miss-classification
     gt_idx = np.nonzero(eval_img_cls["gtMatches"][iou_t] == dt_id)[0]
     if len(gt_idx) == 1:
@@ -231,12 +245,12 @@ def _get_missclassified_match(eval_img_cls, dt_id, gtIds_orig, dtIds_orig, iou_t
 def get_matches(
     eval_img_dict: dict,
     eval_img_dict_cls: dict,
-    cocoEval_cls,
+    cocoEval_cls: SlyCOCOeval,
     iou_idx_per_class: dict = None,
 ):
-    """
-    type cocoEval_cls: COCOeval
-    """
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
+
     cat_ids = cocoEval_cls.cocoGt.getCatIds()
     matches = []
     for img_id, eval_imgs in eval_img_dict.items():
@@ -326,11 +340,10 @@ def get_matches(
     return matches
 
 
-def get_rare_classes(cocoGt, topk_ann_fraction=0.1, topk_classes_fraction=0.2):
-    """
-    :param cocoGt: Ground truth dataset in COCO format
-    :type cocoGt: COCO
-    """
+def get_rare_classes(cocoGt: COCO, topk_ann_fraction=0.1, topk_classes_fraction=0.2):
+    if not pycocotools_installed:
+        raise ImportError("pycocotools is not installed")
+
     anns_cat_ids = [ann["category_id"] for ann in cocoGt.anns.values()]
     cat_ids, cat_counts = np.unique(anns_cat_ids, return_counts=True)
     inds_sorted = np.argsort(cat_counts)
@@ -0,0 +1,93 @@
+import numpy as np
+
+pycocotools_installed = False
+try:
+    from pycocotools.coco import COCO  # pylint: disable=import-error
+    from pycocotools.cocoeval import COCOeval  # pylint: disable=import-error
+
+    pycocotools_installed = True
+except ImportError:
+    COCO = object
+    COCOeval = object
+
+
+class SlyCOCOeval(COCOeval):
+    def summarize(self):
+        """
+        Compute and display summary metrics for evaluation results.
+        Note this functin can *only* be applied on the default parameter setting
+        """
+
+        def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
+            p = self.params
+            iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
+            titleStr = "Average Precision" if ap == 1 else "Average Recall"
+            typeStr = "(AP)" if ap == 1 else "(AR)"
+            iouStr = (
+                "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
+                if iouThr is None
+                else "{:0.2f}".format(iouThr)
+            )
+
+            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
+            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
+            if ap == 1:
+                # dimension of precision: [TxRxKxAxM]
+                s = self.eval["precision"]
+                # IoU
+                if iouThr is not None:
+                    t = np.where(iouThr == p.iouThrs)[0]
+                    s = s[t]
+                s = s[:, :, :, aind, mind]
+            else:
+                # dimension of recall: [TxKxAxM]
+                s = self.eval["recall"]
+                if iouThr is not None:
+                    t = np.where(iouThr == p.iouThrs)[0]
+                    s = s[t]
+                s = s[:, :, aind, mind]
+            if len(s[s > -1]) == 0:
+                mean_s = -1
+            else:
+                mean_s = np.mean(s[s > -1])
+            print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
+            return mean_s
+
+        def _summarizeDets():
+            stats = np.zeros((12,))
+            stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
+            stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
+            stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
+            stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
+            stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
+            stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
+            stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
+            stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
+            stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
+            stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
+            stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
+            stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
+            return stats
+
+        def _summarizeKps():
+            stats = np.zeros((10,))
+            stats[0] = _summarize(1, maxDets=20)
+            stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
+            stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
+            stats[3] = _summarize(1, maxDets=20, areaRng="medium")
+            stats[4] = _summarize(1, maxDets=20, areaRng="large")
+            stats[5] = _summarize(0, maxDets=20)
+            stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
+            stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
+            stats[8] = _summarize(0, maxDets=20, areaRng="medium")
+            stats[9] = _summarize(0, maxDets=20, areaRng="large")
+            return stats
+
+        if not self.eval:
+            raise Exception("Please run accumulate() first")
+        iouType = self.params.iouType
+        if iouType == "segm" or iouType == "bbox":
+            summarize = _summarizeDets
+        elif iouType == "keypoints":
+            summarize = _summarizeKps
+        self.stats = summarize()  # pylint: disable=possibly-used-before-assignment
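
SlyCOCOeval follows the stock pycocotools COCOeval.summarize() closely; the visible difference appears to be that the first AP summary passes maxDets=self.params.maxDets[2] instead of relying on the hardcoded default of 100, so a custom max_detections value still resolves to a valid maxDets slot. A hedged usage sketch (the JSON file names are placeholders; calculate_metrics() above performs the equivalent wiring):

    from supervisely.nn.benchmark.utils.detection.coco_eval import (
        COCO,
        SlyCOCOeval,
        pycocotools_installed,
    )

    if pycocotools_installed:
        cocoGt = COCO("ground_truth_coco.json")           # hypothetical COCO-format ground truth
        cocoDt = cocoGt.loadRes("predictions_coco.json")  # hypothetical detections
        cocoEval = SlyCOCOeval(cocoGt, cocoDt, iouType="bbox")
        cocoEval.params.maxDets[-1] = 300                 # e.g. max_detections: 300 in the yaml
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()                              # AP/AR reported at maxDets=300, not 100
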
@@ -71,6 +71,31 @@ from supervisely.sly_logger import logger
 from supervisely.task.progress import Progress, tqdm_sly
 
 
+class CustomUnpickler(pickle.Unpickler):
+    """
+    Custom Unpickler to load pickled objects with fields that are not present in the class definition.
+    Used to load old pickled objects that have been pickled with a class that has been updated.
+    Supports loading namedtuple objects with missing fields.
+    """
+
+    def find_class(self, module, name):
+        cls = super().find_class(module, name)
+        if hasattr(cls, "_fields"):
+            orig_new = cls.__new__
+
+            def new(cls, *args, **kwargs):
+                if len(args) < len(cls._fields):
+                    # Set missed attrs to None
+                    args = list(args) + [None] * (len(cls._fields) - len(args))
+                return orig_new(cls, *args, **kwargs)
+
+            # Create a new class dynamically
+            NewCls = type(f"Pickled{cls.__name__}", (cls,), {"__new__": new})
+            return NewCls
+
+        return cls
+
+
 # @TODO: rename img_path to item_path (maybe convert namedtuple to class and create fields and props)
 class ItemPaths(NamedTuple):
     #: :class:`str`: Full image file path of item
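
CustomUnpickler is used further down when a project is restored from a binary snapshot: if an SDK namedtuple (for example an image info record) gained new fields after the snapshot was written, the missing positional values are padded with None instead of raising a TypeError. A self-contained sketch of the mechanism, assuming the class is importable from supervisely.project.project in 6.73.309 (the Info namedtuple is purely illustrative):

    import io
    import pickle
    from collections import namedtuple

    from supervisely.project.project import CustomUnpickler  # assumption: module-level class in 6.73.309

    # 1) An "old" two-field namedtuple gets pickled (stands in for an older SDK release).
    Info = namedtuple("Info", ["id", "name"])
    payload = pickle.dumps(Info(1, "lemons"))

    # 2) The "new" release adds a field to the same class.
    Info = namedtuple("Info", ["id", "name", "link"])

    # 3) pickle.loads(payload) would now fail with a missing-argument TypeError;
    #    CustomUnpickler pads the absent positional values with None instead.
    restored = CustomUnpickler(io.BytesIO(payload)).load()
    print(restored.link)  # None
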
@@ -3289,10 +3314,10 @@ class Project:
         figures: Dict[int, List[sly.FigureInfo]]  # image_id: List of figure_infos
         alpha_geometries: Dict[int, List[dict]]  # figure_id: List of geometries
         with file if isinstance(file, io.BytesIO) else open(file, "rb") as f:
-            project_info, meta, dataset_infos, image_infos, figures, alpha_geometries = pickle.load(
-                f
+            unpickler = CustomUnpickler(f)
+            project_info, meta, dataset_infos, image_infos, figures, alpha_geometries = (
+                unpickler.load()
             )
-
         if project_name is None:
             project_name = project_info.name
         new_project_info = api.project.create(
@@ -3354,7 +3379,8 @@ class Project:
         )
         workspace_info = api.workspace.get_info_by_id(workspace_id)
         existing_links = api.image.check_existing_links(
-            list(set([inf.link for inf in image_infos if inf.link])), team_id=workspace_info.team_id
+            list(set([inf.link for inf in image_infos if inf.link])),
+            team_id=workspace_info.team_id,
         )
         image_infos = sorted(image_infos, key=lambda info: info.link is not None)
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: supervisely
-Version: 6.73.307
+Version: 6.73.309
 Summary: Supervisely Python SDK.
 Home-page: https://github.com/supervisely/supervisely
 Author: Supervisely
@@ -15,7 +15,7 @@ supervisely/annotation/obj_class_mapper.py,sha256=aIJDoRULqcAOD2a1CQPk2OOF8k3VPP
 supervisely/annotation/renamer.py,sha256=rVvNLtpfd1kKUVPgm8VlLmYSDByWjriJ92FobC4buqY,1944
 supervisely/annotation/tag.py,sha256=m_sPgrr_ZW8HuiK7Fr2-WnHwKwez1WZtGrcdZN2DZuQ,17598
 supervisely/annotation/tag_collection.py,sha256=MVPTzer9rLpD4O0g2XhYFUheK7-ILgwAXDJd1em3kZ8,10015
-supervisely/annotation/tag_meta.py,sha256=2UkgRSMuQ7LXH48Ok2bJNdlK-miX54wXYAoB_as9_a0,27543
+supervisely/annotation/tag_meta.py,sha256=nTRKVuW_h6mGdTxuwXvtR2ERhwOvjjdUf635ONLFFx8,27767
 supervisely/annotation/tag_meta_collection.py,sha256=JY2wAo4dF47UylYeglkJtRtpVOArGjf3dXeEYIHFWP0,14491
 supervisely/annotation/tag_meta_mapper.py,sha256=RWeTrxJ64syodyhXIRSH007bX6Hr3B45tG14YTcpwSU,1639
 supervisely/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -36,7 +36,7 @@ supervisely/api/module_api.py,sha256=06YJ-LsDDEAIZTnoexqcUilgT3VBDw-I0b4h_bQnCO0
 supervisely/api/neural_network_api.py,sha256=ktPVRO4Jeulougio8F0mioJJHwRJcX250Djp1wBoQ9c,7620
 supervisely/api/object_class_api.py,sha256=-rQcKwhBw3iL9KNH9c1ROgoimgWM1ls6Wi_tb1R-MzY,7683
 supervisely/api/plugin_api.py,sha256=TlfrosdRuYG4NUxk92QiQoVaOdztFspPpygyVa3M3zk,5283
-supervisely/api/project_api.py,sha256=pBhZlSL3IA0FeBoK8eyhFDh8pnW_X8mbmit4cAJQUoo,79951
+supervisely/api/project_api.py,sha256=WwpwzsQjue2276Rn_wkcPxJAM4OaQr6haf81ZmCGpBI,79992
 supervisely/api/project_class_api.py,sha256=5cyjdGPPb2tpttu5WmYoOxUNiDxqiojschkhZumF0KM,1426
 supervisely/api/remote_storage_api.py,sha256=qTuPhPsstgEjRm1g-ZInddik8BNC_38YvBBPvgmim6U,17790
 supervisely/api/report_api.py,sha256=Om7CGulUbQ4BuJ16eDtz7luLe0JQNqab-LoLpUXu7YE,7123
@@ -93,7 +93,7 @@ supervisely/app/fastapi/index.html,sha256=6K8akK7_k9Au-BpZ7cM2qocuiegLdXz8UFPnWg
 supervisely/app/fastapi/no_html_main.html,sha256=NhQP7noyORBx72lFh1CQKgBRupkWjiq6Gaw-9Hkvg7c,37
 supervisely/app/fastapi/offline.py,sha256=CwMMkJ1frD6wiZS-SEoNDtQ1UJcJe1Ob6ohE3r4CQL8,7414
 supervisely/app/fastapi/request.py,sha256=NU7rKmxJ1pfkDZ7_yHckRcRAueJRQIqCor11UO2OHr8,766
-supervisely/app/fastapi/subapp.py,sha256=MlB2dcHEtF0RPk-hxk67Gb1wBeGHsgCEIqAaBHzLEoY,43653
+supervisely/app/fastapi/subapp.py,sha256=AE_AJQ5ZfNKbV38To2uhSnSR7C_XoI99lAc0nDXOtbU,44064
 supervisely/app/fastapi/templating.py,sha256=JOAW8U-14GD47E286mzFi3mZSPbm_csJGqtXWLRM4rc,2929
 supervisely/app/fastapi/utils.py,sha256=GZuTWLcVRGVx8TL3jVEYUOZIT2FawbwIe2kAOBLw9ho,398
 supervisely/app/fastapi/websocket.py,sha256=TlRSPOAhRItTv1HGvdukK1ZvhRjMUxRa-lJlsRR9rJw,1308
@@ -781,14 +781,14 @@ supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/renormaliz
 supervisely/nn/benchmark/comparison/semantic_segmentation/vis_metrics/speedtest.py,sha256=sQDkzfpVNaSYBHVcHYqydRSWN0i-yV9uhtEAggg295A,10879
 supervisely/nn/benchmark/instance_segmentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/instance_segmentation/benchmark.py,sha256=lTDzgKGpfeF5o_a2nS56wiAsUQPH1eubk37b9CaB2KI,1171
-supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=NoaecTcEp-LhsDQcHNQZi1gzNXcahgycKy_C4aDcSSw,304
+supervisely/nn/benchmark/instance_segmentation/evaluation_params.yaml,sha256=fEYA-ExmxDiSzRl7YfBMpF6LZui0tcDgZyC-YUvmmqg,367
 supervisely/nn/benchmark/instance_segmentation/evaluator.py,sha256=mpCi8S6YNwlVvgcERQSHBOhC9PrSfQkQ55pPTcK6V9c,2811
 supervisely/nn/benchmark/instance_segmentation/text_templates.py,sha256=usKqm_FaO-WXiopxzrdjpIrOqHdqFQ89lmYoayzt6KM,25597
 supervisely/nn/benchmark/instance_segmentation/visualizer.py,sha256=8NscOKy7JK4AG-Czu3SM0qJQXLDfKD9URdG1d4nz89E,564
 supervisely/nn/benchmark/object_detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 supervisely/nn/benchmark/object_detection/base_vis_metric.py,sha256=44Em214YPxZgn2hEzFvqBcnjsyiElD9TSuLamwUnx20,1611
 supervisely/nn/benchmark/object_detection/benchmark.py,sha256=Wb4xlFXilIMVfsifNNQY25uE52NeEDLzQpnq8QPYq9U,1086
-supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=NoaecTcEp-LhsDQcHNQZi1gzNXcahgycKy_C4aDcSSw,304
+supervisely/nn/benchmark/object_detection/evaluation_params.yaml,sha256=fEYA-ExmxDiSzRl7YfBMpF6LZui0tcDgZyC-YUvmmqg,367
 supervisely/nn/benchmark/object_detection/evaluator.py,sha256=s-hPBm5BmoCgwoozVyDacum4kVLNtYK6I6NCt_L_LSA,7278
 supervisely/nn/benchmark/object_detection/metric_provider.py,sha256=59UnOX7VuYvVQFeUJy5v6EFIpqSDNgx5wMp9qyVixgM,23686
 supervisely/nn/benchmark/object_detection/text_templates.py,sha256=4BgTIX1Co4WK9_VSUa1qWCmh5OJzo3_opVU6LOjKSjc,25842
@@ -837,7 +837,8 @@ supervisely/nn/benchmark/semantic_segmentation/vis_metrics/speedtest.py,sha256=0
 supervisely/nn/benchmark/semantic_segmentation/vis_metrics/vis_texts.py,sha256=rRdYZxmhQX4T3RsXJVGp34NMZPz8jUHtVvBN5BpPJ5I,603
 supervisely/nn/benchmark/utils/__init__.py,sha256=r0Ay4OMqfIL-9wwJykKji_Uks2Dm9vUhyA7hT8eLxII,657
 supervisely/nn/benchmark/utils/detection/__init__.py,sha256=6CsMxQqUp1GOc-2Wmnw2lamtvklHo2tcCYTxgT5NsZo,88
-supervisely/nn/benchmark/utils/detection/calculate_metrics.py,sha256=gC6by_2HT9ACuxbtW93eKeioW9sCMMDM3aPi99w1xx8,11963
+supervisely/nn/benchmark/utils/detection/calculate_metrics.py,sha256=plgBNJXRZ2MEY_Es8kVnrzpsZAyZqtvsOFT3uZocBhU,12593
+supervisely/nn/benchmark/utils/detection/coco_eval.py,sha256=9Pz0_zUzg8qCOWyE24wzhRoDLO5z9qPuWoqc8Pj29do,4135
 supervisely/nn/benchmark/utils/detection/metrics.py,sha256=oyictdJ7rRDUkaVvHoxntywW5zZweS8pIJ1bN6JgXtE,2420
 supervisely/nn/benchmark/utils/detection/sly2coco.py,sha256=0O2LSCU5zIX34mD4hZIv8O3-j6LwnB0DqhiVPAiosO8,6883
 supervisely/nn/benchmark/utils/detection/utlis.py,sha256=dKhsOGmQKH20-IlD90DWfZzi171j65N71hNdHRCX5Hs,954
@@ -1012,7 +1013,7 @@ supervisely/project/data_version.py,sha256=nknaWJSUCwoDyNG9_d1KA-GjzidhV9zd9Cn8c
 supervisely/project/download.py,sha256=zb8sb4XZ6Qi3CP7fmtLRUAYzaxs_W0WnOfe2x3ZVRMs,24639
 supervisely/project/pointcloud_episode_project.py,sha256=yiWdNBQiI6f1O9sr1pg8JHW6O-w3XUB1rikJNn3Oung,41866
 supervisely/project/pointcloud_project.py,sha256=Kx1Vaes-krwG3BiRRtHRLQxb9G5m5bTHPN9IzRqmNWo,49399
-supervisely/project/project.py,sha256=pDKRPZZCwW79wlfDi4JK9rOZisO2vWRfiOD_j7AId5k,202403
+supervisely/project/project.py,sha256=LHsrMBbMxdMynhuS0RhjiRFxnzikj6esyKFfR8kEZ9Q,203388
 supervisely/project/project_meta.py,sha256=26s8IiHC5Pg8B1AQi6_CrsWteioJP2in00cRNe8QlW0,51423
 supervisely/project/project_settings.py,sha256=NLThzU_DCynOK6hkHhVdFyezwprn9UqlnrLDe_3qhkY,9347
 supervisely/project/project_type.py,sha256=EZDJFRi4MmC_5epYexBgML5WMZsWdEVk_CjqDQy5m3c,572
@@ -1074,9 +1075,9 @@ supervisely/worker_proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 supervisely/worker_proto/worker_api_pb2.py,sha256=VQfi5JRBHs2pFCK1snec3JECgGnua3Xjqw_-b3aFxuM,59142
 supervisely/worker_proto/worker_api_pb2_grpc.py,sha256=3BwQXOaP9qpdi0Dt9EKG--Lm8KGN0C5AgmUfRv77_Jk,28940
 supervisely_lib/__init__.py,sha256=7-3QnN8Zf0wj8NCr2oJmqoQWMKKPKTECvjH9pd2S5vY,159
-supervisely-6.73.307.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-supervisely-6.73.307.dist-info/METADATA,sha256=cAJxuLzG815JpY_UwjFG3soVNxtAiUo89L7wcISLSDY,33573
-supervisely-6.73.307.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-supervisely-6.73.307.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
-supervisely-6.73.307.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
-supervisely-6.73.307.dist-info/RECORD,,
+supervisely-6.73.309.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+supervisely-6.73.309.dist-info/METADATA,sha256=Ek8RYYRA_kKO3OyWLaXtIBmF7b61iOmD3h56s3gDFaI,33573
+supervisely-6.73.309.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+supervisely-6.73.309.dist-info/entry_points.txt,sha256=U96-5Hxrp2ApRjnCoUiUhWMqijqh8zLR03sEhWtAcms,102
+supervisely-6.73.309.dist-info/top_level.txt,sha256=kcFVwb7SXtfqZifrZaSE3owHExX4gcNYe7Q2uoby084,28
+supervisely-6.73.309.dist-info/RECORD,,