scale-nucleus 0.12b3.tar.gz → 0.12b9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/LICENSE +0 -0
  2. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/PKG-INFO +2 -2
  3. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/README.md +0 -0
  4. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/client.py +0 -0
  5. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/datasets.py +0 -0
  6. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/helpers/__init__.py +0 -0
  7. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/helpers/nucleus_url.py +0 -0
  8. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/helpers/web_helper.py +0 -0
  9. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/install_completion.py +0 -0
  10. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/jobs.py +0 -0
  11. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/models.py +0 -0
  12. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/nu.py +0 -0
  13. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/reference.py +0 -0
  14. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/slices.py +0 -0
  15. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/cli/tests.py +0 -0
  16. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/__init__.py +0 -0
  17. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/annotation.py +0 -0
  18. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/annotation_uploader.py +0 -0
  19. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/async_utils.py +0 -0
  20. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/autocurate.py +0 -0
  21. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/camera_params.py +0 -0
  22. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/connection.py +0 -0
  23. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/constants.py +0 -0
  24. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/data_transfer_object/__init__.py +0 -0
  25. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/data_transfer_object/dataset_details.py +0 -0
  26. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/data_transfer_object/dataset_info.py +0 -0
  27. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/data_transfer_object/dataset_size.py +0 -0
  28. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/data_transfer_object/scenes_list.py +0 -0
  29. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/dataset.py +0 -0
  30. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/dataset_item.py +0 -0
  31. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/dataset_item_uploader.py +0 -0
  32. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/deprecation_warning.py +0 -0
  33. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/errors.py +0 -0
  34. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/job.py +0 -0
  35. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/logger.py +0 -0
  36. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metadata_manager.py +0 -0
  37. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/__init__.py +2 -0
  38. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/base.py +0 -0
  39. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/categorization_metrics.py +0 -0
  40. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/cuboid_metrics.py +0 -0
  41. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/cuboid_utils.py +0 -0
  42. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/custom_types.py +0 -0
  43. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/errors.py +0 -0
  44. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/filtering.py +17 -12
  45. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/filters.py +0 -0
  46. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/metric_utils.py +0 -0
  47. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/polygon_metrics.py +0 -0
  48. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/polygon_utils.py +0 -0
  49. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/segmentation_metrics.py +79 -82
  50. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/segmentation_to_poly_metrics.py +15 -11
  51. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/metrics/segmentation_utils.py +0 -0
  52. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/model.py +0 -0
  53. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/model_run.py +0 -0
  54. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/payload_constructor.py +0 -0
  55. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/prediction.py +0 -0
  56. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/pydantic_base.py +0 -0
  57. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/quaternion.py +0 -0
  58. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/retry_strategy.py +0 -0
  59. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/scene.py +0 -0
  60. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/shapely_not_installed.py +0 -0
  61. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/slice.py +0 -0
  62. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/upload_response.py +0 -0
  63. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/url_utils.py +0 -0
  64. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/utils.py +0 -0
  65. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/__init__.py +0 -0
  66. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/client.py +0 -0
  67. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/constants.py +0 -0
  68. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/data_transfer_objects/__init__.py +0 -0
  69. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/data_transfer_objects/eval_function.py +0 -0
  70. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/data_transfer_objects/scenario_test.py +0 -0
  71. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/data_transfer_objects/scenario_test_evaluations.py +0 -0
  72. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/data_transfer_objects/scenario_test_metric.py +0 -0
  73. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/errors.py +0 -0
  74. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/eval_functions/__init__.py +0 -0
  75. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/eval_functions/available_eval_functions.py +0 -0
  76. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/eval_functions/base_eval_function.py +0 -0
  77. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/eval_functions/config_classes/__init__.py +0 -0
  78. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/eval_functions/config_classes/segmentation.py +0 -0
  79. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/scenario_test.py +0 -0
  80. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/scenario_test_evaluation.py +0 -0
  81. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/scenario_test_metric.py +0 -0
  82. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/nucleus/validate/utils.py +0 -0
  83. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/pyproject.toml +2 -2
  84. {scale-nucleus-0.12b3 → scale-nucleus-0.12b9}/setup.py +2 -2
--- scale-nucleus-0.12b3/PKG-INFO
+++ scale-nucleus-0.12b9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: scale-nucleus
-Version: 0.12b3
+Version: 0.12b9
 Summary: The official Python client library for Nucleus, the Data Platform for AI
 Home-page: https://scale.com/nucleus
 License: MIT
@@ -14,7 +14,7 @@ Classifier: Programming Language :: Python :: 3.7
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Provides-Extra: shapely
-Requires-Dist: Pillow (>=8.3.1)
+Requires-Dist: Pillow (>=7.1.2)
 Requires-Dist: Shapely (>=1.8.0); extra == "shapely"
 Requires-Dist: aiohttp (>=3.7.4,<4.0.0)
 Requires-Dist: click (>=7.1.2,<9.0)
--- scale-nucleus-0.12b3/nucleus/metrics/__init__.py
+++ scale-nucleus-0.12b9/nucleus/metrics/__init__.py
@@ -5,6 +5,8 @@ from .filtering import (
     FieldFilter,
     ListOfOrAndFilters,
     MetadataFilter,
+    SegmentFieldFilter,
+    SegmentMetadataFilter,
     apply_filters,
 )
 from .polygon_metrics import (
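The `__init__.py` change above re-exports two new segment-level filter classes. A minimal import sketch; the positional `(key, op, value)` argument order is an assumption based on the `FieldFilter`/`MetadataFilter` construction visible in the `filtering.py` hunks below, not a confirmed signature:

```python
# Hypothetical usage of the newly exported names; constructor argument order
# (key, op, value) is assumed, mirroring FieldFilter/MetadataFilter.
from nucleus.metrics import SegmentFieldFilter, SegmentMetadataFilter

label_filter = SegmentFieldFilter("label", "==", "road")            # filter on a Segment field
visibility_filter = SegmentMetadataFilter("visibility", ">=", 0.5)  # filter on Segment metadata
```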
--- scale-nucleus-0.12b3/nucleus/metrics/filtering.py
+++ scale-nucleus-0.12b9/nucleus/metrics/filtering.py
@@ -258,7 +258,7 @@ DNFFieldOrMetadataFilters = List[
 def _attribute_getter(
     field_name: str,
     allow_missing: bool,
-    ann_or_pred: Union[AnnotationTypes, PredictionTypes],
+    ann_or_pred: Union[AnnotationTypes, PredictionTypes, Segment],
 ):
     """Create a function to get object fields"""
     if allow_missing:
@@ -296,7 +296,7 @@ class AlwaysFalseComparison:
 def _metadata_field_getter(
     field_name: str,
     allow_missing: bool,
-    ann_or_pred: Union[AnnotationTypes, PredictionTypes],
+    ann_or_pred: Union[AnnotationTypes, PredictionTypes, Segment],
 ):
     """Create a function to get a metadata field"""
     if isinstance(
@@ -331,7 +331,7 @@ def _metadata_field_getter(
 
 def _filter_to_comparison_function(  # pylint: disable=too-many-return-statements
     filter_def: Filter,
-) -> Callable[[Union[AnnotationTypes, PredictionTypes]], bool]:
+) -> Callable[[Union[AnnotationTypes, PredictionTypes, Segment]], bool]:
     """Creates a comparison function from a filter configuration to apply to annotations or predictions
 
     Parameters:
@@ -385,9 +385,10 @@ def _apply_field_or_metadata_filters(
     ],
     filters: DNFFieldOrMetadataFilters,
 ):
-    """Apply filters to list of annotations or list of predictions
+    """Apply filters to list of annotations or list of predictions or to a list of segments
+
     Attributes:
-        filterable_sequence: Prediction or Annotation
+        filterable_sequence: Prediction or Annotation or Segment sequence
         filters: Filter predicates. Allowed formats are:
             ListOfAndFilters where each Filter forms a chain of AND predicates.
             or
@@ -440,7 +441,9 @@ def _split_segment_filters(
 
 
 def _filter_segments(
-    anns_or_preds: Union[Sequence[AnnotationTypes], Sequence[PredictionTypes]],
+    anns_or_preds: Union[
+        Sequence[SegmentationAnnotation], Sequence[SegmentationPrediction]
+    ],
     segment_filters: OrAndDNFFilters,
 ):
     """Filter Segments of a SegmentationAnnotation or Prediction
@@ -451,11 +454,13 @@ def _filter_segments(
         return anns_or_preds
 
     # Transform segment filter types to field and metadata to iterate over annotation sub fields
-    transformed_or_branches = []
+    transformed_or_branches = (
+        []
+    )  # type: List[List[Union[MetadataFilter, FieldFilter]]]
     for and_branch in segment_filters:
-        transformed_and = []
+        transformed_and = []  # type: List[Union[MetadataFilter, FieldFilter]]
         for filter_statement in and_branch:
-            if filter_statement.type is FilterType.SEGMENT_FIELD:
+            if filter_statement.type == FilterType.SEGMENT_FIELD:
                 transformed_and.append(
                     FieldFilter(
                         filter_statement.key,
@@ -464,7 +469,7 @@
                         filter_statement.allow_missing,
                     )
                 )
-            elif filter_statement.type is FilterType.SEGMENT_METADATA:
+            elif filter_statement.type == FilterType.SEGMENT_METADATA:
                 transformed_and.append(
                     MetadataFilter(
                         filter_statement.key,
@@ -484,7 +489,7 @@
             ann_or_pred, (SegmentationAnnotation, SegmentationPrediction)
         ):
             ann_or_pred.annotations = _apply_field_or_metadata_filters(
-                ann_or_pred.annotations, transformed_or_branches
+                ann_or_pred.annotations, transformed_or_branches  # type: ignore
             )
             segments_filtered.append(ann_or_pred)
 
@@ -513,7 +518,7 @@ def apply_filters(
 
     dnf_filters = ensureDNFFilters(filters)
     filters, segment_filters = _split_segment_filters(dnf_filters)
-    filtered = _apply_field_or_metadata_filters(ann_or_pred, filters)
+    filtered = _apply_field_or_metadata_filters(ann_or_pred, filters)  # type: ignore
     filtered = _filter_segments(filtered, segment_filters)
 
     return filtered
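As the updated docstrings describe, filters are given in disjunctive normal form: an outer list of OR branches, each an inner list of AND-ed predicates, with `_split_segment_filters` routing the segment-typed filters to `_filter_segments`. A sketch of the shape `apply_filters` accepts, again assuming the `(key, op, value)` constructor order for `FieldFilter`/`MetadataFilter`:

```python
from nucleus.metrics import FieldFilter, MetadataFilter, apply_filters

# DNF: (label == "car" AND occluded == False) OR (label == "truck")
dnf_filters = [
    [
        FieldFilter("label", "==", "car"),
        MetadataFilter("occluded", "==", False),
    ],
    [FieldFilter("label", "==", "truck")],
]

anns = []  # e.g. AnnotationList contents; left empty so the sketch runs standalone
filtered = apply_filters(anns, dnf_filters)
```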
--- scale-nucleus-0.12b3/nucleus/metrics/segmentation_metrics.py
+++ scale-nucleus-0.12b9/nucleus/metrics/segmentation_metrics.py
@@ -75,8 +75,6 @@ class SegmentationMaskMetric(Metric):
         # TODO -> add custom filtering to Segmentation(Annotation|Prediction).annotations.(metadata|label)
         super().__init__(annotation_filters, prediction_filters)
         self.loader = SegmentationMaskLoader(S3FileSystem(anon=False))
-        # NOTE: We store histogram for re-use in subsequently calculated metrics
-        self.confusion: Optional[np.ndarray] = None
 
     def call_metric(
         self, annotations: AnnotationList, predictions: PredictionList
@@ -97,30 +95,54 @@ class SegmentationMaskMetric(Metric):
             if predictions.segmentation_predictions
             else None
         )
-        if annotation is None or prediction is None:
+        if (
+            annotation
+            and prediction
+            and annotation.annotations
+            and prediction.annotations
+        ):
+            annotation_img = self.get_mask_channel(annotation)
+            pred_img = self.get_mask_channel(prediction)
+            return self._metric_impl(
+                np.asarray(annotation_img, dtype=np.int32),
+                np.asarray(pred_img, dtype=np.int32),
+                annotation,
+                prediction,
+            )
+        else:
             return ScalarResult(0, weight=0)
-        annotation_img = self.loader.fetch(annotation.mask_url)
-        pred_img = self.loader.fetch(prediction.mask_url)
-        return self._metric_impl(
-            np.asarray(annotation_img, dtype=np.int32),
-            np.asarray(pred_img, dtype=np.int32),
-            annotation,
-            prediction,
-        )
+
+    def get_mask_channel(self, ann_or_pred):
+        """Some annotations are stored as RGB instead of L (single-channel).
+        We expect the image to be faux-single-channel with all the channels repeating so we choose the first one.
+        """
+        img = self.loader.fetch(ann_or_pred.mask_url)
+        if img.mode not in {"L", "F"}:
+            # TODO: Do we have to do anything more advanced? Currently expect all channels to have same data
+            img = img.getchannel(0)
+        return img
 
     @abc.abstractmethod
     def _metric_impl(
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ):
         pass
 
     def _calculate_confusion_matrix(
         self, annotation, annotation_img, prediction, prediction_img
-    ):
+    ) -> np.ndarray:
+        """This calculates a confusion matrix with ground_truth_index X predicted_index summary
+
+        Notes:
+            If filtering has been applied we filter out missing segments from the confusion matrix.
+
+        TODO(gunnar): Allow pre-seeding confusion matrix (all of the metrics calculate the same confusion matrix ->
+            we can calculate it once and then use it for all other metrics in the chain)
+        """
         # NOTE: This creates a max(class_index) * max(class_index) MAT. If we have np.int16 this could become
         # huge. We could probably use a sparse matrix instead or change the logic to only create count(index) ** 2
         # matrix (we only need to keep track of available indexes)
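The new `get_mask_channel` helper leans on standard Pillow behavior: `Image.getchannel(0)` extracts the first band of a multi-channel image as a single-channel `L`-mode image. A self-contained sketch of the normalization the helper performs (synthetic data, not package code):

```python
import numpy as np
from PIL import Image

# Build a faux-single-channel mask: the class index repeated across R, G and B.
indices = np.zeros((4, 4), dtype=np.uint8)
indices[1:3, 1:3] = 7  # a small segment with class index 7
rgb_mask = Image.fromarray(np.stack([indices] * 3, axis=-1), mode="RGB")

img = rgb_mask
if img.mode not in {"L", "F"}:  # same check as get_mask_channel
    img = img.getchannel(0)     # keep the first band only

assert img.mode == "L"
assert np.array_equal(np.asarray(img, dtype=np.int32), indices)
```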
@@ -131,31 +153,26 @@ class SegmentationMaskMetric(Metric):
             )
             + 1  # to include 0
         )
-        confusion = (
-            _fast_hist(annotation_img, prediction_img, num_classes)
-            if self.confusion is None
-            else self.confusion
-        )
+        confusion = _fast_hist(annotation_img, prediction_img, num_classes)
         if self.annotation_filters or self.prediction_filters:
             # we mask the confusion matrix instead of the images
             if self.annotation_filters:
                 annotation_indexes = {
                     segment.index for segment in annotation.annotations
                 }
-                flatten_indexes = (
-                    set(range(confusion.shape[0])) - annotation_indexes
+                indexes_to_remove = (
+                    set(range(confusion.shape[0] - 1)) - annotation_indexes
                 )
-                for row in flatten_indexes:
+                for row in indexes_to_remove:
                     confusion[row, :] = 0
-                confusion[annotation_indexes, :] = 0
             if self.prediction_filters:
                 prediction_indexes = {
                     segment.index for segment in prediction.annotations
                 }
-                flatten_indexes = (
-                    set(range(confusion.shape[0])) - prediction_indexes
+                indexes_to_remove = (
+                    set(range(confusion.shape[0] - 1)) - prediction_indexes
                 )
-                for col in flatten_indexes:
+                for col in indexes_to_remove:
                     confusion[:, col] = 0
         return confusion
 
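`_fast_hist` itself is unchanged and not shown in this diff; a common implementation of this kind of pixel-label histogram (a reasonable mental model for the matrix being masked above, though not necessarily the package's exact code) combines ground-truth and predicted indices into one `np.bincount`:

```python
import numpy as np

def fast_hist_sketch(gt: np.ndarray, pred: np.ndarray, num_classes: int) -> np.ndarray:
    """num_classes x num_classes matrix: rows = ground-truth index, columns = predicted index."""
    mask = (gt >= 0) & (gt < num_classes)
    combined = num_classes * gt[mask].astype(int) + pred[mask]
    return np.bincount(combined, minlength=num_classes**2).reshape(num_classes, num_classes)

gt = np.array([0, 0, 1, 1, 2])
pred = np.array([0, 1, 1, 1, 2])
print(fast_hist_sketch(gt, pred, num_classes=3))
# [[1 1 0]
#  [0 2 0]
#  [0 0 1]]
```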
@@ -201,22 +218,18 @@ class SegmentationIOU(SegmentationMaskMetric):
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ) -> ScalarResult:
-        if annotation is None or prediction is None:
-            # TODO: Throw error when we wrap each item in try catch
-            return ScalarResult(0, weight=0)
-
-        self.confusion = self._calculate_confusion_matrix(
+        confusion = self._calculate_confusion_matrix(
             annotation, annotation_img, prediction, prediction_img
         )
 
         with np.errstate(divide="ignore", invalid="ignore"):
-            iou = np.diag(self.confusion) / (
-                self.confusion.sum(axis=1)
-                + self.confusion.sum(axis=0)
-                - np.diag(self.confusion)
+            iou = np.diag(confusion) / (
+                confusion.sum(axis=1)
+                + confusion.sum(axis=0)
+                - np.diag(confusion)
             )
         return ScalarResult(value=np.nanmean(iou), weight=annotation_img.size)  # type: ignore
 
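The per-class IoU computed above is `diag / (row sums + column sums - diag)`, i.e. intersection over union per class index. A quick numeric check with plain numpy (toy confusion matrix, not package code):

```python
import numpy as np

confusion = np.array([
    [3, 1],  # ground-truth class 0: 3 correct, 1 predicted as class 1
    [2, 4],  # ground-truth class 1: 2 predicted as class 0, 4 correct
])
with np.errstate(divide="ignore", invalid="ignore"):
    iou = np.diag(confusion) / (
        confusion.sum(axis=1) + confusion.sum(axis=0) - np.diag(confusion)
    )
print(iou)              # [0.5        0.57142857]  (3/6 and 4/7)
print(np.nanmean(iou))  # ~0.536, the value wrapped in the ScalarResult
```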
@@ -265,20 +278,16 @@ class SegmentationPrecision(SegmentationMaskMetric):
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ) -> ScalarResult:
-        if annotation is None or prediction is None:
-            # TODO: Throw error when we wrap each item in try catch
-            return ScalarResult(0, weight=0)
-
-        self.confusion = self._calculate_confusion_matrix(
+        confusion = self._calculate_confusion_matrix(
             annotation, annotation_img, prediction, prediction_img
         )
 
         with np.errstate(divide="ignore", invalid="ignore"):
-            true_pos = np.diag(self.confusion)
-            precision = true_pos / np.sum(self.confusion, axis=0)
+            true_pos = np.diag(confusion)
+            precision = true_pos / np.sum(confusion, axis=1)
             mean_precision = np.nanmean(precision)
         return ScalarResult(value=mean_precision, weight=1)  # type: ignore
 
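For a confusion matrix with rows indexed by ground truth and columns by prediction, `sum(axis=0)` yields per-class predicted-pixel counts while `sum(axis=1)` yields per-class ground-truth counts, so the axis chosen above determines whether `true_pos / sum` behaves as precision (TP / predicted) or recall (TP / ground truth). A toy matrix makes the convention easy to sanity-check:

```python
import numpy as np

confusion = np.array([
    [3, 1],  # rows: ground-truth class index
    [2, 4],  # columns: predicted class index
])
true_pos = np.diag(confusion)                   # [3, 4]
predicted_per_class = confusion.sum(axis=0)     # column sums: [5, 5]
ground_truth_per_class = confusion.sum(axis=1)  # row sums:    [4, 6]
print(true_pos / predicted_per_class)     # TP / predicted     -> precision-like: [0.6  0.8]
print(true_pos / ground_truth_per_class)  # TP / ground truth  -> recall-like:    [0.75 0.667]
```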
@@ -327,21 +336,18 @@ class SegmentationAveragePrecision(SegmentationMaskMetric):
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ) -> ScalarResult:
-        if annotation is None or prediction is None:
-            # TODO: Throw error when we wrap each item in try catch
-            return ScalarResult(0, weight=0)
 
-        self.confusion = self._calculate_confusion_matrix(
+        confusion = self._calculate_confusion_matrix(
             annotation, annotation_img, prediction, prediction_img
         )
 
         with np.errstate(divide="ignore", invalid="ignore"):
-            true_pos = np.diag(self.confusion)
-            precision = true_pos / np.sum(self.confusion, axis=0)
-            recall = true_pos / np.sum(self.confusion, axis=1)
+            true_pos = np.diag(confusion)
+            precision = true_pos / np.sum(confusion, axis=1)
+            recall = true_pos / np.sum(confusion, axis=0)
             average_precision = compute_average_precision(
                 np.nan_to_num(recall), np.nan_to_num(precision)
             )
@@ -395,20 +401,17 @@ class SegmentationRecall(SegmentationMaskMetric):
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ) -> ScalarResult:
-        if annotation is None or prediction is None:
-            # TODO: Throw error when we wrap each item in try catch
-            return ScalarResult(0, weight=0)
 
-        self.confusion = self._calculate_confusion_matrix(
+        confusion = self._calculate_confusion_matrix(
             annotation, annotation_img, prediction, prediction_img
         )
 
         with np.errstate(divide="ignore", invalid="ignore"):
-            true_pos = np.diag(self.confusion)
-            recall = np.nanmean(true_pos / np.sum(self.confusion, axis=1))
+            true_pos = np.diag(confusion)
+            recall = np.nanmean(true_pos / np.sum(confusion, axis=0))
         return ScalarResult(value=recall, weight=annotation_img.size)  # type: ignore
 
     def aggregate_score(self, results: List[MetricResult]) -> ScalarResult:
@@ -491,14 +494,11 @@ class SegmentationMAP(SegmentationMaskMetric):
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ) -> ScalarResult:
-        if annotation is None or prediction is None:
-            # TODO: Throw error when we wrap each item in try catch
-            return ScalarResult(0, weight=0)
 
-        self.confusion = self._calculate_confusion_matrix(
+        confusion = self._calculate_confusion_matrix(
             annotation, annotation_img, prediction, prediction_img
         )
         label_to_index = {a.label: a.index for a in annotation.annotations}
@@ -506,8 +506,8 @@
         ap_per_class = np.ndarray(num_classes)  # type: ignore
         with np.errstate(divide="ignore", invalid="ignore"):
             for class_idx, (_, index) in enumerate(label_to_index.items()):
-                true_pos = self.confusion[index, index]
-                false_pos = self.confusion[:, index].sum()
+                true_pos = confusion[index, index]
+                false_pos = confusion[:, index].sum()
                 samples = true_pos + false_pos
                 if samples:
                     ap_per_class[class_idx] = true_pos / samples
@@ -600,23 +600,20 @@ class SegmentationFWAVACC(SegmentationMaskMetric):
         self,
         annotation_img: np.ndarray,
         prediction_img: np.ndarray,
-        annotation: Optional[SegmentationAnnotation],
-        prediction: Optional[SegmentationPrediction],
+        annotation: SegmentationAnnotation,
+        prediction: SegmentationPrediction,
     ) -> ScalarResult:
-        if annotation is None or prediction is None:
-            # TODO: Throw error when we wrap each item in try catch
-            return ScalarResult(0, weight=0)
 
-        self.confusion = self._calculate_confusion_matrix(
+        confusion = self._calculate_confusion_matrix(
             annotation, annotation_img, prediction, prediction_img
         )
         with np.errstate(divide="ignore", invalid="ignore"):
-            iu = np.diag(self.confusion) / (
-                self.confusion.sum(axis=1)
-                + self.confusion.sum(axis=0)
-                - np.diag(self.confusion)
+            iu = np.diag(confusion) / (
+                confusion.sum(axis=1)
+                + confusion.sum(axis=0)
+                - np.diag(confusion)
             )
-            freq = self.confusion.sum(axis=1) / self.confusion.sum()
+            freq = confusion.sum(axis=0) / confusion.sum()
             fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
         return ScalarResult(value=np.nanmean(fwavacc), weight=1)  # type: ignore
 
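Frequency-weighted average accuracy weights each class's IoU by how often that class occurs (the new code derives `freq` from `axis=0` sums). Continuing the toy matrix from the IoU example above:

```python
import numpy as np

confusion = np.array([[3, 1], [2, 4]])
with np.errstate(divide="ignore", invalid="ignore"):
    iu = np.diag(confusion) / (
        confusion.sum(axis=1) + confusion.sum(axis=0) - np.diag(confusion)
    )
    freq = confusion.sum(axis=0) / confusion.sum()  # per-class frequency: [0.5, 0.5]
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
print(fwavacc)  # 0.5 * 0.5 + 0.5 * (4/7) ≈ 0.536
```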
--- scale-nucleus-0.12b3/nucleus/metrics/segmentation_to_poly_metrics.py
+++ scale-nucleus-0.12b9/nucleus/metrics/segmentation_to_poly_metrics.py
@@ -88,17 +88,21 @@ class SegmentationMaskToPolyMetric(Metric):
             if predictions.segmentation_predictions
             else None
         )
-        pred_img = self.loader.fetch(prediction.mask_url)
-        pred_value, pred_polys = instance_mask_to_polys(
-            np.asarray(pred_img)
-        )  # typing: ignore
-        code_to_label = {s.index: s.label for s in prediction.annotations}
-        poly_predictions = transform_poly_codes_to_poly_preds(
-            prediction.reference_id, pred_value, pred_polys, code_to_label
-        )
-        return self.call_poly_metric(
-            annotations, PredictionList(polygon_predictions=poly_predictions)
-        )
+        if prediction:
+            pred_img = self.loader.fetch(prediction.mask_url)
+            pred_value, pred_polys = instance_mask_to_polys(
+                np.asarray(pred_img)
+            )  # typing: ignore
+            code_to_label = {s.index: s.label for s in prediction.annotations}
+            poly_predictions = transform_poly_codes_to_poly_preds(
+                prediction.reference_id, pred_value, pred_polys, code_to_label
+            )
+            return self.call_poly_metric(
+                annotations,
+                PredictionList(polygon_predictions=poly_predictions),
+            )
+        else:
+            return ScalarResult(0, weight=0)
 
     @abc.abstractmethod
     def call_poly_metric(
--- scale-nucleus-0.12b3/pyproject.toml
+++ scale-nucleus-0.12b9/pyproject.toml
@@ -21,7 +21,7 @@ exclude = '''
 
 [tool.poetry]
 name = "scale-nucleus"
-version = "0.12b3"
+version = "0.12b9"
 description = "The official Python client library for Nucleus, the Data Platform for AI"
 license = "MIT"
 authors = ["Scale AI Nucleus Team <nucleusapi@scaleapi.com>"]
@@ -47,7 +47,7 @@ shellingham = "^1.4.0"
 scikit-learn = ">=0.24.0"
 Shapely = { version = ">=1.8.0", optional = true }
 rasterio = "^1.2.10"
-Pillow = ">=8.3.1"
+Pillow = ">=7.1.2"
 s3fs = ">=2022.1.0"
 
 [tool.poetry.dev-dependencies]
--- scale-nucleus-0.12b3/setup.py
+++ scale-nucleus-0.12b9/setup.py
@@ -16,7 +16,7 @@ package_data = \
 {'': ['*']}
 
 install_requires = \
-['Pillow>=8.3.1',
+['Pillow>=7.1.2',
  'aiohttp>=3.7.4,<4.0.0',
  'click>=7.1.2,<9.0',
  'nest-asyncio>=1.5.1,<2.0.0',
@@ -40,7 +40,7 @@ entry_points = \
 
 setup_kwargs = {
     'name': 'scale-nucleus',
-    'version': '0.12b3',
+    'version': '0.12b9',
     'description': 'The official Python client library for Nucleus, the Data Platform for AI',
     'long_description': '# Nucleus\n\nhttps://dashboard.scale.com/nucleus\n\nAggregate metrics in ML are not good enough. To improve production ML, you need to understand their qualitative failure modes, fix them by gathering more data, and curate diverse scenarios.\n\nScale Nucleus helps you:\n\n- Visualize your data\n- Curate interesting slices within your dataset\n- Review and manage annotations\n- Measure and debug your model performance\n\nNucleus is a new way—the right way—to develop ML models, helping us move away from the concept of one dataset and towards a paradigm of collections of scenarios.\n\n## Installation\n\n`$ pip install scale-nucleus`\n\n## CLI installation\n\nWe recommend installing the CLI via `pipx` (https://pypa.github.io/pipx/installation/). This makes sure that\nthe CLI does not interfere with you system packages and is accessible from your favorite terminal.\n\nFor MacOS:\n\n```bash\nbrew install pipx\npipx ensurepath\npipx install scale-nucleus\n# Optional installation of shell completion (for bash, zsh or fish)\nnu install-completions\n```\n\nOtherwise, install via pip (requires pip 19.0 or later):\n\n```bash\npython3 -m pip install --user pipx\npython3 -m pipx ensurepath\npython3 -m pipx install scale-nucleus\n# Optional installation of shell completion (for bash, zsh or fish)\nnu install-completions\n```\n\n## Common issues/FAQ\n\n### Outdated Client\n\nNucleus is iterating rapidly and as a result we do not always perfectly preserve backwards compatibility with older versions of the client. If you run into any unexpected error, it\'s a good idea to upgrade your version of the client by running\n\n```\npip install --upgrade scale-nucleus\n```\n\n## Usage\n\nFor the most up to date documentation, reference: https://dashboard.scale.com/nucleus/docs/api?language=python.\n\n## For Developers\n\nClone from github and install as editable\n\n```\ngit clone git@github.com:scaleapi/nucleus-python-client.git\ncd nucleus-python-client\npip3 install poetry\npoetry install\n```\n\nPlease install the pre-commit hooks by running the following command:\n\n```python\npoetry run pre-commit install\n```\n\nWhen releasing a new version please add release notes to the changelog in `CHANGELOG.md`.\n\n**Best practices for testing:**\n(1). Please run pytest from the root directory of the repo, i.e.\n\n```\npoetry run pytest tests/test_dataset.py\n```\n\n(2) To skip slow integration tests that have to wait for an async job to start.\n\n```\npoetry run pytest -m "not integration"\n```\n\n## Pydantic Models\n\nPrefer using [Pydantic](https://pydantic-docs.helpmanual.io/usage/models/) models rather than creating raw dictionaries\nor dataclasses to send or receive over the wire as JSONs. Pydantic is created with data validation in mind and provides very clear error\nmessages when it encounters a problem with the payload.\n\nThe Pydantic model(s) should mirror the payload to send. To represent a JSON payload that looks like this:\n\n```json\n{\n "example_json_with_info": {\n "metadata": {\n "frame": 0\n },\n "reference_id": "frame0",\n "url": "s3://example/scale_nucleus/2021/lidar/0038711321865000.json",\n "type": "pointcloud"\n },\n "example_image_with_info": {\n "metadata": {\n "author": "Picasso"\n },\n "reference_id": "frame0",\n "url": "s3://bucket/0038711321865000.jpg",\n "type": "image"\n }\n}\n```\n\nCould be represented as the following structure. Note that the field names map to the JSON keys and the usage of field\nvalidators (`@validator`).\n\n```python\nimport os.path\nfrom pydantic import BaseModel, validator\nfrom typing import Literal\n\n\nclass JsonWithInfo(BaseModel):\n metadata: dict # any dict is valid\n reference_id: str\n url: str\n type: Literal["pointcloud", "recipe"]\n\n @validator("url")\n def has_json_extension(cls, v):\n if not v.endswith(".json"):\n raise ValueError(f"Expected \'.json\' extension got {v}")\n return v\n\n\nclass ImageWithInfo(BaseModel):\n metadata: dict # any dict is valid\n reference_id: str\n url: str\n type: Literal["image", "mask"]\n\n @validator("url")\n def has_valid_extension(cls, v):\n valid_extensions = {".jpg", ".jpeg", ".png", ".tiff"}\n _, extension = os.path.splitext(v)\n if extension not in valid_extensions:\n raise ValueError(f"Expected extension in {valid_extensions} got {v}")\n return v\n\n\nclass ExampleNestedModel(BaseModel):\n example_json_with_info: JsonWithInfo\n example_image_with_info: ImageWithInfo\n\n# Usage:\nimport requests\npayload = requests.get("/example")\nparsed_model = ExampleNestedModel.parse_obj(payload.json())\nrequests.post("example/post_to", json=parsed_model.dict())\n```\n\n### Migrating to Pydantic\n\n- When migrating an interface from a dictionary use `nucleus.pydantic_base.DictCompatibleModel`. That allows you to get\n the benefits of Pydantic but maintaints backwards compatibility with a Python dictionary by delegating `__getitem__` to\n fields.\n- When migrating a frozen dataclass use `nucleus.pydantic_base.ImmutableModel`. That is a base class set up to be\n immutable after initialization.\n\n**Updating documentation:**\nWe use [Sphinx](https://www.sphinx-doc.org/en/master/) to autogenerate our API Reference from docstrings.\n\nTo test your local docstring changes, run the following commands from the repository\'s root directory:\n\n```\npoetry shell\ncd docs\nsphinx-autobuild . ./_build/html --watch ../nucleus\n```\n\n`sphinx-autobuild` will spin up a server on localhost (port 8000 by default) that will watch for and automatically rebuild a version of the API reference based on your local docstring changes.\n\n## Custom Metrics using Shapely in scale-validate\n\nCertain metrics use `shapely` which is added as an optional dependency.\n\n```bash\npip install scale-nucleus[metrics]\n```\n\nNote that you might need to install a local GEOS package since Shapely doesn\'t provide binaries bundled with GEOS for every platform.\n\n```bash\n#Mac OS\nbrew install geos\n# Ubuntu/Debian flavors\napt-get install libgeos-dev\n```\n\nTo develop it locally use\n\n`poetry install --extras shapely`\n',
     'author': 'Scale AI Nucleus Team',