scale-nucleus 0.1.22__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. cli/client.py +14 -0
  2. cli/datasets.py +77 -0
  3. cli/helpers/__init__.py +0 -0
  4. cli/helpers/nucleus_url.py +10 -0
  5. cli/helpers/web_helper.py +40 -0
  6. cli/install_completion.py +33 -0
  7. cli/jobs.py +42 -0
  8. cli/models.py +35 -0
  9. cli/nu.py +42 -0
  10. cli/reference.py +8 -0
  11. cli/slices.py +62 -0
  12. cli/tests.py +121 -0
  13. nucleus/__init__.py +453 -699
  14. nucleus/annotation.py +435 -80
  15. nucleus/autocurate.py +9 -0
  16. nucleus/connection.py +87 -0
  17. nucleus/constants.py +12 -2
  18. nucleus/data_transfer_object/__init__.py +0 -0
  19. nucleus/data_transfer_object/dataset_details.py +9 -0
  20. nucleus/data_transfer_object/dataset_info.py +26 -0
  21. nucleus/data_transfer_object/dataset_size.py +5 -0
  22. nucleus/data_transfer_object/scenes_list.py +18 -0
  23. nucleus/dataset.py +1139 -215
  24. nucleus/dataset_item.py +130 -26
  25. nucleus/dataset_item_uploader.py +297 -0
  26. nucleus/deprecation_warning.py +32 -0
  27. nucleus/errors.py +21 -1
  28. nucleus/job.py +71 -3
  29. nucleus/logger.py +9 -0
  30. nucleus/metadata_manager.py +45 -0
  31. nucleus/metrics/__init__.py +10 -0
  32. nucleus/metrics/base.py +117 -0
  33. nucleus/metrics/categorization_metrics.py +197 -0
  34. nucleus/metrics/errors.py +7 -0
  35. nucleus/metrics/filters.py +40 -0
  36. nucleus/metrics/geometry.py +198 -0
  37. nucleus/metrics/metric_utils.py +28 -0
  38. nucleus/metrics/polygon_metrics.py +480 -0
  39. nucleus/metrics/polygon_utils.py +299 -0
  40. nucleus/model.py +121 -15
  41. nucleus/model_run.py +34 -57
  42. nucleus/payload_constructor.py +30 -18
  43. nucleus/prediction.py +259 -17
  44. nucleus/pydantic_base.py +26 -0
  45. nucleus/retry_strategy.py +4 -0
  46. nucleus/scene.py +204 -19
  47. nucleus/slice.py +230 -67
  48. nucleus/upload_response.py +20 -9
  49. nucleus/url_utils.py +4 -0
  50. nucleus/utils.py +139 -35
  51. nucleus/validate/__init__.py +24 -0
  52. nucleus/validate/client.py +168 -0
  53. nucleus/validate/constants.py +20 -0
  54. nucleus/validate/data_transfer_objects/__init__.py +0 -0
  55. nucleus/validate/data_transfer_objects/eval_function.py +81 -0
  56. nucleus/validate/data_transfer_objects/scenario_test.py +19 -0
  57. nucleus/validate/data_transfer_objects/scenario_test_evaluations.py +11 -0
  58. nucleus/validate/data_transfer_objects/scenario_test_metric.py +12 -0
  59. nucleus/validate/errors.py +6 -0
  60. nucleus/validate/eval_functions/__init__.py +0 -0
  61. nucleus/validate/eval_functions/available_eval_functions.py +212 -0
  62. nucleus/validate/eval_functions/base_eval_function.py +60 -0
  63. nucleus/validate/scenario_test.py +143 -0
  64. nucleus/validate/scenario_test_evaluation.py +114 -0
  65. nucleus/validate/scenario_test_metric.py +14 -0
  66. nucleus/validate/utils.py +8 -0
  67. {scale_nucleus-0.1.22.dist-info → scale_nucleus-0.6.4.dist-info}/LICENSE +0 -0
  68. scale_nucleus-0.6.4.dist-info/METADATA +213 -0
  69. scale_nucleus-0.6.4.dist-info/RECORD +71 -0
  70. {scale_nucleus-0.1.22.dist-info → scale_nucleus-0.6.4.dist-info}/WHEEL +1 -1
  71. scale_nucleus-0.6.4.dist-info/entry_points.txt +3 -0
  72. scale_nucleus-0.1.22.dist-info/METADATA +0 -85
  73. scale_nucleus-0.1.22.dist-info/RECORD +0 -21
nucleus/metrics/polygon_utils.py ADDED
@@ -0,0 +1,299 @@
+ import sys
+ from functools import wraps
+ from typing import Dict, List, Tuple, TypeVar
+
+ import numpy as np
+ from scipy.optimize import linear_sum_assignment
+
+ from nucleus.annotation import BoxAnnotation, PolygonAnnotation
+ from nucleus.prediction import BoxPrediction, PolygonPrediction
+
+ from .base import ScalarResult
+ from .errors import PolygonAnnotationTypeError
+ from .geometry import GeometryPolygon, polygon_intersection_area
+
+ BoxOrPolygonPrediction = TypeVar(
+     "BoxOrPolygonPrediction", BoxPrediction, PolygonPrediction
+ )
+ BoxOrPolygonAnnotation = TypeVar(
+     "BoxOrPolygonAnnotation", BoxAnnotation, PolygonAnnotation
+ )
+ BoxOrPolygonAnnoOrPred = TypeVar(
+     "BoxOrPolygonAnnoOrPred",
+     BoxAnnotation,
+     PolygonAnnotation,
+     BoxPrediction,
+     PolygonPrediction,
+ )
+
+
+ def polygon_annotation_to_geometry(
+     annotation: BoxOrPolygonAnnotation,
+ ) -> GeometryPolygon:
+     if isinstance(annotation, BoxAnnotation):
+         xmin = annotation.x - annotation.width / 2
+         xmax = annotation.x + annotation.width / 2
+         ymin = annotation.y - annotation.height / 2
+         ymax = annotation.y + annotation.height / 2
+         points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
+         return GeometryPolygon(points=points, is_rectangle=True)
+     elif isinstance(annotation, PolygonAnnotation):
+         return GeometryPolygon(
+             points=[(point.x, point.y) for point in annotation.vertices],
+             is_rectangle=False,
+         )
+     else:
+         raise PolygonAnnotationTypeError()
+
+
+ def _iou(annotation: GeometryPolygon, prediction: GeometryPolygon) -> float:
+     intersection = polygon_intersection_area(annotation, prediction)
+     union = annotation.area + prediction.area - intersection
+     return intersection / max(union, sys.float_info.epsilon)
+
+
+ def _iou_matrix(
+     annotations: List[GeometryPolygon], predictions: List[GeometryPolygon]
+ ) -> np.ndarray:
+     iou_matrix = np.empty((len(predictions), len(annotations)))
+     for i, prediction in enumerate(predictions):
+         for j, annotation in enumerate(annotations):
+             iou_matrix[i, j] = _iou(annotation, prediction)
+     return iou_matrix
+
+
+ def _iou_assignments_for_same_reference_id(
+     annotations: List[BoxOrPolygonAnnotation],
+     predictions: List[BoxOrPolygonPrediction],
+     iou_threshold: float,
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+     # Matches annotations and predictions of the same reference ID.
+     # Returns a tuple of the list of all IoU values of valid assignments, a
+     # list of the indices of predictions matched to annotations (-1 if
+     # unmatched), and a list of all indices of annotations matched to
+     # predictions.
+
+     # Check that all annotations and predictions have the same reference ID.
+     reference_ids = set(annotation.reference_id for annotation in annotations)
+     reference_ids |= set(prediction.reference_id for prediction in predictions)
+     assert (
+         len(reference_ids) <= 1
+     ), "Expected annotations and predictions to have same reference ID."
+
+     # Convert annotations and predictions to GeometryPolygon objects.
+     polygon_annotations = list(
+         map(polygon_annotation_to_geometry, annotations)
+     )
+     polygon_predictions = list(
+         map(polygon_annotation_to_geometry, predictions)
+     )
+
+     # Compute IoU matrix and set IoU values below the threshold to 0.
+     iou_matrix = _iou_matrix(polygon_annotations, polygon_predictions)
+     iou_matrix[iou_matrix < iou_threshold] = 0
+
+     # Match annotations and predictions using linear sum assignment and filter out
+     # values below the threshold.
+     matched_0, matched_1 = linear_sum_assignment(-iou_matrix)
+     iou_assigns = iou_matrix[matched_0, matched_1]
+     valid_idxes = iou_assigns >= iou_threshold
+     iou_assigns = iou_assigns[valid_idxes]
+
+     matched_0 = matched_0[valid_idxes]
+     matched_1 = matched_1[valid_idxes]
+     anno_to_pred = -np.ones(len(annotations))
+     pred_to_anno = -np.ones(len(predictions))
+     anno_to_pred[matched_1] = matched_0
+     pred_to_anno[matched_0] = matched_1
+
+     return iou_assigns, anno_to_pred, pred_to_anno
+
+
+ def group_boxes_or_polygons_by_reference_id(
+     annotations: List[BoxOrPolygonAnnotation],
+     predictions: List[BoxOrPolygonPrediction],
+ ) -> Dict[
+     str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
+ ]:
+     """Groups input annotations and predictions by reference_id.
+
+     Args:
+         annotations: list of input annotations
+         predictions: list of input predictions
+
+     Returns:
+         Mapping from each reference_id to an (annotations, predictions) tuple.
+     """
+     reference_ids = set(annotation.reference_id for annotation in annotations)
+     reference_ids |= set(prediction.reference_id for prediction in predictions)
+     grouped: Dict[
+         str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
+     ] = {reference_id: ([], []) for reference_id in reference_ids}
+     for annotation in annotations:
+         grouped[annotation.reference_id][0].append(annotation)
+     for prediction in predictions:
+         grouped[prediction.reference_id][1].append(prediction)
+     return grouped
+
+
+ def group_boxes_or_polygons_by_label(
+     annotations: List[BoxOrPolygonAnnotation],
+     predictions: List[BoxOrPolygonPrediction],
+ ) -> Dict[
+     str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
+ ]:
+     """Groups input annotations and predictions by label.
+
+     Args:
+         annotations: list of input box or polygon annotations
+         predictions: list of input box or polygon predictions
+
+     Returns:
+         Mapping from each label to an (annotations, predictions) tuple.
+     """
+     labels = set(annotation.label for annotation in annotations)
+     labels |= set(prediction.label for prediction in predictions)
+     grouped: Dict[
+         str, Tuple[List[BoxOrPolygonAnnotation], List[BoxOrPolygonPrediction]]
+     ] = {label: ([], []) for label in labels}
+     for annotation in annotations:
+         grouped[annotation.label][0].append(annotation)
+     for prediction in predictions:
+         grouped[prediction.label][1].append(prediction)
+     return grouped
+
+
+ def iou_assignments(
+     annotations: List[BoxOrPolygonAnnotation],
+     predictions: List[BoxOrPolygonPrediction],
+     iou_threshold: float,
+ ) -> np.ndarray:
+     """Matches annotations and predictions based on linear sum cost and returns the
+     intersection-over-union values of the matched annotation-prediction pairs, subject
+     to the specified IoU threshold. Note that annotations and predictions from
+     different reference_ids will not be matched with one another.
+     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
+
+     Args:
+         annotations: list of box or polygon annotations
+         predictions: list of box or polygon predictions
+         iou_threshold: the intersection-over-union threshold for an
+             annotation-prediction pair to be considered a match.
+
+     Returns:
+         1D numpy array that contains the IoU values of the matched pairs.
+     """
+     grouped_inputs = group_boxes_or_polygons_by_reference_id(
+         annotations, predictions
+     )
+     iou_assigns = []
+     for grouped_annotations, grouped_predictions in grouped_inputs.values():
+         result_per_reference_id, _, _ = _iou_assignments_for_same_reference_id(
+             grouped_annotations, grouped_predictions, iou_threshold
+         )
+         iou_assigns.append(result_per_reference_id)
+     return np.concatenate(iou_assigns)
+
+
+ def get_true_false_positives_confidences(
+     annotations: List[BoxOrPolygonAnnotation],
+     predictions: List[BoxOrPolygonPrediction],
+     iou_threshold: float,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """Matches annotations and predictions based on linear sum cost and returns, for
+     each prediction, whether it was matched to an annotation (a true positive), subject
+     to the specified IoU threshold. Note that annotations and predictions from
+     different reference_ids will not be matched with one another.
+     See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
+
+     Args:
+         annotations: list of box or polygon annotations
+         predictions: list of box or polygon predictions
+         iou_threshold: the intersection-over-union threshold for an
+             annotation-prediction pair to be considered a match.
+
+     Returns:
+         1D numpy array that contains 1 if the prediction is a true positive and 0
+         if it is a false positive.
+         1D numpy array of confidence values, one per prediction.
+     """
+     grouped_inputs = group_boxes_or_polygons_by_reference_id(
+         annotations, predictions
+     )
+     true_false_positives = []
+     confidences = []
+     for grouped_annotations, grouped_predictions in grouped_inputs.values():
+         _, _, pred_to_anno = _iou_assignments_for_same_reference_id(
+             grouped_annotations, grouped_predictions, iou_threshold
+         )
+         true_false_positives.append(pred_to_anno > -1)
+         confidences.extend([pred.confidence for pred in grouped_predictions])
+     return np.concatenate(true_false_positives), np.array(confidences)
+
+
+ def num_true_positives(
+     annotations: List[BoxOrPolygonAnnotation],
+     predictions: List[BoxOrPolygonPrediction],
+     iou_threshold: float,
+ ) -> int:
+     """Counts the number of annotations with a matching prediction.
+
+     A prediction is considered a match for an annotation if it has not yet been
+     matched to another annotation, its reference_id is the same as the
+     annotation's, and its IoU with the annotation is at least the iou_threshold.
+
+     Args:
+         annotations: list of box or polygon annotations
+         predictions: list of box or polygon predictions
+         iou_threshold: the intersection-over-union threshold for an
+             annotation-prediction pair to be considered a match.
+
+     Returns:
+         The number of true positives (predictions that are matched to annotations).
+     """
+     iou_assigns = iou_assignments(annotations, predictions, iou_threshold)
+     true_positives = len(iou_assigns)
+     return true_positives
+
+
+ def label_match_wrapper(metric_fn):
+     """Decorator to add the ability to apply a metric only to annotations and
+     predictions with matching labels.
+
+     Args:
+         metric_fn: Metric function that takes a list of annotations, a list
+             of predictions, and optional args and kwargs.
+
+     Returns:
+         Metric function which can optionally enforce matching labels.
+     """
+
+     @wraps(metric_fn)
+     def wrapper(
+         annotations: List[BoxOrPolygonAnnotation],
+         predictions: List[BoxOrPolygonPrediction],
+         *args,
+         enforce_label_match: bool = False,
+         **kwargs,
+     ) -> ScalarResult:
+         # Simply return the metric if we are not enforcing label matches.
+         if not enforce_label_match:
+             return metric_fn(annotations, predictions, *args, **kwargs)
+
+         # For each bin of annotations/predictions, compute the metric applied
+         # only to that bin. Then aggregate results across all bins.
+         grouped_inputs = group_boxes_or_polygons_by_label(
+             annotations, predictions
+         )
+         metric_results = []
+         for binned_annotations, binned_predictions in grouped_inputs.values():
+             metric_result = metric_fn(
+                 binned_annotations, binned_predictions, *args, **kwargs
+             )
+             metric_results.append(metric_result)
+         assert all(
+             isinstance(r, ScalarResult) for r in metric_results
+         ), "Expected every result to be a ScalarResult"
+         return ScalarResult.aggregate(metric_results)
+
+     return wrapper
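
The core of the new matching code is the linear_sum_assignment call in _iou_assignments_for_same_reference_id: negating the IoU matrix turns scipy's minimum-cost assignment into a maximum-total-IoU matching, with each prediction matched to at most one annotation. A minimal, self-contained sketch of that step, using hypothetical IoU values rather than real Nucleus objects::

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    # Hypothetical IoU matrix: rows are predictions, columns are annotations.
    iou_matrix = np.array(
        [
            [0.9, 0.1, 0.0],
            [0.6, 0.7, 0.0],
            [0.0, 0.2, 0.3],
        ]
    )
    iou_threshold = 0.5

    # Zero out pairs below the threshold, then maximize total IoU by
    # minimizing the negated matrix (scipy solves a min-cost assignment).
    iou_matrix[iou_matrix < iou_threshold] = 0
    pred_idx, anno_idx = linear_sum_assignment(-iou_matrix)

    # Keep only assignments that still clear the threshold.
    ious = iou_matrix[pred_idx, anno_idx]
    valid = ious >= iou_threshold
    for p, a, v in zip(pred_idx[valid], anno_idx[valid], ious[valid]):
        print(f"prediction {p} matched annotation {a} with IoU {v:.2f}")
    # prediction 0 matched annotation 0 with IoU 0.90
    # prediction 1 matched annotation 1 with IoU 0.70
    # prediction 2 stays unmatched (its best IoU, 0.30, is below 0.5)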
nucleus/model.py CHANGED
@@ -1,28 +1,98 @@
- from typing import List, Optional, Dict, Union
+ from typing import Dict, List, Optional, Union
+
+ import requests
+
+ from .constants import METADATA_KEY, NAME_KEY, REFERENCE_ID_KEY
  from .dataset import Dataset
+ from .job import AsyncJob
+ from .model_run import ModelRun
  from .prediction import (
      BoxPrediction,
      CuboidPrediction,
      PolygonPrediction,
      SegmentationPrediction,
  )
- from .model_run import ModelRun
- from .constants import (
-     NAME_KEY,
-     REFERENCE_ID_KEY,
-     METADATA_KEY,
- )


  class Model:
-     def __init__(
-         self,
-         model_id: str,
-         name: str,
-         reference_id: str,
-         metadata: Optional[Dict],
-         client,
-     ):
+     """A model that can be used to upload predictions to a dataset.
+
+     By uploading model predictions to Nucleus, you can compare your predictions
+     to ground truth annotations and discover problems with your Models or
+     :class:`Dataset`.
+
+     You can also upload predictions for unannotated images, letting you query
+     them based on model predictions. This can help you prioritize which
+     unlabeled data to label next.
+
+     Within Nucleus, Models work in the following way:
+
+     1. You first :meth:`create a Model <NucleusClient.add_model>`. You can do this
+        just once and reuse the model on multiple datasets.
+     2. You then :meth:`upload predictions <Dataset.upload_predictions>` to a dataset.
+     3. Trigger :meth:`calculation of metrics <Dataset.calculate_evaluation_metrics>`
+        in order to view model debugging insights.
+
+     The steps above will allow you to visualize model performance within
+     Nucleus, or compare multiple models that have been run on the same Dataset.
+
+     Note that you can always add more predictions to a dataset, but then you
+     will need to re-run the calculation of metrics in order for them to be
+     correct.
+
+     ::
+
+         import nucleus
+
+         client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
+         dataset = client.get_dataset(YOUR_DATASET_ID)
+
+         prediction_1 = nucleus.BoxPrediction(
+             label="label",
+             x=0,
+             y=0,
+             width=10,
+             height=10,
+             reference_id="1",
+             confidence=0.9,
+             class_pdf={"label": 0.9, "other_label": 0.1},
+         )
+         prediction_2 = nucleus.BoxPrediction(
+             label="label",
+             x=0,
+             y=0,
+             width=10,
+             height=10,
+             reference_id="2",
+             confidence=0.2,
+             class_pdf={"label": 0.2, "other_label": 0.8},
+         )
+
+         model = client.add_model(
+             name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"}
+         )
+
+         # For small ingestions, we recommend synchronous ingestion
+         response = dataset.upload_predictions(model, [prediction_1, prediction_2])
+
+         # For large ingestions, we recommend asynchronous ingestion
+         job = dataset.upload_predictions(
+             model, [prediction_1, prediction_2], asynchronous=True
+         )
+         # Check current status
+         job.status()
+         # Sleep until ingestion is done
+         job.sleep_until_complete()
+         # Check errors
+         job.errors()
+
+         dataset.calculate_evaluation_metrics(model)
+
+     Models cannot be instantiated directly and instead must be created via API
+     endpoint, using :meth:`NucleusClient.add_model`.
+     """
+
+     def __init__(self, model_id, name, reference_id, metadata, client):
          self.id = model_id
          self.name = name
          self.reference_id = reference_id
@@ -45,6 +115,7 @@ class Model:

      @classmethod
      def from_json(cls, payload: dict, client):
+         """Instantiates a Model object from a schematized JSON dict payload."""
          return cls(
              model_id=payload["id"],
              name=payload["name"],
@@ -68,6 +139,16 @@ class Model:
          metadata: Optional[Dict] = None,
          asynchronous: bool = False,
      ) -> ModelRun:
+         # This method, as well as model runs in general, is now deprecated.
+
+         # Models now automatically generate a model run when applied to a
+         # dataset using dataset.upload_predictions(model, predictions), so
+         # there is no longer any need to create a model run explicitly before
+         # uploading predictions.
+
+         # When uploading to a dataset twice using the same model, the same
+         # model run will be reused by Nucleus.
+
          payload: dict = {
              NAME_KEY: name,
              REFERENCE_ID_KEY: self.reference_id,
@@ -81,3 +162,28 @@ class Model:
          model_run.predict(predictions, asynchronous=asynchronous)

          return model_run
+
+     def evaluate(self, scenario_test_names: List[str]) -> AsyncJob:
+         """Evaluates this model on the specified scenario tests. ::
+
+             import nucleus
+             client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
+             model = client.list_models()[0]
+             scenario_test = client.validate.create_scenario_test(
+                 "sample_scenario_test", "YOUR_SLICE_ID"
+             )
+
+             model.evaluate(["sample_scenario_test"])
+
+         Args:
+             scenario_test_names: list of scenario tests to evaluate
+
+         Returns:
+             AsyncJob object of the evaluation job.
+         """
+         response = self._client.make_request(
+             {"test_names": scenario_test_names},
+             f"validate/{self.id}/evaluate",
+             requests_command=requests.post,
+         )
+         return AsyncJob.from_json(response, self._client)
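
Model.evaluate returns an AsyncJob, so a Validate evaluation follows the same polling pattern the docstring above uses for asynchronous prediction uploads. A minimal usage sketch, assuming a scenario test named "sample_scenario_test" already exists::

    import nucleus

    client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
    model = client.list_models()[0]

    job = model.evaluate(["sample_scenario_test"])
    job.status()                # check current status
    job.sleep_until_complete()  # block until the evaluation finishes
    job.errors()                # inspect any failures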
nucleus/model_run.py CHANGED
@@ -1,17 +1,33 @@
- from typing import Dict, List, Optional, Type, Union
+ """
+ Model Runs are deprecated and will be removed in a future version of the python client.
+ It is now possible to upload model predictions without the need to create a model run.
+
+ For example::
+
+     import nucleus
+     client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
+     prediction_1 = nucleus.BoxPrediction(label="label", x=0, y=0, width=10, height=10, reference_id="1", confidence=0.9, class_pdf={'label': 0.9, 'other_label': 0.1})
+     prediction_2 = nucleus.BoxPrediction(label="label", x=0, y=0, width=10, height=10, reference_id="2", confidence=0.2, class_pdf={'label': 0.2, 'other_label': 0.8})
+     model = client.add_model(name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"})
+     response = dataset.upload_predictions(model, [prediction_1, prediction_2])
+ """
+
+
+ from typing import List, Optional, Union
+
  import requests
+
  from nucleus.annotation import check_all_mask_paths_remote
  from nucleus.job import AsyncJob
- from nucleus.utils import serialize_and_write_to_presigned_url
+ from nucleus.utils import (
+     format_prediction_response,
+     serialize_and_write_to_presigned_url,
+ )

  from .constants import (
      ANNOTATIONS_KEY,
-     BOX_TYPE,
-     CUBOID_TYPE,
      DEFAULT_ANNOTATION_UPDATE_MODE,
-     POLYGON_TYPE,
      REQUEST_ID_KEY,
-     SEGMENTATION_TYPE,
      UPDATE_KEY,
  )
  from .prediction import (
@@ -25,8 +41,7 @@ from .prediction import (


  class ModelRun:
      """
-     Model runs represent detections of a specific model on your dataset.
-     Having an open model run is a prerequisite for uploading predictions to your dataset.
+     This class is deprecated and will be removed from the python client.
      """

      def __init__(self, model_run_id: str, dataset_id: str, client):
@@ -124,7 +139,11 @@ class ModelRun:
              )
              return AsyncJob.from_json(response, self._client)
          else:
-             return self._client.predict(self.model_run_id, annotations, update)
+             return self._client.predict(
+                 model_run_id=self.model_run_id,
+                 annotations=annotations,
+                 update=update,
+             )

      def iloc(self, i: int):
          """
@@ -134,7 +153,7 @@ class ModelRun:
          }
          """
          response = self._client.predictions_iloc(self.model_run_id, i)
-         return self._format_prediction_response(response)
+         return format_prediction_response(response)

      def refloc(self, reference_id: str):
          """
@@ -142,10 +161,10 @@ class ModelRun:
          :param reference_id: reference_id of a dataset item.
          :return: List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction, SegmentationPrediction]]
          """
-         response = self._client.predictions_ref_id(
-             self.model_run_id, reference_id
+         response = self._client.get(
+             f"modelRun/{self.model_run_id}/refloc/{reference_id}"
          )
-         return self._format_prediction_response(response)
+         return format_prediction_response(response)

      def loc(self, dataset_item_id: str):
          """
@@ -159,7 +178,7 @@ class ModelRun:
          response = self._client.predictions_loc(
              self.model_run_id, dataset_item_id
          )
-         return self._format_prediction_response(response)
+         return format_prediction_response(response)

      def prediction_loc(self, reference_id: str, annotation_id: str):
          """
@@ -184,46 +203,4 @@ class ModelRun:
              route=f"modelRun/{self.model_run_id}/ungrouped",
              requests_command=requests.get,
          )
-         return self._format_prediction_response(
-             {ANNOTATIONS_KEY: json_response}
-         )
-
-     def _format_prediction_response(
-         self, response: dict
-     ) -> Union[
-         dict,
-         List[
-             Union[
-                 BoxPrediction,
-                 PolygonPrediction,
-                 CuboidPrediction,
-                 SegmentationPrediction,
-             ]
-         ],
-     ]:
-         annotation_payload = response.get(ANNOTATIONS_KEY, None)
-         if not annotation_payload:
-             # An error occurred
-             return response
-         annotation_response = {}
-         type_key_to_class: Dict[
-             str,
-             Union[
-                 Type[BoxPrediction],
-                 Type[PolygonPrediction],
-                 Type[CuboidPrediction],
-                 Type[SegmentationPrediction],
-             ],
-         ] = {
-             BOX_TYPE: BoxPrediction,
-             POLYGON_TYPE: PolygonPrediction,
-             CUBOID_TYPE: CuboidPrediction,
-             SEGMENTATION_TYPE: SegmentationPrediction,
-         }
-         for type_key in annotation_payload:
-             type_class = type_key_to_class[type_key]
-             annotation_response[type_key] = [
-                 type_class.from_json(annotation)
-                 for annotation in annotation_payload[type_key]
-             ]
-         return annotation_response
+         return format_prediction_response({ANNOTATIONS_KEY: json_response})
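
The deleted _format_prediction_response method was centralized as format_prediction_response in nucleus/utils.py (file 50 in the list above), so the type-key dispatch is no longer duplicated inside ModelRun. A stripped-down sketch of that dispatch pattern, using hypothetical stand-in names rather than the library's actual constants and classes::

    from dataclasses import dataclass

    # Stand-in for a prediction class; the real client maps constants such
    # as BOX_TYPE to BoxPrediction.from_json (see the deleted method above).
    @dataclass
    class Box:
        label: str

    TYPE_KEY_TO_CLASS = {"box": Box}

    def format_predictions(response: dict) -> dict:
        # Deserialize each type bucket in the payload with its matching class.
        return {
            type_key: [TYPE_KEY_TO_CLASS[type_key](**item) for item in items]
            for type_key, items in response.items()
        }

    print(format_predictions({"box": [{"label": "car"}]}))
    # {'box': [Box(label='car')]}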
nucleus/payload_constructor.py CHANGED
@@ -1,34 +1,38 @@
- from typing import List, Optional, Dict, Union
- from .dataset_item import DatasetItem
- from .scene import LidarScene
+ from typing import Dict, List, Optional, Union
+
  from .annotation import (
      BoxAnnotation,
+     CategoryAnnotation,
      CuboidAnnotation,
+     MultiCategoryAnnotation,
      PolygonAnnotation,
      SegmentationAnnotation,
  )
- from .prediction import (
-     BoxPrediction,
-     CuboidPrediction,
-     PolygonPrediction,
-     SegmentationPrediction,
- )
  from .constants import (
+     ANNOTATION_METADATA_SCHEMA_KEY,
      ANNOTATION_UPDATE_KEY,
-     NAME_KEY,
-     METADATA_KEY,
-     REFERENCE_ID_KEY,
      ANNOTATIONS_KEY,
      ITEMS_KEY,
-     SCENES_KEY,
-     UPDATE_KEY,
+     LABELS_KEY,
+     METADATA_KEY,
      MODEL_ID_KEY,
-     ANNOTATION_METADATA_SCHEMA_KEY,
+     NAME_KEY,
+     REFERENCE_ID_KEY,
+     SCENES_KEY,
      SEGMENTATIONS_KEY,
      TAXONOMY_NAME_KEY,
      TYPE_KEY,
-     LABELS_KEY,
+     UPDATE_KEY,
+ )
+ from .dataset_item import DatasetItem
+ from .prediction import (
+     BoxPrediction,
+     CategoryPrediction,
+     CuboidPrediction,
+     PolygonPrediction,
+     SegmentationPrediction,
  )
+ from .scene import LidarScene


  def construct_append_payload(
@@ -60,6 +64,8 @@ def construct_annotation_payload(
              BoxAnnotation,
              PolygonAnnotation,
              CuboidAnnotation,
+             CategoryAnnotation,
+             MultiCategoryAnnotation,
              SegmentationAnnotation,
          ]
      ],
@@ -87,7 +93,12 @@ def construct_segmentation_payload(

  def construct_box_predictions_payload(
      box_predictions: List[
-         Union[BoxPrediction, PolygonPrediction, CuboidPrediction]
+         Union[
+             BoxPrediction,
+             PolygonPrediction,
+             CuboidPrediction,
+             CategoryPrediction,
+         ]
      ],
      update: bool,
  ) -> dict:
@@ -132,10 +143,11 @@ def construct_model_run_creation_payload(


  def construct_taxonomy_payload(
-     taxonomy_name: str, taxonomy_type: str, labels: List[str]
+     taxonomy_name: str, taxonomy_type: str, labels: List[str], update: bool
  ) -> dict:
      return {
          TAXONOMY_NAME_KEY: taxonomy_name,
          TYPE_KEY: taxonomy_type,
          LABELS_KEY: labels,
+         UPDATE_KEY: update,
      }
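
construct_taxonomy_payload now threads an update flag into the request body. A sketch of the resulting payload, assuming the *_KEY constants resolve to the obvious literal strings (an assumption; the actual values live in nucleus/constants.py)::

    from nucleus.payload_constructor import construct_taxonomy_payload

    payload = construct_taxonomy_payload(
        "vehicle_types", "category", ["sedan", "truck"], update=True
    )
    # Hypothetical rendering of the resulting dict:
    # {
    #     "taxonomy_name": "vehicle_types",
    #     "type": "category",
    #     "labels": ["sedan", "truck"],
    #     "update": True,
    # }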