scale-nucleus 0.14.7__py3-none-any.whl → 0.14.14b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nucleus/dataset_item.py CHANGED
@@ -139,11 +139,21 @@ class DatasetItem:  # pylint: disable=R0902
         image_url = payload.get(IMAGE_URL_KEY, None) or payload.get(
             ORIGINAL_IMAGE_URL_KEY, None
         )
+        pointcloud_url = payload.get(POINTCLOUD_URL_KEY, None)
+
+        # handle case when re-converting Scene.from_json
+        url = payload.get(URL_KEY, None)
+        if url and not image_url and not pointcloud_url:
+            if url.split(".")[-1] in ("jpg", "png"):
+                image_url = url
+            elif url.split(".")[-1] in ("json",):
+                pointcloud_url = url
+
         if BACKEND_REFERENCE_ID_KEY in payload:
             payload[REFERENCE_ID_KEY] = payload[BACKEND_REFERENCE_ID_KEY]
         return cls(
             image_location=image_url,
-            pointcloud_location=payload.get(POINTCLOUD_URL_KEY, None),
+            pointcloud_location=pointcloud_url,
             reference_id=payload.get(REFERENCE_ID_KEY, None),
             metadata=payload.get(METADATA_KEY, {}),
             upload_to_scale=payload.get(UPLOAD_TO_SCALE_KEY, True),
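In practice, the fallback above lets `DatasetItem.from_json` round-trip payloads produced by `Scene.from_json`, which only carry a generic URL. A minimal standalone sketch of the same extension-based dispatch, using assumed plain-string keys in place of the SDK constants:

    # Sketch only: "image_url", "pointcloud_url" and "url" stand in for the
    # IMAGE_URL_KEY / POINTCLOUD_URL_KEY / URL_KEY constants used in the SDK.
    def resolve_locations(payload: dict):
        image_url = payload.get("image_url")
        pointcloud_url = payload.get("pointcloud_url")
        url = payload.get("url")
        if url and not image_url and not pointcloud_url:
            extension = url.split(".")[-1]
            if extension in ("jpg", "png"):
                image_url = url          # treat it as a 2D image item
            elif extension == "json":
                pointcloud_url = url     # treat it as a LiDAR point cloud
        return image_url, pointcloud_url

    print(resolve_locations({"url": "s3://bucket/scenes/frame_0.json"}))
    # -> (None, 's3://bucket/scenes/frame_0.json')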
nucleus/job.py CHANGED
@@ -80,7 +80,7 @@ class AsyncJob:
         ::

             [
-                '{"annotation":{"label":"car","type":"box","geometry":{"x":50,"y":60,"width":70,"height":80},"referenceId":"bad_ref_id","annotationId":"attempted_annot_upload","metadata":{}},"error":"Item with id bad_ref_id doesn\'t exist."}'
+                '{"annotation":{"label":"car","type":"box","geometry":{"x":50,"y":60,"width":70,"height":80},"referenceId":"bad_ref_id","annotationId":"attempted_annot_upload","metadata":{}},"error":"Item with id bad_ref_id does not exist."}'
             ]
         """
         errors = self.client.make_request(
@@ -2,8 +2,6 @@ from abc import abstractmethod
 from dataclasses import dataclass
 from typing import List, Optional, Set, Tuple, Union

-from sklearn.metrics import f1_score
-
 from nucleus.annotation import AnnotationList, CategoryAnnotation
 from nucleus.metrics.base import Metric, MetricResult, ScalarResult
 from nucleus.metrics.filtering import ListOfAndFilters, ListOfOrAndFilters
@@ -35,6 +33,9 @@ class CategorizationResult(MetricResult):

     @property
     def value(self):
+        # late import to avoid slow CLI init
+        from sklearn.metrics import f1_score
+
         annotation_labels = to_taxonomy_labels(self.annotations)
         prediction_labels = to_taxonomy_labels(self.predictions)

@@ -245,6 +246,9 @@ class CategorizationF1(CategorizationMetric):
         )

     def aggregate_score(self, results: List[CategorizationResult]) -> ScalarResult:  # type: ignore[override]
+        # late import to avoid slow CLI init
+        from sklearn.metrics import f1_score
+
         gt = []
         predicted = []
         for result in results:
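Moving the `sklearn` import from module scope into the call sites is what keeps `import nucleus` (and hence CLI start-up) fast when scikit-learn is installed but not needed. The pattern in isolation, as a hedged sketch:

    def aggregate_f1(gt, predicted):
        # Deferring the heavy dependency until a metric is actually computed
        # avoids paying the import cost on every CLI invocation.
        from sklearn.metrics import f1_score  # late import

        return f1_score(gt, predicted, average="macro")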
@@ -1,27 +1,23 @@
+import abc
 from typing import Dict

 import numpy as np
-from PIL import Image

-try:
-    import fsspec
-except (ModuleNotFoundError, OSError):
-    from ..package_not_installed import PackageNotInstalled

-    fsspec = PackageNotInstalled
+class SegmentationMaskLoader(abc.ABC):
+    @abc.abstractmethod
+    def fetch(self, url: str) -> np.ndarray:
+        pass


-class SegmentationMaskLoader:
-    def __init__(self, fs: fsspec):
-        self.fs = fs
-
-    def fetch(self, url: str):
-        with self.fs.open(url) as fh:
-            img = Image.open(fh)
-            return np.asarray(img)
+class DummyLoader(SegmentationMaskLoader):
+    def fetch(self, url: str) -> np.ndarray:
+        raise NotImplementedError(
+            "This dummy loader has to be replaced with an actual implementation of an image loader"
+        )


-class InMemoryLoader:
+class InMemoryLoader(SegmentationMaskLoader):
     """We use this loader in the tests, this allows us to serve images from memory instead of fetching
     from a filesystem.
     """
@@ -9,7 +9,7 @@ from nucleus.metrics.filtering import ListOfAndFilters, ListOfOrAndFilters
 from nucleus.prediction import PredictionList, SegmentationPrediction

 from .base import Metric, ScalarResult
-from .segmentation_loader import SegmentationMaskLoader
+from .segmentation_loader import DummyLoader, SegmentationMaskLoader
 from .segmentation_utils import (
     FALSE_POSITIVES,
     convert_to_instance_seg_confusion,
@@ -18,14 +18,6 @@ from .segmentation_utils import (
     setup_iou_thresholds,
 )

-try:
-    from s3fs import S3FileSystem
-except (ModuleNotFoundError, OSError):
-    from ..package_not_installed import PackageNotInstalled
-
-    S3FileSystem = PackageNotInstalled
-
-
 # pylint: disable=useless-super-delegation


@@ -64,7 +56,7 @@ class SegmentationMaskMetric(Metric):
         """
         # TODO -> add custom filtering to Segmentation(Annotation|Prediction).annotations.(metadata|label)
         super().__init__(annotation_filters, prediction_filters)
-        self.loader = SegmentationMaskLoader(S3FileSystem(anon=False))
+        self.loader: SegmentationMaskLoader = DummyLoader()
         self.iou_threshold = iou_threshold

     def call_metric(
@@ -219,25 +211,21 @@ class SegmentationMaskMetric(Metric):

     def _filter_confusion_matrix(self, confusion, annotation, prediction):
         if self.annotation_filters or self.prediction_filters:
+            new_confusion = np.zeros_like(confusion)
             # we mask the confusion matrix instead of the images
             if self.annotation_filters:
                 annotation_indexes = {
                     segment.index for segment in annotation.annotations
                 }
-                indexes_to_remove = (
-                    set(range(confusion.shape[0] - 1)) - annotation_indexes
-                )
-                for row in indexes_to_remove:
-                    confusion[row, :] = 0
+                for row in annotation_indexes:
+                    new_confusion[row, :] = confusion[row, :]
             if self.prediction_filters:
                 prediction_indexes = {
                     segment.index for segment in prediction.annotations
                 }
-                indexes_to_remove = (
-                    set(range(confusion.shape[0] - 1)) - prediction_indexes
-                )
-                for col in indexes_to_remove:
-                    confusion[:, col] = 0
+                for col in prediction_indexes:
+                    new_confusion[:, col] = confusion[:, col]
+            confusion = new_confusion
         return confusion


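Instead of zeroing the complement of the kept indexes in place, the filter now starts from an all-zero matrix and copies over only the rows and columns whose segment indexes survived the filters. The same logic on a toy matrix, with made-up indexes:

    import numpy as np

    confusion = np.arange(16, dtype=float).reshape(4, 4)
    annotation_indexes = {0, 2}  # rows kept by the annotation filters (made up)
    prediction_indexes = {1}     # columns kept by the prediction filters (made up)

    # Mirror of the new masking logic: copy kept rows/columns into a zeroed matrix.
    new_confusion = np.zeros_like(confusion)
    for row in annotation_indexes:
        new_confusion[row, :] = confusion[row, :]
    for col in prediction_indexes:
        new_confusion[:, col] = confusion[:, col]
    confusion = new_confusion
    print(confusion)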
@@ -20,21 +20,6 @@ from nucleus.metrics.segmentation_utils import (
 )
 from nucleus.prediction import PredictionList

-from .segmentation_loader import InMemoryLoader, SegmentationMaskLoader
-from .segmentation_metrics import (
-    SegmentationIOU,
-    SegmentationMAP,
-    SegmentationPrecision,
-    SegmentationRecall,
-)
-
-try:
-    from s3fs import S3FileSystem
-except (ModuleNotFoundError, OSError):
-    from ..package_not_installed import PackageNotInstalled
-
-    S3FileSystem = PackageNotInstalled
-
 from .base import Metric, ScalarResult
 from .polygon_metrics import (
     PolygonAveragePrecision,
@@ -42,6 +27,17 @@ from .polygon_metrics import (
     PolygonPrecision,
     PolygonRecall,
 )
+from .segmentation_loader import (
+    DummyLoader,
+    InMemoryLoader,
+    SegmentationMaskLoader,
+)
+from .segmentation_metrics import (
+    SegmentationIOU,
+    SegmentationMAP,
+    SegmentationPrecision,
+    SegmentationRecall,
+)


 class SegToPolyMode(str, Enum):
@@ -93,7 +89,7 @@ class SegmentationMaskToPolyMetric(Metric):
         self.enforce_label_match = enforce_label_match
         assert 0 <= confidence_threshold <= 1
         self.confidence_threshold = confidence_threshold
-        self.loader = SegmentationMaskLoader(S3FileSystem(anon=False))
+        self.loader: SegmentationMaskLoader = DummyLoader()
         self.mode = mode

     def call_metric(
@@ -166,7 +166,7 @@ def non_max_suppress_confusion(confusion: np.ndarray, iou_threshold):


 def rasterize_polygons_to_segmentation_mask(
-    annotations: Sequence[BoxOrPolygonAnnotation], shape: Tuple[int, int]
+    annotations: Sequence[BoxOrPolygonAnnotation], shape: Tuple
 ) -> Tuple[np.ndarray, List[Segment]]:
     polys = [polygon_annotation_to_shape(a) for a in annotations]
     segments = [
nucleus/model.py CHANGED
@@ -27,7 +27,7 @@ class Model:

     Within Nucleus, Models work in the following way:

-    1. You first :meth:`create a Model<NucleusClient.add_model>`. You can do this
+    1. You first :meth:`create a Model<NucleusClient.create_model>`. You can do this
        just once and reuse the model on multiple datasets.
     2. You then :meth:`upload predictions <Dataset.upload_predictions>` to a dataset.
     3. Trigger :meth:`calculation of metrics <Dataset.calculate_evaluation_metrics>`
@@ -68,7 +68,7 @@ class Model:
             class_pdf={"label": 0.2, "other_label": 0.8},
         )

-        model = client.add_model(
+        model = client.create_model(
             name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"}
         )

@@ -89,7 +89,7 @@ class Model:
         dataset.calculate_evaluation_metrics(model)

     Models cannot be instantiated directly and instead must be created via API
-    endpoint, using :meth:`NucleusClient.add_model`.
+    endpoint, using :meth:`NucleusClient.create_model`.
     """

     def __init__(
@@ -100,7 +100,7 @@ class Model:
         metadata,
         client,
         bundle_name=None,
-        tags: List[str] = None,
+        tags=None,
     ):
         self.id = model_id
         self.name = name
@@ -111,7 +111,7 @@ class Model:
         self._client = client

     def __repr__(self):
-        return f"Model(model_id='{self.id}', name='{self.name}', reference_id='{self.reference_id}', metadata={self.metadata}, bundle_name={self.bundle_name}, client={self._client})"
+        return f"Model(model_id='{self.id}', name='{self.name}', reference_id='{self.reference_id}', metadata={self.metadata}, bundle_name={self.bundle_name}, tags={self.tags}, client={self._client})"

     def __eq__(self, other):
         return (
@@ -210,9 +210,9 @@ class Model:
             model.run("ds_123456")

         Args:
-            dataset_id: id of dataset to run inference on
-            job_id: nucleus job used to track async job progress
-            slice_id: (optional) id of slice of the dataset to run inference on
+            dataset_id: The ID of the dataset to run inference on.
+            job_id: The ID of the :class:`AsyncJob` used to track job progress.
+            slice_id: The ID of the slice of the dataset to run inference on.
         """
         response = self._client.make_request(
             {"dataset_id": dataset_id, "slice_id": slice_id},
@@ -234,16 +234,17 @@ class Model:
         Args:
             tags: list of tag names
         """
-        response = self._client.make_request(
+        response: requests.Response = self._client.make_request(
             {MODEL_TAGS_KEY: tags},
             f"model/{self.id}/tag",
             requests_command=requests.post,
+            return_raw_response=True,
         )

-        if response.get("msg", False):
+        if response.ok:
             self.tags.extend(tags)

-        return response
+        return response.json()

     def remove_tags(self, tags: List[str]):
         """Remove tag(s) from the model. ::
@@ -257,13 +258,14 @@ class Model:
         Args:
             tags: list of tag names to remove
         """
-        response = self._client.make_request(
+        response: requests.Response = self._client.make_request(
             {MODEL_TAGS_KEY: tags},
             f"model/{self.id}/tag",
             requests_command=requests.delete,
+            return_raw_response=True,
         )

-        if response.get("msg", False):
+        if response.ok:
             self.tags = list(filter(lambda t: t not in tags, self.tags))

-        return response
+        return response.json()
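With the docstrings now pointing at `create_model` and the tag endpoints returning the raw HTTP response internally, a typical round trip looks roughly like this; a sketch only, assuming a valid API key and network access:

    import nucleus

    client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
    model = client.create_model(
        name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"}
    )
    model.add_tags(["experimental", "cnn"])   # extends model.tags on an HTTP 2xx response
    model.remove_tags(["experimental"])       # filters model.tags on an HTTP 2xx response
    print(model)                              # __repr__ now includes tags=[...]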
nucleus/model_run.py CHANGED
@@ -8,7 +8,7 @@ For example::
     client = nucleus.NucleusClient(YOUR_SCALE_API_KEY)
     prediction_1 = nucleus.BoxPrediction(label="label", x=0, y=0, width=10, height=10, reference_id="1", confidence=0.9, class_pdf={'label': 0.9, 'other_label': 0.1})
     prediction_2 = nucleus.BoxPrediction(label="label", x=0, y=0, width=10, height=10, reference_id="2", confidence=0.2, class_pdf={'label': 0.2, 'other_label': 0.8})
-    model = client.add_model(name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"})
+    model = client.create_model(name="My Model", reference_id="My-CNN", metadata={"timestamp": "121012401"})
     response = dataset.upload_predictions(model, [prediction_1, prediction_2])
 """

nucleus/prediction.py CHANGED
@@ -75,7 +75,7 @@ class SegmentationPrediction(SegmentationAnnotation):
         from nucleus import SegmentationPrediction

         segmentation = SegmentationPrediction(
-            mask_url="s3://your-bucket-name/pred-seg-masks/image_2_pred_mask_id1.png",
+            mask_url="s3://your-bucket-name/pred-seg-masks/image_2_pred_mask_id_1.png",
             annotations=[
                 Segment(label="grass", index="1"),
                 Segment(label="road", index="2"),
@@ -88,7 +88,7 @@ class SegmentationPrediction(SegmentationAnnotation):

     Parameters:
         mask_url (str): A URL pointing to the segmentation prediction mask which is
-            accessible to Scale. This URL can be a path to a local file.
+            accessible to Scale. This "URL" can also be a path to a local file.
             The mask is an HxW int8 array saved in PNG format,
             with each pixel value ranging from [0, N), where N is the number of
             possible classes (for semantic segmentation) or instances (for instance
@@ -226,7 +226,7 @@ class LinePrediction(LineAnnotation):

     Parameters:
         label (str): The label for this prediction (e.g. car, pedestrian, bicycle).
-        vertices List[:class:`Point`]: The list of points making up the line.
+        vertices (List[:class:`Point`]): The list of points making up the line.
         reference_id (str): User-defined ID of the image to which to apply this
             annotation.
         confidence: 0-1 indicating the confidence of the prediction.
nucleus/slice.py CHANGED
@@ -1,17 +1,17 @@
+import datetime
 import warnings
-from typing import Dict, Iterable, List, Set, Tuple, Union
+from typing import Dict, Iterable, List, Optional, Set, Tuple, Union

 import requests

 from nucleus.annotation import Annotation
-from nucleus.constants import EXPORTED_ROWS, ITEMS_KEY
+from nucleus.constants import EXPORT_FOR_TRAINING_KEY, EXPORTED_ROWS, ITEMS_KEY
 from nucleus.dataset_item import DatasetItem
 from nucleus.errors import NucleusAPIError
 from nucleus.job import AsyncJob
 from nucleus.utils import (
     KeyErrorDict,
     convert_export_payload,
-    format_dataset_item_response,
     format_scale_task_info_response,
     paginate_generator,
 )
@@ -50,9 +50,11 @@ class Slice:
         self._client = client
         self._name = None
         self._dataset_id = None
+        self._created_at = None
+        self._pending_job_count = None

     def __repr__(self):
-        return f"Slice(slice_id='{self.id}', client={self._client})"
+        return f"Slice(slice_id='{self.id}', name={self._name}, dataset_id={self._dataset_id})"

     def __eq__(self, other):
         if self.id == other.id:
@@ -60,6 +62,43 @@ class Slice:
             return True
         return False

+    @property
+    def created_at(self) -> Optional[datetime.datetime]:
+        """Timestamp of creation of the slice
+
+        Returns:
+            datetime of creation or None if not created yet
+        """
+        if self._created_at is None:
+            self._created_at = self.info().get("created_at", None)
+        return self._created_at
+
+    @property
+    def pending_job_count(self) -> Optional[int]:
+        if self._pending_job_count is None:
+            self._pending_job_count = self.info().get(
+                "pending_job_count", None
+            )
+        return self._pending_job_count
+
+    @classmethod
+    def from_request(cls, request, client):
+        instance = cls(request["id"], client)
+        instance._name = request.get("name", None)
+        instance._dataset_id = request.get("dataset_id", None)
+        created_at_str = request.get("created_at").rstrip("Z")
+        if hasattr(datetime.datetime, "fromisoformat"):
+            instance._created_at = datetime.datetime.fromisoformat(
+                created_at_str
+            )
+        else:
+            fmt_str = r"%Y-%m-%dT%H:%M:%S.%f"  # replaces fromisoformat, which is not available in Python 3.6
+            instance._created_at = datetime.datetime.strptime(
+                created_at_str, fmt_str
+            )
+        instance._pending_job_count = request.get("pending_job_count", None)
+        return instance
+
     @property
     def slice_id(self):
         warnings.warn(
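The `fromisoformat` fallback above can be exercised in isolation; a minimal sketch with a made-up timestamp string:

    import datetime

    created_at_str = "2022-05-17T21:35:00.000000Z".rstrip("Z")
    if hasattr(datetime.datetime, "fromisoformat"):  # Python 3.7+
        created_at = datetime.datetime.fromisoformat(created_at_str)
    else:  # Python 3.6 has no fromisoformat, so parse the format explicitly
        created_at = datetime.datetime.strptime(created_at_str, r"%Y-%m-%dT%H:%M:%S.%f")
    print(created_at)  # 2022-05-17 21:35:00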
@@ -86,9 +125,11 @@ class Slice:
         """Generator yielding all dataset items in the dataset.

         ::
-            sum_example_field = 0
-            for item in slice.items_generator():
-                sum += item.metadata["example_field"]
+
+            collected_ref_ids = []
+            for item in dataset.items_generator():
+                print(f"Exporting item: {item.reference_id}")
+                collected_ref_ids.append(item.reference_id)

         Args:
             page_size (int, optional): Number of items to return per page. If you are
@@ -111,7 +152,7 @@ class Slice:
     def items(self):
         """All DatasetItems contained in the Slice.

-        For fetching more than 200k items see :meth:`Slice.items_generator`.
+        We recommend using :meth:`Slice.items_generator` if the Slice has more than 200k items.

         """
         try:
@@ -185,7 +226,7 @@ class Slice:

         Returns:
             Generator where each element is a dict containing the DatasetItem
-            and all of its associated Annotations, grouped by type.
+            and all of its associated Annotations, grouped by type (e.g. box).
         ::

             Iterable[{
@@ -194,18 +235,22 @@ class Slice:
                 "box": List[BoxAnnotation],
                 "polygon": List[PolygonAnnotation],
                 "cuboid": List[CuboidAnnotation],
+                "line": List[LineAnnotation],
                 "segmentation": List[SegmentationAnnotation],
                 "category": List[CategoryAnnotation],
+                "keypoints": List[KeypointsAnnotation],
             }
         }]
         """
-        for item_metadata in self.items:
-            yield format_dataset_item_response(
-                self._client.dataitem_loc(
-                    dataset_id=self.dataset_id,
-                    dataset_item_id=item_metadata["id"],
-                )
-            )
+        json_generator = paginate_generator(
+            client=self._client,
+            endpoint=f"slice/{self.id}/exportForTrainingPage",
+            result_key=EXPORT_FOR_TRAINING_KEY,
+            page_size=100000,
+        )
+        for data in json_generator:
+            for ia in convert_export_payload([data], has_predictions=False):
+                yield ia

     def items_and_annotations(
         self,
@@ -223,8 +268,10 @@ class Slice:
                 "box": List[BoxAnnotation],
                 "polygon": List[PolygonAnnotation],
                 "cuboid": List[CuboidAnnotation],
+                "line": List[LineAnnotation],
                 "segmentation": List[SegmentationAnnotation],
                 "category": List[CategoryAnnotation],
+                "keypoints": List[KeypointsAnnotation],
             }
         }]
         """
@@ -250,7 +297,7 @@ class Slice:

             List[{
                 "item": DatasetItem,
-                "predicions": {
+                "predictions": {
                     "box": List[BoxAnnotation],
                     "polygon": List[PolygonAnnotation],
                     "cuboid": List[CuboidAnnotation],
@@ -266,6 +313,40 @@ class Slice:
         )
         return convert_export_payload(api_payload[EXPORTED_ROWS], True)

+    def export_predictions_generator(
+        self, model
+    ) -> Iterable[Dict[str, Union[DatasetItem, Dict[str, List[Annotation]]]]]:
+        """Provides a list of all DatasetItems and Predictions in the Slice for the given Model.
+
+        Parameters:
+            model (Model): The Nucleus model object representing the model for which to export predictions.
+
+        Returns:
+            Iterable where each element is a dict containing the DatasetItem
+            and all of its associated Predictions, grouped by type (e.g. box).
+            ::
+
+                List[{
+                    "item": DatasetItem,
+                    "predictions": {
+                        "box": List[BoxAnnotation],
+                        "polygon": List[PolygonAnnotation],
+                        "cuboid": List[CuboidAnnotation],
+                        "segmentation": List[SegmentationAnnotation],
+                        "category": List[CategoryAnnotation],
+                    }
+                }]
+        """
+        json_generator = paginate_generator(
+            client=self._client,
+            endpoint=f"slice/{self.id}/{model.id}/exportForTrainingPage",
+            result_key=EXPORT_FOR_TRAINING_KEY,
+            page_size=100000,
+        )
+        for data in json_generator:
+            for ip in convert_export_payload([data], has_predictions=True):
+                yield ip
+
     def export_scale_task_info(self):
         """Fetches info for all linked Scale tasks of items/scenes in the slice.

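Consuming the new generator looks like the non-paginated export, but items stream in lazily page by page; a sketch, assuming a valid API key and that `client.get_slice` and `client.models` behave as in current SDK docs (the slice ID is made up):

    import nucleus

    client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
    slc = client.get_slice("slc_bx86ea222a6g057x4380")  # hypothetical slice ID
    model = client.models[0]  # pick the model whose predictions you want to export
    for row in slc.export_predictions_generator(model):
        item = row["item"]
        boxes = row["predictions"]["box"]
        print(item.reference_id, len(boxes))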
@@ -274,7 +355,7 @@ class Slice:
         and info on their corresponding Scale tasks within the dataset::

             List[{
-                "item" | "scene": Union[:class:`DatasetItem`, :class:`Scene`],
+                "item" | "scene": Union[DatasetItem, Scene],
                 "scale_task_info": {
                     "task_id": str,
                     "subtask_id": str,
@@ -285,7 +366,7 @@ class Slice:
                     "batch": str,
                     "created_at": str,
                     "completed_at": Optional[str]
-                }[]
+                }]
             }]

@@ -107,13 +107,17 @@ class Validate:
             ).dict(),
             "validate/scenario_test",
         )
-        return ScenarioTest(response[SCENARIO_TEST_ID_KEY], self.connection)
+        return ScenarioTest.from_id(
+            response[SCENARIO_TEST_ID_KEY], self.connection
+        )

     def get_scenario_test(self, scenario_test_id: str) -> ScenarioTest:
         response = self.connection.get(
             f"validate/scenario_test/{scenario_test_id}",
         )
-        return ScenarioTest(response["unit_test"]["id"], self.connection)
+        return ScenarioTest.from_id(
+            response["unit_test"]["id"], self.connection
+        )

     @property
     def scenario_tests(self) -> List[ScenarioTest]:
@@ -131,12 +135,13 @@ class Validate:
             A list of ScenarioTest objects.
         """
         response = self.connection.get(
-            "validate/scenario_test",
+            "validate/scenario_test/details",
         )
-        return [
-            ScenarioTest(test_id, self.connection)
-            for test_id in response["scenario_test_ids"]
+        tests = [
+            ScenarioTest.from_response(payload, self.connection)
+            for payload in response
         ]
+        return tests

     def delete_scenario_test(self, scenario_test_id: str) -> bool:
         """Deletes a Scenario Test. ::
@@ -5,14 +5,6 @@ from pydantic import validator
 from nucleus.pydantic_base import ImmutableModel


-class EvalDetail(ImmutableModel):
-    id: str
-
-
-class GetEvalHistory(ImmutableModel):
-    evaluations: List[EvalDetail]
-
-
 class EvaluationResult(ImmutableModel):
     item_ref_id: str
     score: float