scale-nucleus 0.17.7__py3-none-any.whl → 0.17.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nucleus/annotation.py CHANGED
@@ -33,6 +33,7 @@ from .constants import (
33
33
  POLYGON_TYPE,
34
34
  POSITION_KEY,
35
35
  REFERENCE_ID_KEY,
36
+ TASK_ID_KEY,
36
37
  TAXONOMY_NAME_KEY,
37
38
  TRACK_REFERENCE_ID_KEY,
38
39
  TYPE_KEY,
@@ -158,6 +159,7 @@ class BoxAnnotation(Annotation): # pylint: disable=R0902
158
159
  metadata: Optional[Dict] = None
159
160
  embedding_vector: Optional[list] = None
160
161
  track_reference_id: Optional[str] = None
162
+ _task_id: Optional[str] = field(default=None, repr=False)
161
163
 
162
164
  def __post_init__(self):
163
165
  self.metadata = self.metadata if self.metadata else {}
@@ -178,6 +180,7 @@ class BoxAnnotation(Annotation): # pylint: disable=R0902
178
180
  metadata=payload.get(METADATA_KEY, {}),
179
181
  embedding_vector=payload.get(EMBEDDING_VECTOR_KEY, None),
180
182
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
183
+ _task_id=payload.get(TASK_ID_KEY, None),
181
184
  )
182
185
 
183
186
  def to_payload(self) -> dict:
@@ -195,6 +198,7 @@ class BoxAnnotation(Annotation): # pylint: disable=R0902
195
198
  METADATA_KEY: self.metadata,
196
199
  EMBEDDING_VECTOR_KEY: self.embedding_vector,
197
200
  TRACK_REFERENCE_ID_KEY: self.track_reference_id,
201
+ TASK_ID_KEY: self._task_id,
198
202
  }
199
203
 
200
204
  def __eq__(self, other):
@@ -209,6 +213,7 @@ class BoxAnnotation(Annotation): # pylint: disable=R0902
209
213
  and sorted(self.metadata.items()) == sorted(other.metadata.items())
210
214
  and self.embedding_vector == other.embedding_vector
211
215
  and self.track_reference_id == other.track_reference_id
216
+ and self._task_id == other._task_id
212
217
  )
213
218
 
214
219
 
@@ -275,6 +280,7 @@ class LineAnnotation(Annotation):
275
280
  annotation_id: Optional[str] = None
276
281
  metadata: Optional[Dict] = None
277
282
  track_reference_id: Optional[str] = None
283
+ _task_id: Optional[str] = field(default=None, repr=False)
278
284
 
279
285
  def __post_init__(self):
280
286
  self.metadata = self.metadata if self.metadata else {}
@@ -304,6 +310,7 @@ class LineAnnotation(Annotation):
304
310
  annotation_id=payload.get(ANNOTATION_ID_KEY, None),
305
311
  metadata=payload.get(METADATA_KEY, {}),
306
312
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
313
+ _task_id=payload.get(TASK_ID_KEY, None),
307
314
  )
308
315
 
309
316
  def to_payload(self) -> dict:
@@ -317,6 +324,7 @@ class LineAnnotation(Annotation):
317
324
  ANNOTATION_ID_KEY: self.annotation_id,
318
325
  METADATA_KEY: self.metadata,
319
326
  TRACK_REFERENCE_ID_KEY: self.track_reference_id,
327
+ TASK_ID_KEY: self._task_id,
320
328
  }
321
329
  return payload
322
330
 
@@ -367,6 +375,7 @@ class PolygonAnnotation(Annotation):
367
375
  metadata: Optional[Dict] = None
368
376
  embedding_vector: Optional[list] = None
369
377
  track_reference_id: Optional[str] = None
378
+ _task_id: Optional[str] = field(default=None, repr=False)
370
379
 
371
380
  def __post_init__(self):
372
381
  self.metadata = self.metadata if self.metadata else {}
@@ -397,6 +406,7 @@ class PolygonAnnotation(Annotation):
397
406
  metadata=payload.get(METADATA_KEY, {}),
398
407
  embedding_vector=payload.get(EMBEDDING_VECTOR_KEY, None),
399
408
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
409
+ _task_id=payload.get(TASK_ID_KEY, None),
400
410
  )
401
411
 
402
412
  def to_payload(self) -> dict:
@@ -411,6 +421,7 @@ class PolygonAnnotation(Annotation):
411
421
  METADATA_KEY: self.metadata,
412
422
  EMBEDDING_VECTOR_KEY: self.embedding_vector,
413
423
  TRACK_REFERENCE_ID_KEY: self.track_reference_id,
424
+ TASK_ID_KEY: self._task_id,
414
425
  }
415
426
  return payload
416
427
 
@@ -507,6 +518,7 @@ class KeypointsAnnotation(Annotation):
507
518
  annotation_id: Optional[str] = None
508
519
  metadata: Optional[Dict] = None
509
520
  track_reference_id: Optional[str] = None
521
+ _task_id: Optional[str] = field(default=None, repr=False)
510
522
 
511
523
  def __post_init__(self):
512
524
  self.metadata = self.metadata or {}
@@ -559,6 +571,7 @@ class KeypointsAnnotation(Annotation):
559
571
  annotation_id=payload.get(ANNOTATION_ID_KEY, None),
560
572
  metadata=payload.get(METADATA_KEY, {}),
561
573
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
574
+ _task_id=payload.get(TASK_ID_KEY, None),
562
575
  )
563
576
 
564
577
  def to_payload(self) -> dict:
@@ -574,6 +587,7 @@ class KeypointsAnnotation(Annotation):
574
587
  ANNOTATION_ID_KEY: self.annotation_id,
575
588
  METADATA_KEY: self.metadata,
576
589
  TRACK_REFERENCE_ID_KEY: self.track_reference_id,
590
+ TASK_ID_KEY: self._task_id,
577
591
  }
578
592
  return payload
579
593
 
@@ -678,6 +692,7 @@ class CuboidAnnotation(Annotation): # pylint: disable=R0902
678
692
  annotation_id: Optional[str] = None
679
693
  metadata: Optional[Dict] = None
680
694
  track_reference_id: Optional[str] = None
695
+ _task_id: Optional[str] = field(default=None, repr=False)
681
696
 
682
697
  def __post_init__(self):
683
698
  self.metadata = self.metadata if self.metadata else {}
@@ -694,6 +709,7 @@ class CuboidAnnotation(Annotation): # pylint: disable=R0902
694
709
  annotation_id=payload.get(ANNOTATION_ID_KEY, None),
695
710
  metadata=payload.get(METADATA_KEY, {}),
696
711
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
712
+ _task_id=payload.get(TASK_ID_KEY, None),
697
713
  )
698
714
 
699
715
  def to_payload(self) -> dict:
@@ -713,7 +729,8 @@ class CuboidAnnotation(Annotation): # pylint: disable=R0902
713
729
  payload[METADATA_KEY] = self.metadata
714
730
  if self.track_reference_id:
715
731
  payload[TRACK_REFERENCE_ID_KEY] = self.track_reference_id
716
-
732
+ if self._task_id:
733
+ payload[TASK_ID_KEY] = self._task_id
717
734
  return payload
718
735
 
719
736
 
@@ -926,6 +943,7 @@ class CategoryAnnotation(Annotation):
926
943
  taxonomy_name: Optional[str] = None
927
944
  metadata: Optional[Dict] = None
928
945
  track_reference_id: Optional[str] = None
946
+ _task_id: Optional[str] = field(default=None, repr=False)
929
947
 
930
948
  def __post_init__(self):
931
949
  self.metadata = self.metadata if self.metadata else {}
@@ -938,6 +956,7 @@ class CategoryAnnotation(Annotation):
938
956
  taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
939
957
  metadata=payload.get(METADATA_KEY, {}),
940
958
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
959
+ _task_id=payload.get(TASK_ID_KEY, None),
941
960
  )
942
961
 
943
962
  def to_payload(self) -> dict:
@@ -948,6 +967,7 @@ class CategoryAnnotation(Annotation):
948
967
  REFERENCE_ID_KEY: self.reference_id,
949
968
  METADATA_KEY: self.metadata,
950
969
  TRACK_REFERENCE_ID_KEY: self.track_reference_id,
970
+ TASK_ID_KEY: self._task_id,
951
971
  }
952
972
  if self.taxonomy_name is not None:
953
973
  payload[TAXONOMY_NAME_KEY] = self.taxonomy_name
@@ -963,6 +983,7 @@ class MultiCategoryAnnotation(Annotation):
963
983
  taxonomy_name: Optional[str] = None
964
984
  metadata: Optional[Dict] = None
965
985
  track_reference_id: Optional[str] = None
986
+ _task_id: Optional[str] = field(default=None, repr=False)
966
987
 
967
988
  def __post_init__(self):
968
989
  self.metadata = self.metadata if self.metadata else {}
@@ -975,6 +996,7 @@ class MultiCategoryAnnotation(Annotation):
975
996
  taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
976
997
  metadata=payload.get(METADATA_KEY, {}),
977
998
  track_reference_id=payload.get(TRACK_REFERENCE_ID_KEY, None),
999
+ _task_id=payload.get(TASK_ID_KEY, None),
978
1000
  )
979
1001
 
980
1002
  def to_payload(self) -> dict:
@@ -985,6 +1007,7 @@ class MultiCategoryAnnotation(Annotation):
985
1007
  REFERENCE_ID_KEY: self.reference_id,
986
1008
  METADATA_KEY: self.metadata,
987
1009
  TRACK_REFERENCE_ID_KEY: self.track_reference_id,
1010
+ TASK_ID_KEY: self._task_id,
988
1011
  }
989
1012
  if self.taxonomy_name is not None:
990
1013
  payload[TAXONOMY_NAME_KEY] = self.taxonomy_name
@@ -1023,6 +1046,7 @@ class SceneCategoryAnnotation(Annotation):
1023
1046
  reference_id: str
1024
1047
  taxonomy_name: Optional[str] = None
1025
1048
  metadata: Optional[Dict] = field(default_factory=dict)
1049
+ _task_id: Optional[str] = field(default=None, repr=False)
1026
1050
 
1027
1051
  @classmethod
1028
1052
  def from_json(cls, payload: dict):
@@ -1031,6 +1055,7 @@ class SceneCategoryAnnotation(Annotation):
1031
1055
  reference_id=payload[REFERENCE_ID_KEY],
1032
1056
  taxonomy_name=payload.get(TAXONOMY_NAME_KEY, None),
1033
1057
  metadata=payload.get(METADATA_KEY, {}),
1058
+ _task_id=payload.get(TASK_ID_KEY, None),
1034
1059
  )
1035
1060
 
1036
1061
  def to_payload(self) -> dict:
@@ -1040,6 +1065,7 @@ class SceneCategoryAnnotation(Annotation):
1040
1065
  GEOMETRY_KEY: {},
1041
1066
  REFERENCE_ID_KEY: self.reference_id,
1042
1067
  METADATA_KEY: self.metadata,
1068
+ TASK_ID_KEY: self._task_id,
1043
1069
  }
1044
1070
  if self.taxonomy_name is not None:
1045
1071
  payload[TAXONOMY_NAME_KEY] = self.taxonomy_name
@@ -1057,9 +1083,7 @@ class AnnotationList:
1057
1083
  default_factory=list
1058
1084
  )
1059
1085
  cuboid_annotations: List[CuboidAnnotation] = field(default_factory=list)
1060
- category_annotations: List[CategoryAnnotation] = field(
1061
- default_factory=list
1062
- )
1086
+ category_annotations: List[CategoryAnnotation] = field(default_factory=list)
1063
1087
  multi_category_annotations: List[MultiCategoryAnnotation] = field(
1064
1088
  default_factory=list
1065
1089
  )
@@ -176,9 +176,7 @@ class AnnotationUploader:
176
176
  """
177
177
 
178
178
  def fn():
179
- request_json = construct_segmentation_payload(
180
- segmentations, update
181
- )
179
+ request_json = construct_segmentation_payload(segmentations, update)
182
180
  form_data = [
183
181
  FileFormField(
184
182
  name=SERIALIZED_REQUEST_KEY,
@@ -212,13 +210,15 @@ class AnnotationUploader:
212
210
 
213
211
  return fn
214
212
 
215
- @staticmethod
216
- def check_for_duplicate_ids(annotations: Iterable[Annotation]):
217
- """Do not allow annotations to have the same (annotation_id, reference_id) tuple"""
213
+ def check_for_duplicate_ids(self, annotations: Iterable[Annotation]):
214
+ """Do not allow annotations to have the same (annotation_id, reference_id, task_id) tuple"""
218
215
 
219
- # some annotations like CategoryAnnotation do not have annotation_id attribute, and as such, we allow duplicates
220
216
  tuple_ids = [
221
- (ann.reference_id, ann.annotation_id) # type: ignore
217
+ (
218
+ ann.reference_id,
219
+ ann.annotation_id,
220
+ getattr(ann, "_task_id", None),
221
+ )
222
222
  for ann in annotations
223
223
  if hasattr(ann, "annotation_id")
224
224
  ]
@@ -226,7 +226,7 @@ class AnnotationUploader:
226
226
  duplicates = {key for key, value in tuple_count.items() if value > 1}
227
227
  if len(duplicates) > 0:
228
228
  raise DuplicateIDError(
229
- f"Duplicate annotations with the same (reference_id, annotation_id) properties found.\n"
229
+ f"Duplicate annotations with the same (reference_id, annotation_id, task_id) properties found.\n"
230
230
  f"Duplicates: {duplicates}\n"
231
231
  f"To fix this, avoid duplicate annotations, or specify a different annotation_id attribute "
232
232
  f"for the failing items."
@@ -255,3 +255,20 @@ class PredictionUploader(AnnotationUploader):
255
255
  self._route = (
256
256
  f"dataset/{dataset_id}/model/{model_id}/uploadPredictions"
257
257
  )
258
+
259
+ def check_for_duplicate_ids(self, annotations: Iterable[Annotation]):
260
+ """Do not allow predictions to have the same (annotation_id, reference_id) tuple"""
261
+ tuple_ids = [
262
+ (pred.annotation_id, pred.reference_id) # type: ignore
263
+ for pred in annotations
264
+ if hasattr(pred, "annotation_id") and hasattr(pred, "reference_id")
265
+ ]
266
+ tuple_count = Counter(tuple_ids)
267
+ duplicates = {key for key, value in tuple_count.items() if value > 1}
268
+ if len(duplicates) > 0:
269
+ raise DuplicateIDError(
270
+ f"Duplicate predictions with the same (annotation_id, reference_id) properties found.\n"
271
+ f"Duplicates: {duplicates}\n"
272
+ f"To fix this, avoid duplicate predictions, or specify a different annotation_id attribute "
273
+ f"for the failing items."
274
+ )
nucleus/constants.py CHANGED
@@ -148,6 +148,7 @@ STATUS_KEY = "status"
148
148
  SUCCESS_STATUS_CODES = [200, 201, 202]
149
149
  SLICE_TAGS_KEY = "slice_tags"
150
150
  TAXONOMY_NAME_KEY = "taxonomy_name"
151
+ TASK_ID_KEY = "task_id"
151
152
  TRACK_REFERENCE_ID_KEY = "track_reference_id"
152
153
  TRACK_REFERENCE_IDS_KEY = "track_reference_ids"
153
154
  TRACKS_KEY = "tracks"
nucleus/dataset.py CHANGED
@@ -332,8 +332,7 @@ class Dataset:
332
332
  dataset_item_jsons = response.get(DATASET_ITEMS_KEY, None)
333
333
 
334
334
  return [
335
- DatasetItem.from_json(item_json)
336
- for item_json in dataset_item_jsons
335
+ DatasetItem.from_json(item_json) for item_json in dataset_item_jsons
337
336
  ]
338
337
 
339
338
  @property
@@ -699,9 +698,7 @@ class Dataset:
699
698
  asynchronous
700
699
  ), "In order to avoid timeouts, you must set asynchronous=True when uploading videos."
701
700
 
702
- return self._append_video_scenes(
703
- video_scenes, update, asynchronous
704
- )
701
+ return self._append_video_scenes(video_scenes, update, asynchronous)
705
702
 
706
703
  if len(dataset_items) > WARN_FOR_LARGE_UPLOAD and not asynchronous:
707
704
  print(
@@ -1450,13 +1447,14 @@ class Dataset:
1450
1447
  return convert_export_payload(api_payload[EXPORTED_ROWS])
1451
1448
 
1452
1449
  def scene_and_annotation_generator(
1453
- self, slice_id=None, page_size: int = 10
1450
+ self, slice_id=None, page_size: int = 10, only_most_recent_tasks=True
1454
1451
  ):
1455
1452
  """Provides a generator of all Scenes and Annotations in the dataset grouped by scene.
1456
1453
 
1457
1454
  Args:
1458
1455
  slice_id: Optional slice ID to filter the scenes and annotations.
1459
1456
  page_size: Number of scenes to fetch per page. Default is 10.
1457
+ only_most_recent_tasks: If True, only the annotations corresponding to the most recent task for each item are returned.
1460
1458
 
1461
1459
  Returns:
1462
1460
  Generator where each element is a nested dict containing scene and annotation information of the dataset structured as a JSON.
@@ -1509,6 +1507,7 @@ class Dataset:
1509
1507
  result_key=EXPORT_FOR_TRAINING_KEY,
1510
1508
  page_size=page_size,
1511
1509
  sliceId=slice_id,
1510
+ onlyMostRecentTask=only_most_recent_tasks,
1512
1511
  )
1513
1512
 
1514
1513
  for data in json_generator:
@@ -1518,12 +1517,14 @@ class Dataset:
1518
1517
  self,
1519
1518
  query: Optional[str] = None,
1520
1519
  use_mirrored_images: bool = False,
1520
+ only_most_recent_tasks: bool = True,
1521
1521
  ) -> Iterable[Dict[str, Union[DatasetItem, Dict[str, List[Annotation]]]]]:
1522
1522
  """Provides a generator of all DatasetItems and Annotations in the dataset.
1523
1523
 
1524
1524
  Args:
1525
1525
  query: Structured query compatible with the `Nucleus query language <https://nucleus.scale.com/docs/query-language-reference>`_.
1526
1526
  use_mirrored_images: If True, returns the location of the mirrored image hosted in Scale S3. Useful when the original image is no longer available.
1527
+ only_most_recent_tasks: If True, only the annotations corresponding to the most recent task for each item are returned.
1527
1528
 
1528
1529
  Returns:
1529
1530
  Generator where each element is a dict containing the DatasetItem
@@ -1550,6 +1551,7 @@ class Dataset:
1550
1551
  page_size=10000, # max ES page size
1551
1552
  query=query,
1552
1553
  chip=use_mirrored_images,
1554
+ onlyMostRecentTask=only_most_recent_tasks,
1553
1555
  )
1554
1556
  for data in json_generator:
1555
1557
  for ia in convert_export_payload([data], has_predictions=False):
@@ -2356,10 +2358,7 @@ class Dataset:
2356
2358
  )
2357
2359
 
2358
2360
  if len(items) > 0:
2359
- if (
2360
- len(items) > GLOB_SIZE_THRESHOLD_CHECK
2361
- and not skip_size_warning
2362
- ):
2361
+ if len(items) > GLOB_SIZE_THRESHOLD_CHECK and not skip_size_warning:
2363
2362
  raise Exception(
2364
2363
  f"Found over {GLOB_SIZE_THRESHOLD_CHECK} items in {dirname}. If this is intended,"
2365
2364
  f" set skip_size_warning=True when calling this function."
@@ -2406,3 +2405,26 @@ class Dataset:
2406
2405
  route=f"dataset/{self.id}/model/{model.id}/pointcloud/{pointcloud_ref_id}/uploadLSSPrediction",
2407
2406
  requests_command=requests.post,
2408
2407
  )
2408
+
2409
+ def export_class_labels(self, slice_id: Optional[str] = None):
2410
+ """Fetches a list of class labels for the dataset.
2411
+
2412
+ Args:
2413
+ slice_id (str | None): The ID of the slice to export class labels for. If None, export class labels for the entire dataset.
2414
+
2415
+ Returns:
2416
+ A list of class labels for the dataset.
2417
+ """
2418
+ if slice_id:
2419
+ api_payload = self._client.make_request(
2420
+ payload=None,
2421
+ route=f"slice/{slice_id}/class_labels",
2422
+ requests_command=requests.get,
2423
+ )
2424
+ else:
2425
+ api_payload = self._client.make_request(
2426
+ payload=None,
2427
+ route=f"dataset/{self.id}/class_labels",
2428
+ requests_command=requests.get,
2429
+ )
2430
+ return api_payload.get("data", [])
nucleus/slice.py CHANGED
@@ -168,9 +168,7 @@ class Slice:
168
168
  @property
169
169
  def pending_job_count(self) -> Optional[int]:
170
170
  if self._pending_job_count is None:
171
- self._pending_job_count = self.info().get(
172
- "pending_job_count", None
173
- )
171
+ self._pending_job_count = self.info().get("pending_job_count", None)
174
172
  return self._pending_job_count
175
173
 
176
174
  @classmethod
@@ -410,9 +408,13 @@ class Slice:
410
408
 
411
409
  def items_and_annotation_generator(
412
410
  self,
411
+ use_mirrored_images: bool = False,
413
412
  ) -> Iterable[Dict[str, Union[DatasetItem, Dict[str, List[Annotation]]]]]:
414
413
  """Provides a generator of all DatasetItems and Annotations in the slice.
415
414
 
415
+ Args:
416
+ use_mirrored_images: If True, returns the location of the mirrored image hosted in Scale S3. Useful when the original image is no longer available.
417
+
416
418
  Returns:
417
419
  Generator where each element is a dict containing the DatasetItem
418
420
  and all of its associated Annotations, grouped by type (e.g. box).
@@ -436,6 +438,7 @@ class Slice:
436
438
  endpoint=f"slice/{self.id}/exportForTrainingPage",
437
439
  result_key=EXPORT_FOR_TRAINING_KEY,
438
440
  page_size=10000, # max ES page size
441
+ chip=use_mirrored_images,
439
442
  )
440
443
  for data in json_generator:
441
444
  for ia in convert_export_payload([data], has_predictions=False):
@@ -700,6 +703,15 @@ class Slice:
700
703
  )
701
704
  return api_payload
702
705
 
706
+ def export_class_labels(self):
707
+ """Fetches a list of class labels for the slice."""
708
+ api_payload = self._client.make_request(
709
+ payload=None,
710
+ route=f"slice/{self.id}/class_labels",
711
+ requests_command=requests.get,
712
+ )
713
+ return api_payload.get("data", [])
714
+
703
715
 
704
716
  def check_annotations_are_in_slice(
705
717
  annotations: List[Annotation], slice_to_check: Slice
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: scale-nucleus
3
- Version: 0.17.7
3
+ Version: 0.17.9
4
4
  Summary: The official Python client library for Nucleus, the Data Platform for AI
5
5
  Home-page: https://scale.com/nucleus
6
6
  License: MIT
@@ -11,22 +11,22 @@ cli/reference.py,sha256=RuHVhmGTZNe0MfwpL96YjJdaH0OJzg98rz4xeIu4hJU,256
11
11
  cli/slices.py,sha256=nxq_Zg1m5oXuhz0ibyHkElvyVWt1AcE9tG-fN4CQxF8,1397
12
12
  cli/tests.py,sha256=NiwEVGuF08_jlCiKEIjKhwq55NvyU4xvPEJW5MJmdZg,4590
13
13
  nucleus/__init__.py,sha256=RSXlW0oL5ThX9LdRb5Eng6W9Fn_H9bqNZhAQrFQWMx8,49712
14
- nucleus/annotation.py,sha256=qogOLOmNwv2o13kNEZkIRbI2lkp1owNZ2OnRsFJUZwU,42904
15
- nucleus/annotation_uploader.py,sha256=ipXw7QhJQXqaftHrbJn8oCGcm6aXyoL0GhQA9EcZWD4,9580
14
+ nucleus/annotation.py,sha256=0JpBMl3UA5CXL4mdiDoBK6yTpLpY7B-TRY1nYopEcNI,44229
15
+ nucleus/annotation_uploader.py,sha256=LvK5vcP2dVWglPZZ36kofrm14DE0XGaXpNOhXBnMNn8,10400
16
16
  nucleus/async_job.py,sha256=yjPDwyyLIrF0K67anGB40xux1AMhWrq1X_hPvQ_ewzc,6890
17
17
  nucleus/async_utils.py,sha256=ayqajeSonX68fre3u8AoNRYT8GFGPd4_iu6YPQTvpvU,8226
18
18
  nucleus/autocurate.py,sha256=kI0vRqad_An8SN5JX6sSdGP_vNHJI2Pq4NINHuhNf2U,1080
19
19
  nucleus/camera_params.py,sha256=fl17aaSAZDAJIWo6F2HFvM6HKGcQh9fXvo4t3RzGMc4,3726
20
20
  nucleus/chip_utils.py,sha256=1J1NHCh0ZptW8cdeuLWFM_cXuwQVSQFtSF8kXU8s2tI,6743
21
21
  nucleus/connection.py,sha256=q212plDtWoonfXWMVaCqTZBPZTy8dnNSGj0YeAR1Qmk,2990
22
- nucleus/constants.py,sha256=K5boyMm3aMHCoT81zKurDfLRUIDcZM6W2GBU1Knn0Gg,5466
22
+ nucleus/constants.py,sha256=6Ce2RUb2S9vd4ww6sDG27ueE_HChIPVHC0CwQ60Czig,5490
23
23
  nucleus/data_transfer_object/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
24
24
  nucleus/data_transfer_object/dataset_details.py,sha256=1YGvfKkPSqDrXK_y5mBXyRThY07tU-nwOCYTkYCSl6k,214
25
25
  nucleus/data_transfer_object/dataset_info.py,sha256=5P_gpvAyaqXxj2ZQuzLkGN2XROaN9Me56OLybCmO3R4,940
26
26
  nucleus/data_transfer_object/dataset_size.py,sha256=oe-dXaMLpsQRDcJQRZ9Ja8JTagYz4dviZuTognEylp0,111
27
27
  nucleus/data_transfer_object/job_status.py,sha256=hxvyNdrdVdj3UpEfwvryKC_QCJQEC9ru6IPjhPFcK44,2038
28
28
  nucleus/data_transfer_object/scenes_list.py,sha256=iTHE6vA47bRB6ciyEU4LArUXEXco4ArnGvZTGTeK8xs,432
29
- nucleus/dataset.py,sha256=ipAF9oS_5GDkdrCD-BH7kkfw9swEhVef6TeIp4h8iUw,94217
29
+ nucleus/dataset.py,sha256=W-QR-RDvRk369m42osZjNNbdnMVk64CzWDjLma9T9fk,95412
30
30
  nucleus/dataset_item.py,sha256=y9ia47i31lX2wvw6EkVAxeHburMrrZpuyjEGlstWa2A,10166
31
31
  nucleus/dataset_item_uploader.py,sha256=BD0FTgimEFYmDbnOLIaQZS3OLDfLe5wumADDmgMX598,6684
32
32
  nucleus/deprecation_warning.py,sha256=5C9dVusR5UkUQnW2MrRkIXCfbc8ULc7xOaB134agNKk,976
@@ -61,7 +61,7 @@ nucleus/pydantic_base.py,sha256=ZBUVrf948qzaxSuTaiDWxPC_Y8AOBdLKfi52ozGpGWk,1388
61
61
  nucleus/quaternion.py,sha256=TAnwj4arQXoTeofFgZMdZsCyxAMnu23N6to0F1WFNwk,1111
62
62
  nucleus/retry_strategy.py,sha256=daKZqjZYCh87WtXoVUuR9BZu2TTE-CtOFEYZ-d6xVMY,312
63
63
  nucleus/scene.py,sha256=qZQD7QdF6Ics8kuszsl278NCowKVnAkVNGHvPr5luRo,26937
64
- nucleus/slice.py,sha256=VlFRE4NSLDcus86EfSHniWGLKBdjEokrnrmBtUz8pT0,28056
64
+ nucleus/slice.py,sha256=1WZLPrfEywFY8d65ekMnduwj0RFPqWm7dgP307ajFRM,28600
65
65
  nucleus/test_launch_integration.py,sha256=oFKLZWjFGeUvwVV0XAAjP1Y_oKFkaouh_SXVPXtCvcE,10688
66
66
  nucleus/track.py,sha256=ROmOyzYZKrHVTnLBhnk-qEBtklD_EDsSnRcGYE8xG4E,3247
67
67
  nucleus/upload_response.py,sha256=wR_pfZCBju1vGiGqbVgk8zhM6GhD3ebYxyGBm8y0GvY,3287
@@ -85,8 +85,8 @@ nucleus/validate/scenario_test.py,sha256=pCmM157dblSciZCDTw-f47Fpy3OUZFgXmokdhIL
85
85
  nucleus/validate/scenario_test_evaluation.py,sha256=Q0WzaEE9uUbPVc4EHlCoKjhJcqMNt4QbyiiJx12VOR0,4075
86
86
  nucleus/validate/scenario_test_metric.py,sha256=AhVFOB1ULwBqlZ2X_Au1TXy4iQELljtzR4ZpeLB35So,1209
87
87
  nucleus/validate/utils.py,sha256=VjdIJj9Pii4z4L6xbvClAc7ra_J7cX0vWB_J2X6yrGE,185
88
- scale_nucleus-0.17.7.dist-info/LICENSE,sha256=jaTGyQSQIZeWMo5iyYqgbAYHR9Bdy7nOzgE-Up3m_-g,1075
89
- scale_nucleus-0.17.7.dist-info/METADATA,sha256=KARLBnMTHNhbe9MHtOZQNUiQZXILdtyFRDxTngLe30o,7920
90
- scale_nucleus-0.17.7.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
91
- scale_nucleus-0.17.7.dist-info/entry_points.txt,sha256=fmqEzh6NZQyg9eFMILnWabKT8OWQTMSCdDzMiVq2zYs,32
92
- scale_nucleus-0.17.7.dist-info/RECORD,,
88
+ scale_nucleus-0.17.9.dist-info/LICENSE,sha256=jaTGyQSQIZeWMo5iyYqgbAYHR9Bdy7nOzgE-Up3m_-g,1075
89
+ scale_nucleus-0.17.9.dist-info/METADATA,sha256=v7fMJK93-vcbjE68FfsV4ADjpgqRJDGB57qNeSoUhzA,7920
90
+ scale_nucleus-0.17.9.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
91
+ scale_nucleus-0.17.9.dist-info/entry_points.txt,sha256=fmqEzh6NZQyg9eFMILnWabKT8OWQTMSCdDzMiVq2zYs,32
92
+ scale_nucleus-0.17.9.dist-info/RECORD,,