scale-nucleus 0.14.7__py3-none-any.whl → 0.14.14b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cli/slices.py CHANGED
@@ -23,12 +23,9 @@ def slices(ctx, web):
 @slices.command("list")
 def list_slices():
     """List all available Slices"""
-    with Live(
-        Spinner("dots4", text="Finding your Slices!"),
-        vertical_overflow="visible",
-    ) as live:
-        client = init_client()
-        datasets = client.datasets
+    client = init_client()
+    console = Console()
+    with console.status("Finding your Slices!", spinner="dots4"):
         table = Table(
             Column("id", overflow="fold", min_width=24),
             "name",
@@ -37,26 +34,15 @@ def list_slices():
             title=":cake: Slices",
             title_justify="left",
         )
-        errors = {}
-        for ds in datasets:
-            try:
-                ds_slices = ds.slices
-                if ds_slices:
-                    for slc_id in ds_slices:
-                        slice_url = nucleus_url(f"{ds.id}/{slc_id}")
-                        slice_info = client.get_slice(slc_id).info()
-                        table.add_row(
-                            slc_id, slice_info["name"], ds.name, slice_url
-                        )
-                        live.update(table)
-            except NucleusAPIError as e:
-                errors[ds.id] = e
-
-        error_tree = Tree(
-            ":x: Encountered the following errors while fetching information"
-        )
-        for ds_id, error in errors.items():
-            dataset_branch = error_tree.add(f"Dataset: {ds_id}")
-            dataset_branch.add(f"Error: {error}")
+        datasets = client.datasets
+        id_to_datasets = {d.id: d for d in datasets}
+        all_slices = client.slices
+        for s in all_slices:
+            table.add_row(
+                s.id,
+                s.name,
+                id_to_datasets[s.dataset_id].name,
+                nucleus_url(f"{s.dataset_id}/{s.id}"),
+            )
 
-    Console().print(error_tree)
+    console.print(table)
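
The rewritten command fetches every Slice in a single call through the new `client.slices` property instead of looping over each dataset, so the per-dataset error collection (the `Tree` of failures) is no longer needed. A minimal sketch of the same pattern outside the CLI, assuming a valid API key and the same `rich` primitives the CLI already imports::

    import nucleus
    from rich.console import Console
    from rich.table import Column, Table

    client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
    console = Console()
    with console.status("Finding your Slices!", spinner="dots4"):
        table = Table(Column("id", overflow="fold", min_width=24), "name", "dataset_name", title=":cake: Slices")
        id_to_datasets = {d.id: d for d in client.datasets}
        for s in client.slices:  # one request for all slices, new in this release
            table.add_row(s.id, s.name, id_to_datasets[s.dataset_id].name)
    console.print(table)
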
nucleus/__init__.py CHANGED
@@ -235,6 +235,12 @@ class NucleusClient:
         """
         return self.list_jobs()
 
+    @property
+    def slices(self) -> List[Slice]:
+        response = self.make_request({}, "slice/", requests.get)
+        slices = [Slice.from_request(info, self) for info in response]
+        return slices
+
     @deprecated(msg="Use the NucleusClient.models property in the future.")
     def list_models(self) -> List[Model]:
         return self.models
@@ -441,7 +447,8 @@ class NucleusClient:
         Deletes a dataset by ID.
 
         All items, annotations, and predictions associated with the dataset will
-        be deleted as well.
+        be deleted as well. Note that if this dataset is linked to a Scale or Rapid
+        labeling project, the project itself will not be deleted.
 
         Parameters:
             dataset_id: The ID of the dataset to delete.
@@ -1020,6 +1027,7 @@ class NucleusClient:
         payload: Optional[dict],
         route: str,
         requests_command=requests.post,
+        return_raw_response: bool = False,
     ) -> dict:
         """Makes a request to a Nucleus API endpoint.
 
@@ -1029,9 +1037,10 @@ class NucleusClient:
             payload: Given request payload.
            route: Route for the request.
             Requests command: ``requests.post``, ``requests.get``, or ``requests.delete``.
+            return_raw_response: return the request's response object entirely
 
         Returns:
-            Response payload as JSON dict.
+            Response payload as JSON dict or request object.
         """
         if payload is None:
             payload = {}
@@ -1041,18 +1050,7 @@ class NucleusClient:
                 "Received defined payload with GET request! Will ignore payload"
             )
             payload = None
-        return self._connection.make_request(payload, route, requests_command)  # type: ignore
-
-    def handle_bad_response(
-        self,
-        endpoint,
-        requests_command,
-        requests_response=None,
-        aiohttp_response=None,
-    ):
-        self._connection.handle_bad_response(
-            endpoint, requests_command, requests_response, aiohttp_response
-        )
+        return self._connection.make_request(payload, route, requests_command, return_raw_response)  # type: ignore
 
     def _set_api_key(self, api_key):
         """Fetch API key from environment variable NUCLEUS_API_KEY if not set"""
nucleus/annotation.py CHANGED
@@ -105,7 +105,7 @@ class BoxAnnotation(Annotation): # pylint: disable=R0902
            reference_id="image_1",
            annotation_id="image_1_car_box_1",
            metadata={"vehicle_color": "red"},
-            embedding_vector=[0.1423, 1.432, ...3.829],
+            embedding_vector=[0.1423, 1.432, ..., 3.829],
        )

    Parameters:
@@ -310,7 +310,7 @@ class PolygonAnnotation(Annotation):
            reference_id="image_2",
            annotation_id="image_2_bus_polygon_1",
            metadata={"vehicle_color": "yellow"},
-            embedding_vector=[0.1423, 1.432, ...3.829],
+            embedding_vector=[0.1423, 1.432, ..., 3.829],
        )

    Parameters:
@@ -691,7 +691,7 @@ class SegmentationAnnotation(Annotation):
        from nucleus import SegmentationAnnotation

        segmentation = SegmentationAnnotation(
-            mask_url="s3://your-bucket-name/segmentation-masks/image_2_mask_id1.png",
+            mask_url="s3://your-bucket-name/segmentation-masks/image_2_mask_id_1.png",
            annotations=[
                Segment(label="grass", index="1"),
                Segment(label="road", index="2"),
@@ -704,7 +704,8 @@ class SegmentationAnnotation(Annotation):

    Parameters:
        mask_url (str): A URL pointing to the segmentation prediction mask which is
-            accessible to Scale, or a local path. The mask is an HxW int8 array saved in PNG format,
+            accessible to Scale. This "URL" can also be a path to a local file.
+            The mask is an HxW int8 array saved in PNG format,
            with each pixel value ranging from [0, N), where N is the number of
            possible classes (for semantic segmentation) or instances (for instance
            segmentation).
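
The reworded `mask_url` docs make explicit that a local file path is accepted in addition to a Scale-accessible URL. A short sketch, assuming a local PNG mask whose pixel values index into the listed segments (the file name and IDs are illustrative)::

    from nucleus import Segment, SegmentationAnnotation

    segmentation = SegmentationAnnotation(
        mask_url="./masks/image_2_mask_id_1.png",  # local path instead of s3://...
        annotations=[
            Segment(label="grass", index="1"),
            Segment(label="road", index="2"),
        ],
        reference_id="image_2",
        annotation_id="image_2_mask_1",
    )
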
nucleus/connection.py CHANGED
@@ -40,7 +40,11 @@ class Connection:
         return self.make_request(payload, route, requests_command=requests.put)
 
     def make_request(
-        self, payload: dict, route: str, requests_command=requests.post
+        self,
+        payload: dict,
+        route: str,
+        requests_command=requests.post,
+        return_raw_response: bool = False,
     ) -> dict:
         """
         Makes a request to Nucleus endpoint and logs a warning if not
@@ -49,6 +53,7 @@ class Connection:
         :param payload: given payload
         :param route: route for the request
         :param requests_command: requests.post, requests.get, requests.delete
+        :param return_raw_response: return the request's response object entirely
         :return: response JSON
         """
         endpoint = f"{self.endpoint}/{route}"
@@ -73,6 +78,9 @@ class Connection:
         if not response.ok:
             self.handle_bad_response(endpoint, requests_command, response)
 
+        if return_raw_response:
+            return response
+
         return response.json()
 
     def handle_bad_response(
nucleus/constants.py CHANGED
@@ -28,6 +28,7 @@ AUTOTAGS_KEY = "autotags"
 AUTOTAG_SCORE_THRESHOLD = "score_threshold"
 EXPORTED_ROWS = "exportedRows"
 EXPORTED_SCALE_TASK_INFO_ROWS = "exportedScaleTaskInfoRows"
+EXPORT_FOR_TRAINING_KEY = "data"
 CAMERA_MODEL_KEY = "camera_model"
 CAMERA_PARAMS_KEY = "camera_params"
 CLASS_PDF_KEY = "class_pdf"
nucleus/dataset.py CHANGED
@@ -33,6 +33,7 @@ from .constants import (
     DEFAULT_ANNOTATION_UPDATE_MODE,
     EMBEDDING_DIMENSION_KEY,
     EMBEDDINGS_URL_KEY,
+    EXPORT_FOR_TRAINING_KEY,
     EXPORTED_ROWS,
     FRAME_RATE_KEY,
     ITEMS_KEY,
@@ -132,7 +133,7 @@ class Dataset:
 
     @property
     def is_scene(self) -> bool:
-        """If the dataset can contain scenes or not."""
+        """Whether or not the dataset contains scenes exclusively."""
         response = self._client.make_request(
             {}, f"dataset/{self.id}/is_scene", requests.get
         )[DATASET_IS_SCENE_KEY]
@@ -167,11 +168,12 @@ class Dataset:
     def items_generator(self, page_size=100000) -> Iterable[DatasetItem]:
         """Generator yielding all dataset items in the dataset.
 
-
         ::
-            sum_example_field = 0
+
+            collected_ref_ids = []
             for item in dataset.items_generator():
-                sum += item.metadata["example_field"]
+                print(f"Exporting item: {item.reference_id}")
+                collected_ref_ids.append(item.reference_id)
 
         Args:
             page_size (int, optional): Number of items to return per page. If you are
@@ -179,7 +181,7 @@ class Dataset:
                 the page size.
 
         Yields:
-            an iterable of DatasetItem objects.
+            :class:`DatasetItem`: A single DatasetItem object.
         """
         json_generator = paginate_generator(
             client=self._client,
@@ -194,7 +196,7 @@ class Dataset:
     def items(self) -> List[DatasetItem]:
         """List of all DatasetItem objects in the Dataset.
 
-        For fetching more than 200k items see :meth:`NucleusDataset.items_generator`.
+        We recommend using :meth:`Dataset.items_generator` if the Dataset has more than 200k items.
         """
         try:
             response = self._client.make_request(
@@ -269,11 +271,11 @@ class Dataset:
         dict as follows::
 
             {
-                "autotagPositiveTrainingItems": {
+                "autotagPositiveTrainingItems": List[{
                     ref_id: str,
                     model_prediction_annotation_id: str | None,
                    ground_truth_annotation_id: str | None,
-                }[],
+                }],
                 "autotag": {
                     id: str,
                     name: str,
@@ -293,10 +295,11 @@ class Dataset:
         return response
 
     def info(self) -> DatasetInfo:
-        """Retrieve information about the dataset
+        """Fetches information about the dataset.
 
         Returns:
-            :class:`DatasetInfo`
+            :class:`DatasetInfo`: Information about the dataset including its
+            Scale-generated ID, name, length, associated Models, Slices, and more.
         """
         response = self._client.make_request(
             {}, f"dataset/{self.id}/info", requests.get
@@ -513,7 +516,7 @@ class Dataset:
             )
 
         Parameters:
-            dataset_items ( \
+            items: ( \
                 Union[ \
                     Sequence[:class:`DatasetItem`], \
                     Sequence[:class:`LidarScene`] \
@@ -527,13 +530,12 @@ class Dataset:
            asynchronous: Whether or not to process the upload asynchronously (and
                return an :class:`AsyncJob` object). This is required when uploading
                scenes. Default is False.
-            files_per_upload_request: How large to make each upload request when your
-                files are local. If you get timeouts, you may need to lower this from
-                its default of 10. The default is 10.
-            local_file_upload_concurrency: How many local file requests to send
-                concurrently. If you start to see gateway timeouts or cloudflare related
-                errors, you may need to lower this from its default of 30.
-
+            files_per_upload_request: Optional; default is 10. We recommend lowering
+                this if you encounter timeouts.
+            local_files_per_upload_request: Optional; default is 10. We recommend
+                lowering this if you encounter timeouts.
+            local_file_upload_concurrency: Optional; default is 30. We recommend
+                lowering this if you encounter gateway timeouts or Cloudflare errors.
        Returns:
            For scenes
                If synchronous, returns a payload describing the upload result::
@@ -548,7 +550,8 @@ class Dataset:
 
                Otherwise, returns an :class:`AsyncJob` object.
            For images
-                If synchronous returns UploadResponse otherwise :class:`AsyncJob`
+                If synchronous returns :class:`nucleus.upload_response.UploadResponse`
+                otherwise :class:`AsyncJob`
        """
        assert (
            batch_size is None or batch_size < 30
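
The parameter docs above (for `Dataset.append`) now describe the three knobs that govern local file uploads. A hedged sketch of an append call that lowers them to work around timeouts, assuming `ds` is an existing Dataset and the image paths exist locally::

    from nucleus import DatasetItem

    items = [
        DatasetItem(image_location="./images/0001.jpg", reference_id="img_0001"),
        DatasetItem(image_location="./images/0002.jpg", reference_id="img_0002"),
    ]
    # Smaller request batches and lower concurrency if uploads are timing out.
    response = ds.append(
        items,
        update=True,
        local_files_per_upload_request=5,
        local_file_upload_concurrency=10,
    )
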
@@ -707,7 +710,7 @@ class Dataset:
         return response
 
     def iloc(self, i: int) -> dict:
-        """Retrieves dataset item by absolute numerical index.
+        """Fetches dataset item and associated annotations by absolute numerical index.
 
         Parameters:
             i: Absolute numerical index of the dataset item within the dataset.
@@ -735,7 +738,7 @@ class Dataset:
 
     @sanitize_string_args
     def refloc(self, reference_id: str) -> dict:
-        """Retrieves a dataset item by reference ID.
+        """Fetches a dataset item and associated annotations by reference ID.
 
         Parameters:
             reference_id: User-defined reference ID of the dataset item.
@@ -762,7 +765,7 @@ class Dataset:
         return format_dataset_item_response(response)
 
     def loc(self, dataset_item_id: str) -> dict:
-        """Retrieves a dataset item by Nucleus-generated ID.
+        """Fetches a dataset item and associated annotations by Nucleus-generated ID.
 
         Parameters:
             dataset_item_id: Nucleus-generated dataset item ID (starts with ``di_``).
@@ -790,7 +793,7 @@ class Dataset:
         return format_dataset_item_response(response)
 
     def ground_truth_loc(self, reference_id: str, annotation_id: str):
-        """Fetches a single ground truth annotation by id.
+        """Fetches a single ground truth annotation by ID.
 
         Parameters:
             reference_id: User-defined reference ID of the dataset item associated
@@ -856,9 +859,9 @@ class Dataset:
 
     @sanitize_string_args
     def delete_scene(self, reference_id: str):
-        """Deletes a Scene associated with the Dataset
+        """Deletes a sene from the Dataset by scene reference ID.
 
-        All items, annotations and predictions associated with the scene will be
+        All items, annotations, and predictions associated with the scene will be
         deleted as well.
 
         Parameters:
@@ -917,21 +920,25 @@ class Dataset:
             client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
             dataset = client.get_dataset("YOUR_DATASET_ID")
 
-            embeddings = {
+            all_embeddings = {
                 "reference_id_0": [0.1, 0.2, 0.3],
                 "reference_id_1": [0.4, 0.5, 0.6],
-            } # uploaded to s3 with the below URL
+                ...
+                "reference_id_10000": [0.7, 0.8, 0.9]
+            } # sharded and uploaded to s3 with the two below URLs
 
-            embeddings_url = "s3://dataset/embeddings_map.json"
+            embeddings_url_1 = "s3://dataset/embeddings_map_1.json"
+            embeddings_url_2 = "s3://dataset/embeddings_map_2.json"
 
             response = dataset.create_custom_index(
-                embeddings_url=[embeddings_url],
+                embeddings_url=[embeddings_url_1, embeddings_url_2],
                 embedding_dim=3
             )
 
         Parameters:
             embeddings_urls: List of URLs, each of which pointing to
-                a JSON mapping reference_id -> embedding vector.
+                a JSON mapping reference_id -> embedding vector. Each embedding JSON must
+                contain <5000 rows.
             embedding_dim: The dimension of the embedding vectors. Must be consistent
                 across all embedding vectors in the index.
 
@@ -967,6 +974,11 @@ class Dataset:
     def set_primary_index(self, image: bool = True, custom: bool = False):
         """Sets the primary index used for Autotag and Similarity Search on this dataset.
 
+        Parameters:
+            image: Whether to configure the primary index for images or objects.
+                Default is True (set primary image index).
+            custom: Whether to set the primary index to use custom or Nucleus-generated
+                embeddings. Default is True (use custom embeddings as the primary index).
         Returns:
 
             {
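
With the parameters now documented, switching Autotag and Similarity Search to a custom object-level index might look like the following one-liner, assuming a custom index has already been created for an existing Dataset `ds`::

    response = ds.set_primary_index(image=False, custom=True)
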
@@ -1055,7 +1067,7 @@ class Dataset:
         This endpoint is limited to index up to 2 million images at a time and the
         job will fail for payloads that exceed this limit.
 
-        Response:
+        Returns:
             :class:`AsyncJob`: Asynchronous job object to track processing status.
         """
         response = self._client.create_image_index(self.id)
@@ -1096,7 +1108,7 @@ class Dataset:
                 in the absence of ``model_run_id``.
 
         Returns:
-            Payload containing an :class:`AsyncJob` object to monitor progress.
+            :class:`AsyncJob`: Asynchronous job object to track processing status.
         """
         response = self._client.create_object_index(
             self.id, model_run_id, gt_only
@@ -1111,11 +1123,15 @@ class Dataset:
         update: bool = False,
     ):
         """Creates a new taxonomy.
+
+        At the moment we only support taxonomies for category annotations and
+        predictions.
+
         ::
 
             import nucleus
             client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")
-            dataset = client.get_dataset("YOUR_DATASET_ID")
+            dataset = client.get_dataset("ds_bwkezj6g5c4g05gqp1eg")
 
             response = dataset.add_taxonomy(
                 taxonomy_name="clothing_type",
@@ -1128,12 +1144,23 @@ class Dataset:
             taxonomy_name: The name of the taxonomy. Taxonomy names must be
                 unique within a dataset.
             taxonomy_type: The type of this taxonomy as a string literal.
-                Currently, the only supported taxonomy type is "category".
+                Currently, the only supported taxonomy type is "category."
             labels: The list of possible labels for the taxonomy.
-            update: Whether or not to update taxonomy labels on taxonomy name collision. Default is False. Note that taxonomy labels will not be deleted on update, they can only be appended.
+            update: Whether or not to update taxonomy labels on taxonomy name
+                collision. Default is False. Note that taxonomy labels will not be
+                deleted on update, they can only be appended.
 
         Returns:
-            Returns a response with dataset_id, taxonomy_name and status of the add taxonomy operation.
+            Returns a response with dataset_id, taxonomy_name, and status of the
+            add taxonomy operation.
+
+            ::
+
+                {
+                    "dataset_id": str,
+                    "taxonomy_name": str,
+                    "status": "Taxonomy created"
+                }
         """
         return self._client.make_request(
             construct_taxonomy_payload(
@@ -1149,13 +1176,23 @@ class Dataset:
     ):
         """Deletes the given taxonomy.
 
-        All annotations and predictions associated with the taxonomy will be deleted as well.
+        All annotations and predictions associated with the taxonomy will be
+        deleted as well.
 
         Parameters:
             taxonomy_name: The name of the taxonomy.
 
         Returns:
-            Returns a response with dataset_id, taxonomy_name and status of the delete taxonomy operation.
+            Returns a response with dataset_id, taxonomy_name, and status of the
+            delete taxonomy operation.
+
+            ::
+
+                {
+                    "dataset_id": str,
+                    "taxonomy_name": str,
+                    "status": "Taxonomy successfully deleted"
+                }
         """
         return self._client.make_request(
             {},
@@ -1166,7 +1203,7 @@ class Dataset:
     def items_and_annotations(
         self,
     ) -> List[Dict[str, Union[DatasetItem, Dict[str, List[Annotation]]]]]:
-        """Returns a list of all DatasetItems and Annotations in this slice.
+        """Returns a list of all DatasetItems and Annotations in this dataset.
 
         Returns:
             A list of dicts, each with two keys representing a row in the dataset::
@@ -1178,9 +1215,9 @@ class Dataset:
                     "cuboid": Optional[List[CuboidAnnotation]],
                    "line": Optional[List[LineAnnotation]],
                    "polygon": Optional[List[PolygonAnnotation]],
-                    "keypoints": Optional[List[KeypointsAnnotation]],
                    "segmentation": Optional[List[SegmentationAnnotation]],
                    "category": Optional[List[CategoryAnnotation]],
+                    "keypoints": Optional[List[KeypointsAnnotation]],
                 }
             }]
         """
@@ -1191,6 +1228,39 @@ class Dataset:
             )
         return convert_export_payload(api_payload[EXPORTED_ROWS])
 
+    def items_and_annotation_generator(
+        self,
+    ) -> Iterable[Dict[str, Union[DatasetItem, Dict[str, List[Annotation]]]]]:
+        """Provides a generator of all DatasetItems and Annotations in the dataset.
+
+        Returns:
+            Generator where each element is a dict containing the DatasetItem
+            and all of its associated Annotations, grouped by type.
+            ::
+
+                Iterable[{
+                    "item": DatasetItem,
+                    "annotations": {
+                        "box": List[BoxAnnotation],
+                        "polygon": List[PolygonAnnotation],
+                        "cuboid": List[CuboidAnnotation],
+                        "line": Optional[List[LineAnnotation]],
+                        "segmentation": List[SegmentationAnnotation],
+                        "category": List[CategoryAnnotation],
+                        "keypoints": List[KeypointsAnnotation],
+                    }
+                }]
+        """
+        json_generator = paginate_generator(
+            client=self._client,
+            endpoint=f"dataset/{self.id}/exportForTrainingPage",
+            result_key=EXPORT_FOR_TRAINING_KEY,
+            page_size=100000,
+        )
+        for data in json_generator:
+            for ia in convert_export_payload([data], has_predictions=False):
+                yield ia
+
     def export_embeddings(
         self,
     ) -> List[Dict[str, Union[str, List[float]]]]:
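
The new `items_and_annotation_generator` pages through the `exportForTrainingPage` endpoint and yields one row at a time, so exports of large datasets do not have to materialize the full list the way `items_and_annotations` does. A usage sketch, assuming `ds` is an existing Dataset::

    num_boxes = 0
    for row in ds.items_and_annotation_generator():
        item = row["item"]                 # DatasetItem
        boxes = row["annotations"]["box"]  # List[BoxAnnotation]
        num_boxes += len(boxes)
        print(item.reference_id, len(boxes))
    print(f"total box annotations: {num_boxes}")
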
@@ -1213,18 +1283,15 @@ class Dataset:
         return api_payload  # type: ignore
 
     def delete_annotations(
-        self, reference_ids: list = None, keep_history=True
+        self, reference_ids: list = None, keep_history: bool = True
     ) -> AsyncJob:
         """Deletes all annotations associated with the specified item reference IDs.
 
         Parameters:
             reference_ids: List of user-defined reference IDs of the dataset items
                 from which to delete annotations. Defaults to an empty list.
-            keep_history: Whether to preserve version history. If False, all
-                previous versions will be deleted along with the annotations. If
-                True, the version history (including deletion) wil persist.
-                Default is True.
-
+            keep_history: Whether to preserve version history. We recommend
+                skipping this parameter and using the default value of True.
         Returns:
             :class:`AsyncJob`: Empty payload response.
         """
@@ -1245,7 +1312,7 @@ class Dataset:
         """Fetches a single scene in the dataset by its reference ID.
 
         Parameters:
-            reference_id: User-defined reference ID of the scene.
+            reference_id: The user-defined reference ID of the scene to fetch.
 
         Returns:
             :class:`Scene<LidarScene>`: A scene object containing frames, which
@@ -1272,6 +1339,8 @@ class Dataset:
                 :class:`PolygonPrediction`, \
                 :class:`CuboidPrediction`, \
                 :class:`SegmentationPrediction` \
+                :class:`CategoryPrediction`, \
+                :class:`KeypointsPrediction`, \
             ]]: List of prediction objects from the model.
 
         """
@@ -1518,9 +1587,15 @@ class Dataset:
             index (int): Absolute index of the dataset item within the dataset.
 
         Returns:
-            Dict[str, List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction,
-                SegmentationPrediction, CategoryPrediction]]]: Dictionary mapping prediction
-                type to a list of such prediction objects from the given model::
+            List[Union[\
+                :class:`BoxPrediction`, \
+                :class:`PolygonPrediction`, \
+                :class:`CuboidPrediction`, \
+                :class:`SegmentationPrediction` \
+                :class:`CategoryPrediction`, \
+                :class:`KeypointsPrediction`, \
+            ]]: Dictionary mapping prediction type to a list of such prediction
+            objects from the given model::
 
             {
                 "box": List[BoxPrediction],
@@ -1528,6 +1603,7 @@ class Dataset:
                 "cuboid": List[CuboidPrediction],
                 "segmentation": List[SegmentationPrediction],
                 "category": List[CategoryPrediction],
+                "keypoints": List[KeypointsPrediction],
             }
         """
         return format_prediction_response(
@@ -1547,9 +1623,15 @@ class Dataset:
                 all predictions.
 
         Returns:
-            Dict[str, List[Union[BoxPrediction, PolygonPrediction, CuboidPrediction,
-                SegmentationPrediction, CategoryPrediction]]]: Dictionary mapping prediction
-                type to a list of such prediction objects from the given model::
+            List[Union[\
+                :class:`BoxPrediction`, \
+                :class:`PolygonPrediction`, \
+                :class:`CuboidPrediction`, \
+                :class:`SegmentationPrediction` \
+                :class:`CategoryPrediction`, \
+                :class:`KeypointsPrediction`, \
+            ]]: Dictionary mapping prediction type to a list of such prediction
+            objects from the given model::
 
             {
                 "box": List[BoxPrediction],
@@ -1557,6 +1639,7 @@ class Dataset:
                 "cuboid": List[CuboidPrediction],
                 "segmentation": List[SegmentationPrediction],
                 "category": List[CategoryPrediction],
+                "keypoints": List[KeypointsPrediction],
             }
         """
         return format_prediction_response(
@@ -1583,6 +1666,7 @@ class Dataset:
                 :class:`CuboidPrediction`, \
                 :class:`SegmentationPrediction` \
                 :class:`CategoryPrediction` \
+                :class:`KeypointsPrediction` \
             ]: Model prediction object with the specified annotation ID.
         """
         return from_json(
@@ -1639,7 +1723,7 @@ class Dataset:
     def update_scene_metadata(self, mapping: Dict[str, dict]):
         """
         Update (merge) scene metadata for each reference_id given in the mapping.
-        The backed will join the specified mapping metadata to the exisiting metadata.
+        The backend will join the specified mapping metadata to the existing metadata.
         If there is a key-collision, the value given in the mapping will take precedence.
 
         Args:
@@ -1660,7 +1744,7 @@ class Dataset:
     def update_item_metadata(self, mapping: Dict[str, dict]):
         """
         Update (merge) dataset item metadata for each reference_id given in the mapping.
-        The backed will join the specified mapping metadata to the exisiting metadata.
+        The backend will join the specified mapping metadata to the existing metadata.
         If there is a key-collision, the value given in the mapping will take precedence.
 
         This method may also be used to udpate the `camera_params` for a particular set of items.
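
As corrected above, the backend merges the supplied metadata into each item's existing metadata, with the mapping winning on key collisions. A minimal sketch, assuming `ds` is an existing Dataset whose items use these reference IDs::

    ds.update_item_metadata(
        mapping={
            "img_0001": {"weather": "rainy"},  # merged into existing metadata
            "img_0002": {"weather": "sunny", "split": "val"},
        }
    )
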