scale-nucleus 0.17.3__py3-none-any.whl → 0.17.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nucleus/dataset.py CHANGED
@@ -104,7 +104,7 @@ from .slice import (
 from .upload_response import UploadResponse
 
 if TYPE_CHECKING:
-    from . import NucleusClient
+    from . import Model, NucleusClient
 
 # TODO: refactor to reduce this file to under 1000 lines.
 # pylint: disable=C0302
@@ -1449,6 +1449,49 @@ class Dataset:
         )
         return convert_export_payload(api_payload[EXPORTED_ROWS])
 
+    def scene_and_annotation_generator(self, page_size=10):
+        """Provides a generator of all DatasetItems and Annotations in the dataset, grouped by scene.
+
+        Returns:
+            Generator where each element is a nested dict (representing a JSON) structured in the following way:
+
+            Iterable[{
+                "file_location": str,
+                "metadata": Dict[str, Any],
+                "annotations": {
+                    "{trackId}": {
+                        "label": str,
+                        "name": str,
+                        "frames": List[{
+                            "left": int,
+                            "top": int,
+                            "width": int,
+                            "height": int,
+                            "key": str,  # frame key
+                            "metadata": Dict[str, Any]
+                        }]
+                    }
+                }
+            }]
+
+        This is similar to how the Scale API returns task data.
+        """
+
+        if page_size > 30:
+            raise ValueError("Page size must be less than or equal to 30")
+
+        endpoint_name = "exportForTrainingByScene"
+        json_generator = paginate_generator(
+            client=self._client,
+            endpoint=f"dataset/{self.id}/{endpoint_name}",
+            result_key=EXPORT_FOR_TRAINING_KEY,
+            page_size=page_size,
+        )
+
+        for data in json_generator:
+            yield data
+
     def items_and_annotation_generator(
         self,
         query: Optional[str] = None,
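
For illustration, a minimal sketch of consuming the new scene_and_annotation_generator; the API key and dataset ID below are placeholders, not values from this diff:

    import nucleus

    client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")  # placeholder key
    dataset = client.get_dataset("ds_YOUR_DATASET_ID")    # placeholder ID

    # Scenes are fetched lazily, at most 30 per request.
    for scene in dataset.scene_and_annotation_generator(page_size=10):
        print(scene["file_location"], scene["metadata"])
        for track_id, track in scene["annotations"].items():
            print(track_id, track["label"], len(track["frames"]))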
@@ -2303,3 +2346,41 @@ class Dataset:
 
         else:
             print(f"Did not find any items in {dirname}.")
+
+    def upload_lidar_semseg_predictions(
+        self,
+        model: "Model",
+        pointcloud_ref_id: str,
+        predictions_s3_path: str,
+    ):
+        """Upload Lidar Semantic Segmentation predictions for a given point-cloud.
+
+        Assuming a point-cloud with only 4 points (three labeled as Car, one labeled as Person),
+        the contents of the predictions S3 object should be formatted as follows:
+
+        .. code-block:: json
+
+            {
+                "objects": [
+                    { "label": "Car", "index": 1 },
+                    { "label": "Person", "index": 2 }
+                ],
+                "point_objects": [1, 1, 1, 2],
+                "point_confidence": [0.5, 0.9, 0.9, 0.3]
+            }
+
+        The points in `"point_objects"` must appear in the same order as the points that were
+        originally uploaded to Scale.
+
+        Parameters:
+            model (:class:`Model`): Nucleus model used to store these predictions
+            pointcloud_ref_id (str): The reference ID of the pointcloud to which these predictions belong
+            predictions_s3_path (str): S3 path to where the predictions are stored
+        """
+
+        return self._client.make_request(
+            payload={"pointsSegmentationUrl": predictions_s3_path},
+            route=f"dataset/{self.id}/model/{model.id}/pointcloud/{pointcloud_ref_id}/uploadLSSPrediction",
+            requests_command=requests.post,
+        )
nucleus/model.py CHANGED
@@ -211,7 +211,9 @@ class Model:
         )
         return AsyncJob.from_json(response, self._client)
 
-    def run(self, dataset_id: str, slice_id: Optional[str]) -> str:
+    def run(
+        self, dataset_id: str, model_run_name: str, slice_id: Optional[str]
+    ) -> str:
         """Runs inference on the bundle associated with the model on the dataset. ::
 
             import nucleus
@@ -222,11 +224,18 @@
 
         Args:
             dataset_id: The ID of the dataset to run inference on.
-            job_id: The ID of the :class:`AsyncJob` used to track job progress.
+            model_run_name: The name of the model run.
             slice_id: The ID of the slice of the dataset to run inference on.
+
+        Returns:
+            job_id: The ID of the :class:`AsyncJob` used to track job progress.
         """
         response = self._client.make_request(
-            {"dataset_id": dataset_id, "slice_id": slice_id},
+            {
+                "dataset_id": dataset_id,
+                "slice_id": slice_id,
+                "model_run_name": model_run_name,
+            },
             f"model/run/{self.id}/",
             requests_command=requests.post,
         )
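
Note that this is a breaking change to Model.run: model_run_name is now a required second parameter, so callers that passed slice_id positionally must be updated. A minimal sketch of the new call, with placeholder IDs:

    import nucleus

    client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")  # placeholder key
    model = client.get_model("prj_YOUR_MODEL_ID")         # placeholder ID

    # model_run_name is now required; pass slice_id=None to run on the
    # whole dataset.
    job_id = model.run(
        dataset_id="ds_YOUR_DATASET_ID",
        model_run_name="bike-detection-run-1",
        slice_id=None,
    )
    print(job_id)  # ID of the AsyncJob tracking inference progress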
scale_nucleus-0.17.3.dist-info/METADATA → scale_nucleus-0.17.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: scale-nucleus
-Version: 0.17.3
+Version: 0.17.6
 Summary: The official Python client library for Nucleus, the Data Platform for AI
 Home-page: https://scale.com/nucleus
 License: MIT
scale_nucleus-0.17.3.dist-info/RECORD → scale_nucleus-0.17.6.dist-info/RECORD CHANGED
@@ -26,7 +26,7 @@ nucleus/data_transfer_object/dataset_info.py,sha256=5P_gpvAyaqXxj2ZQuzLkGN2XROaN
 nucleus/data_transfer_object/dataset_size.py,sha256=oe-dXaMLpsQRDcJQRZ9Ja8JTagYz4dviZuTognEylp0,111
 nucleus/data_transfer_object/job_status.py,sha256=hxvyNdrdVdj3UpEfwvryKC_QCJQEC9ru6IPjhPFcK44,2038
 nucleus/data_transfer_object/scenes_list.py,sha256=iTHE6vA47bRB6ciyEU4LArUXEXco4ArnGvZTGTeK8xs,432
-nucleus/dataset.py,sha256=LdhT-wGToWySUEZBgLSdXiNxSIIfeNeoh6IWRLKmBbQ,89824
+nucleus/dataset.py,sha256=ekYreXpUY2kUyKJLJEopNXfezc0u9EzQyWlAYAtt3-8,92751
 nucleus/dataset_item.py,sha256=y9ia47i31lX2wvw6EkVAxeHburMrrZpuyjEGlstWa2A,10166
 nucleus/dataset_item_uploader.py,sha256=BD0FTgimEFYmDbnOLIaQZS3OLDfLe5wumADDmgMX598,6684
 nucleus/deprecation_warning.py,sha256=5C9dVusR5UkUQnW2MrRkIXCfbc8ULc7xOaB134agNKk,976
@@ -52,7 +52,7 @@ nucleus/metrics/segmentation_loader.py,sha256=SdEhEYB5azCWp3iR8UaW-MXB23O-NQSTkE
 nucleus/metrics/segmentation_metrics.py,sha256=rvjfFeyK-4ZEIgxl6nelYyDkAr767WjNTnVFkcQHDh8,29556
 nucleus/metrics/segmentation_to_poly_metrics.py,sha256=92SuotttylxsTgebm3476wN21EJM19MT4rnjmiOlb68,29107
 nucleus/metrics/segmentation_utils.py,sha256=AkqCbyim67K9DA0VQYOUpYHe8vOwSvanFqWB1oOz1sU,10368
-nucleus/model.py,sha256=ZYGXfwZVRiM6aPB1CexiwE605AJeXmkRFq28y_5lOpE,10954
+nucleus/model.py,sha256=qZ8D7ZS0HobheEME5fZsdfWqjs472U32Q8_4RVuFZH8,11159
 nucleus/model_run.py,sha256=WtGy8cD86M_6aVNAp0ELZgunwbztNeOO8nv8ZSpbwhY,9280
 nucleus/package_not_installed.py,sha256=1ae0aqKAM3KrB0C-5MuPPXoz9tLWJUKtP1UZ-vw9Zik,1117
 nucleus/payload_constructor.py,sha256=EI5VDt5GG4jlJMI5k4vRve0GQ-zxJ79InnyRrjf5ZUY,4887
@@ -85,8 +85,8 @@ nucleus/validate/scenario_test.py,sha256=pCmM157dblSciZCDTw-f47Fpy3OUZFgXmokdhIL
 nucleus/validate/scenario_test_evaluation.py,sha256=Q0WzaEE9uUbPVc4EHlCoKjhJcqMNt4QbyiiJx12VOR0,4075
 nucleus/validate/scenario_test_metric.py,sha256=AhVFOB1ULwBqlZ2X_Au1TXy4iQELljtzR4ZpeLB35So,1209
 nucleus/validate/utils.py,sha256=VjdIJj9Pii4z4L6xbvClAc7ra_J7cX0vWB_J2X6yrGE,185
-scale_nucleus-0.17.3.dist-info/LICENSE,sha256=jaTGyQSQIZeWMo5iyYqgbAYHR9Bdy7nOzgE-Up3m_-g,1075
-scale_nucleus-0.17.3.dist-info/METADATA,sha256=LgJefNOXq0dWMpOsMJCQywR19rRqn1mN5HNNSdJb5gA,7920
-scale_nucleus-0.17.3.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
-scale_nucleus-0.17.3.dist-info/entry_points.txt,sha256=fmqEzh6NZQyg9eFMILnWabKT8OWQTMSCdDzMiVq2zYs,32
-scale_nucleus-0.17.3.dist-info/RECORD,,
+scale_nucleus-0.17.6.dist-info/LICENSE,sha256=jaTGyQSQIZeWMo5iyYqgbAYHR9Bdy7nOzgE-Up3m_-g,1075
+scale_nucleus-0.17.6.dist-info/METADATA,sha256=HHN0a8wl3wVYBELMNFiG5uQ1-59ke402OCTm_dkty6Q,7920
+scale_nucleus-0.17.6.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+scale_nucleus-0.17.6.dist-info/entry_points.txt,sha256=fmqEzh6NZQyg9eFMILnWabKT8OWQTMSCdDzMiVq2zYs,32
+scale_nucleus-0.17.6.dist-info/RECORD,,