scale-nucleus 0.15.10b0__py3-none-any.whl → 0.16.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nucleus/__init__.py CHANGED
@@ -2,6 +2,7 @@
 
 __all__ = [
     "AsyncJob",
+    "EmbeddingsExportJob",
    "BoxAnnotation",
    "BoxPrediction",
    "CameraParams",
@@ -68,7 +69,7 @@ from .annotation import (
    Segment,
    SegmentationAnnotation,
 )
-from .async_job import AsyncJob
+from .async_job import AsyncJob, EmbeddingsExportJob
 from .camera_params import CameraParams
 from .connection import Connection
 from .constants import (
@@ -170,7 +171,7 @@ class NucleusClient:
        self,
        api_key: Optional[str] = None,
        use_notebook: bool = False,
-        endpoint: str = None,
+        endpoint: Optional[str] = None,
    ):
        self.api_key = self._set_api_key(api_key)
        self.tqdm_bar = tqdm.tqdm
@@ -236,7 +237,7 @@ class NucleusClient:
    def jobs(
        self,
    ) -> List[AsyncJob]:
-        """Lists all jobs, see NucleusClinet.list_jobs(...) for advanced options
+        """Lists all jobs, see NucleusClient.list_jobs(...) for advanced options
 
        Returns:
            List of all AsyncJobs
@@ -343,7 +344,9 @@ class NucleusClient:
        return AsyncJob.from_json(payload=payload, client=self)
 
    def get_model(
-        self, model_id: str = None, model_run_id: str = None
+        self,
+        model_id: Optional[str] = None,
+        model_run_id: Optional[str] = None,
    ) -> Model:
        """Fetches a model by its ID.
 
@@ -388,7 +391,10 @@ class NucleusClient:
        )
 
    def create_dataset_from_project(
-        self, project_id: str, last_n_tasks: int = None, name: str = None
+        self,
+        project_id: str,
+        last_n_tasks: Optional[int] = None,
+        name: Optional[str] = None,
    ) -> Dataset:
        """Create a new dataset from an existing Scale or Rapid project.
 
@@ -922,7 +928,10 @@ class NucleusClient:
 
    @deprecated("Prefer calling Dataset.delete_annotations instead.")
    def delete_annotations(
-        self, dataset_id: str, reference_ids: list = None, keep_history=True
+        self,
+        dataset_id: str,
+        reference_ids: Optional[list] = None,
+        keep_history=True,
    ) -> AsyncJob:
        dataset = self.get_dataset(dataset_id)
        return dataset.delete_annotations(reference_ids, keep_history)
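Most of the signature changes in this release follow one pattern: PEP 484 does not allow a plain `str`/`int`/`list` annotation to carry a `None` default (implicit Optional), so strict type checkers such as mypy reject `endpoint: str = None`. A minimal sketch of the before/after; the `connect` function here is illustrative, not part of the library:

```python
from typing import Optional

# Rejected by strict checkers (e.g. mypy with --no-implicit-optional):
#     def connect(endpoint: str = None): ...

# Accepted: the None default is reflected in the annotation.
def connect(endpoint: Optional[str] = None) -> str:
    # Placeholder fallback; the real client resolves its configured endpoint.
    return endpoint or "https://api.scale.com"

print(connect())                          # https://api.scale.com
print(connect("http://localhost:3000"))   # http://localhost:3000
```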
nucleus/async_job.py CHANGED
@@ -1,6 +1,7 @@
 import time
 from dataclasses import dataclass
-from typing import Dict, List
+from enum import Enum
+from typing import Dict, List, Set
 
 import requests
 
@@ -16,6 +17,29 @@ from nucleus.utils import replace_double_slashes
 JOB_POLLING_INTERVAL = 5
 
 
+class JobStatus(str, Enum):
+    QUEUED = "Queued"
+    RUNNING = "Running"
+    COMPLETED = "Completed"
+    ERRORED_DEPRECATED = "Errored"
+    ERRORED_SERVER = "Errored_Server"  # Server Error
+    ERRORED_USER = "Errored_User"  # User Error
+    ERRORED_PARTIAL = "Errored_Partial"  # Partially Completed
+    ERRORED_HANGING = "Errored_Hanging"  # Hanging
+    CANCELLED = "Cancelled"
+    RETRIED = "Retried"
+
+
+JOB_ERROR_PREFIX = JobStatus.ERRORED_DEPRECATED
+JOB_ERROR_STATES: Set[JobStatus] = {
+    JobStatus.ERRORED_DEPRECATED,
+    JobStatus.ERRORED_SERVER,
+    JobStatus.ERRORED_USER,
+    JobStatus.ERRORED_PARTIAL,
+    JobStatus.ERRORED_HANGING,
+}
+
+
 @dataclass
 class AsyncJob:
    """Object used to check the status or errors of a long running asynchronous operation.
@@ -116,9 +140,25 @@ class AsyncJob:
                f"Finished at {time.perf_counter() - start_time} s: {status}"
            )
        final_status = status
-        if final_status["status"] == "Errored":
+        if final_status["status"] in JOB_ERROR_STATES or final_status[
+            "status"
+        ].startswith(JOB_ERROR_PREFIX):
            raise JobError(final_status, self)
 
+    @classmethod
+    def from_id(cls, job_id: str, client: "NucleusClient"):  # type: ignore # noqa: F821
+        """Creates a job instance from a specific job Id.
+
+        Parameters:
+            job_id: Defines the job Id
+            client: The client to use for the request.
+
+        Returns:
+            The specific AsyncMethod (or inherited) instance.
+        """
+        job = client.get_job(job_id)
+        return cls.from_json(job.__dict__, client)
+
    @classmethod
    def from_json(cls, payload: dict, client):
        # TODO: make private
@@ -131,6 +171,34 @@ class AsyncJob:
        )
 
 
+class EmbeddingsExportJob(AsyncJob):
+    def result_urls(self, wait_for_completion=True) -> List[str]:
+        """Gets a list of signed Scale URLs for each embedding batch.
+
+        Parameters:
+            wait_for_completion: Defines whether the call shall wait for
+                the job to complete. Defaults to True
+
+        Returns:
+            A list of signed Scale URLs which contain batches of embeddings.
+
+            The files contain a JSON array of embedding records with the following schema:
+                [{
+                    "reference_id": str,
+                    "embedding_vector": List[float]
+                }]
+        """
+        if wait_for_completion:
+            self.sleep_until_complete(verbose_std_out=False)
+
+        status = self.status()
+
+        if status["status"] != "Completed":
+            raise JobError(status, self)
+
+        return status["message"]["result"]  # type: ignore
+
+
 class JobError(Exception):
    def __init__(self, job_status: Dict[str, str], job: AsyncJob):
        final_status_message = job_status["message"]
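The new `JobStatus` enum replaces the single catch-all `"Errored"` check: `sleep_until_complete` now raises `JobError` on any of the `Errored_*` states, and `AsyncJob.from_id` rehydrates a job handle from its ID. A hedged usage sketch; the API key and job ID below are placeholders:

```python
import nucleus
from nucleus.async_job import AsyncJob, JobStatus

client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")  # placeholder key

# Rehydrate a job from an ID; subclasses such as EmbeddingsExportJob
# can be rehydrated the same way through cls.from_json.
job = AsyncJob.from_id("job_123", client)  # hypothetical job ID

# JobStatus subclasses str, so members compare equal to raw API strings.
if job.status()["status"] not in (JobStatus.COMPLETED, JobStatus.CANCELLED):
    job.sleep_until_complete()  # raises JobError on any Errored_* state
```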
nucleus/connection.py CHANGED
@@ -1,4 +1,5 @@
 import time
+from typing import Optional
 
 import requests
 
@@ -11,7 +12,7 @@ from .retry_strategy import RetryStrategy
 class Connection:
    """Wrapper of HTTP requests to the Nucleus endpoint."""
 
-    def __init__(self, api_key: str, endpoint: str = None):
+    def __init__(self, api_key: str, endpoint: Optional[str] = None):
        self.api_key = api_key
        self.endpoint = endpoint
 
nucleus/dataset.py CHANGED
@@ -15,7 +15,7 @@ from typing import (
 import requests
 
 from nucleus.annotation_uploader import AnnotationUploader, PredictionUploader
-from nucleus.async_job import AsyncJob
+from nucleus.async_job import AsyncJob, EmbeddingsExportJob
 from nucleus.prediction import Prediction, from_json
 from nucleus.track import Track
 from nucleus.url_utils import sanitize_string_args
@@ -1230,7 +1230,9 @@ class Dataset:
        return AsyncJob.from_json(response, self._client)
 
    def create_object_index(
-        self, model_run_id: str = None, gt_only: bool = None
+        self,
+        model_run_id: Optional[str] = None,
+        gt_only: Optional[bool] = None,
    ):
        """Creates or updates object index by generating embeddings for objects that do not already have embeddings.
 
@@ -1419,18 +1421,34 @@
 
    def export_embeddings(
        self,
-    ) -> List[Dict[str, Union[str, List[float]]]]:
+        asynchronous: bool = True,
+    ) -> Union[List[Dict[str, Union[str, List[float]]]], EmbeddingsExportJob]:
        """Fetches a pd.DataFrame-ready list of dataset embeddings.
 
+        Parameters:
+            asynchronous: Whether or not to process the export asynchronously (and
+                return an :class:`EmbeddingsExportJob` object). Default is True.
+
        Returns:
-            A list, where each item is a dict with two keys representing a row
+            If synchronous, a list where each item is a dict with two keys representing a row
            in the dataset::
 
                List[{
                    "reference_id": str,
                    "embedding_vector": List[float]
                }]
+
+            Otherwise, returns an :class:`EmbeddingsExportJob` object.
        """
+        if asynchronous:
+            api_payload = self._client.make_request(
+                payload=None,
+                route=f"dataset/{self.id}/async_export_embeddings",
+                requests_command=requests.post,
+            )
+
+            return EmbeddingsExportJob.from_json(api_payload, self._client)
+
        api_payload = self._client.make_request(
            payload=None,
            route=f"dataset/{self.id}/embeddings",
@@ -1439,7 +1457,7 @@ class Dataset:
        return api_payload  # type: ignore
 
    def delete_annotations(
-        self, reference_ids: list = None, keep_history: bool = True
+        self, reference_ids: Optional[list] = None, keep_history: bool = True
    ) -> AsyncJob:
        """Deletes all annotations associated with the specified item reference IDs.
 
@@ -1555,7 +1573,9 @@
        )
        return format_scale_task_info_response(response)
 
-    def calculate_evaluation_metrics(self, model, options: dict = None):
+    def calculate_evaluation_metrics(
+        self, model, options: Optional[dict] = None
+    ):
        """Starts computation of evaluation metrics for a model on the dataset.
 
        To update matches and metrics calculated for a model on a given dataset you
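`Dataset.export_embeddings` now defaults to the asynchronous path and hands back an `EmbeddingsExportJob`, whose `result_urls()` returns the signed URLs. A sketch of both modes; the API key and dataset ID are placeholders:

```python
import nucleus

client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")  # placeholder key
dataset = client.get_dataset("ds_abc123")  # hypothetical dataset ID

# New default: asynchronous export returning an EmbeddingsExportJob.
job = dataset.export_embeddings()
urls = job.result_urls()  # waits for completion, then returns signed URLs

# Old behavior: synchronous export of the rows themselves.
rows = dataset.export_embeddings(asynchronous=False)
# rows: [{"reference_id": str, "embedding_vector": List[float]}, ...]
```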
nucleus/dataset_item.py CHANGED
@@ -1,5 +1,5 @@
 import json
-import os.path
+import os
 from collections import Counter
 from dataclasses import dataclass
 from enum import Enum
nucleus/job.py CHANGED
@@ -27,6 +27,7 @@ class CustomerJobTypes(str, Enum):
    CLONE_DATASET = "cloneDataset"
    METADATA_UPDATE = "metadataUpdate"
    TRIGGER_EVALUATE = "triggerEvaluate"
+    EXPORT_EMBEDDINGS = "exportEmbeddings"
 
    def __contains__(self, item):
        try:
nucleus/logger.py CHANGED
@@ -5,5 +5,5 @@ import requests
 logger = logging.getLogger(__name__)
 logging.basicConfig()
 logging.getLogger(
-    requests.packages.urllib3.__package__  # pylint: disable=no-member
+    requests.packages.urllib3.__package__  # type: ignore # pylint: disable=no-member
 ).setLevel(logging.ERROR)
nucleus/metrics/cuboid_metrics.py CHANGED
@@ -174,7 +174,6 @@ class CuboidPrecision(CuboidMetric):
        prediction_filters: Optional[
            Union[ListOfOrAndFilters, ListOfAndFilters]
        ] = None,
-        use_2d_iou: bool = False,
    ):
        """Initializes CuboidIOU object.
 
@@ -192,13 +191,11 @@ class CuboidPrecision(CuboidMetric):
                predicates. The innermost structures each describe a single column predicate. The list of inner predicates is
                interpreted as a conjunction (AND), forming a more selective and multiple column predicate.
                Finally, the most outer list combines these filters as a disjunction (OR).
-            use_2d_iou: whether to use 2D or 3D IOU for precision calculation.
        """
        assert (
            0 <= iou_threshold <= 1
        ), "IoU threshold must be between 0 and 1."
        self.iou_threshold = iou_threshold
-        self.use_2d_iou = use_2d_iou
        super().__init__(
            enforce_label_match=enforce_label_match,
            confidence_threshold=confidence_threshold,
@@ -215,7 +212,6 @@ class CuboidPrecision(CuboidMetric):
            predictions,
            annotations,
            threshold_in_overlap_ratio=self.iou_threshold,
-            use_2d=self.use_2d_iou,
        )
        weight = stats["tp_sum"] + stats["fp_sum"]
        precision = stats["tp_sum"] / max(weight, sys.float_info.epsilon)
@@ -237,7 +233,6 @@ class CuboidRecall(CuboidMetric):
        prediction_filters: Optional[
            Union[ListOfOrAndFilters, ListOfAndFilters]
        ] = None,
-        use_2d_iou: bool = False,
    ):
        """Initializes CuboidIOU object.
 
@@ -245,13 +240,11 @@ class CuboidRecall(CuboidMetric):
            enforce_label_match: whether to enforce that annotation and prediction labels must match. Defaults to True
            iou_threshold: IOU threshold to consider detection as valid. Must be in [0, 1]. Default 0.0
            confidence_threshold: minimum confidence threshold for predictions. Must be in [0, 1]. Default 0.0
-            use_2d_iou: whether to use 2D or 3D IOU for calculation.
        """
        assert (
            0 <= iou_threshold <= 1
        ), "IoU threshold must be between 0 and 1."
        self.iou_threshold = iou_threshold
-        self.use_2d_iou = use_2d_iou
        super().__init__(
            enforce_label_match=enforce_label_match,
            confidence_threshold=confidence_threshold,
@@ -268,7 +261,6 @@ class CuboidRecall(CuboidMetric):
            predictions,
            annotations,
            threshold_in_overlap_ratio=self.iou_threshold,
-            use_2d_iou=self.use_2d_iou
        )
        weight = stats["tp_sum"] + stats["fn_sum"]
        recall = stats["tp_sum"] / max(weight, sys.float_info.epsilon)
nucleus/metrics/cuboid_utils.py CHANGED
@@ -1,5 +1,5 @@
 from functools import wraps
-from typing import Dict, List, Tuple
+from typing import Dict, List, Optional, Tuple
 
 import numpy as np
 
@@ -176,8 +176,8 @@ def get_batch_cuboid_corners(
    xyz: "np.ndarray",
    wlh: "np.ndarray",
    yaw: "np.ndarray",
-    pitch: "np.ndarray" = None,
-    roll: "np.ndarray" = None,
+    pitch: Optional["np.ndarray"] = None,
+    roll: Optional["np.ndarray"] = None,
    scale_convention: bool = True,
 ) -> "np.ndarray":
    """
@@ -211,7 +211,9 @@
 
 
 def get_batch_rotation_matrices(
-    yaw: "np.ndarray", pitch: "np.ndarray" = None, roll: "np.ndarray" = None
+    yaw: "np.ndarray",
+    pitch: Optional["np.ndarray"] = None,
+    roll: Optional["np.ndarray"] = None,
 ) -> "np.ndarray":
    if pitch is None:
        pitch = np.zeros_like(yaw)
@@ -245,16 +247,14 @@ def associate_cuboids_on_iou(
    wlh_1: "np.ndarray",
    yaw_1: "np.ndarray",
    threshold_in_overlap_ratio: float = 0.1,
-    use_2d_iou: bool = False,
 ) -> List[Tuple[int, int]]:
    if xyz_0.shape[0] < 1 or xyz_1.shape[0] < 1:
        return []
-    iou_3d, iou_2d = compute_outer_iou(xyz_0, wlh_0, yaw_0, xyz_1, wlh_1, yaw_1)
-    iou = iou_2d if use_2d_iou else iou_3d
+    iou_matrix, _ = compute_outer_iou(xyz_0, wlh_0, yaw_0, xyz_1, wlh_1, yaw_1)
    mapping = []
-    for i, m in enumerate(iou.max(axis=1)):
+    for i, m in enumerate(iou_matrix.max(axis=1)):
        if m >= threshold_in_overlap_ratio:
-            mapping.append((i, iou[i].argmax()))
+            mapping.append((i, iou_matrix[i].argmax()))
    return mapping
 
 
@@ -262,7 +262,6 @@ def recall_precision(
    prediction: List[CuboidPrediction],
    groundtruth: List[CuboidAnnotation],
    threshold_in_overlap_ratio: float,
-    use_2d_iou: bool = False,
 ) -> Dict[str, float]:
    """
    Calculates the precision and recall of each lidar frame.
@@ -271,7 +270,6 @@
    :param predictions: list of cuboid annotation predictions.
    :param ground_truth: list of cuboid annotation groundtruths.
    :param threshold: IOU threshold to consider detection as valid. Must be in [0, 1].
-    :param use_2d_iou: flag whether to use 2d or 3d iou for evaluation.
    """
 
    tp_sum = 0
@@ -298,7 +296,6 @@
            gt_items["wlh"],
            gt_items["yaw"] + np.pi / 2,
            threshold_in_overlap_ratio=threshold_in_overlap_ratio,
-            use_2d_iou=use_2d_iou,
        )
 
        for pred_id, gt_id in mapping:
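With the `use_2d_iou` flag removed, `associate_cuboids_on_iou` always matches on the 3D IoU matrix from `compute_outer_iou`: each prediction is greedily paired with its highest-IoU ground truth whenever that maximum clears the threshold. A toy recreation of just the matching loop, assuming a precomputed IoU matrix:

```python
import numpy as np

# Hypothetical 2x3 IoU matrix: 2 predictions vs. 3 ground-truth cuboids.
iou_matrix = np.array([
    [0.05, 0.72, 0.10],
    [0.40, 0.00, 0.08],
])
threshold_in_overlap_ratio = 0.1

mapping = []
for i, m in enumerate(iou_matrix.max(axis=1)):  # best IoU per prediction
    if m >= threshold_in_overlap_ratio:
        mapping.append((i, iou_matrix[i].argmax()))  # pair with best GT

print(mapping)  # [(0, 1), (1, 0)]
```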
nucleus/metrics/segmentation_loader.py CHANGED
@@ -25,6 +25,7 @@ class InMemoryLoader(SegmentationMaskLoader):
 
    def __init__(self, url_to_array: Dict[str, "np.ndarray"]):
        self.url_to_array = url_to_array
+        super().__init__()
 
    def fetch(self, url: str):
        array = self.url_to_array[url]
nucleus/metrics/segmentation_to_poly_metrics.py CHANGED
@@ -115,7 +115,7 @@ class SegmentationMaskToPolyMetric(Metric):
        if prediction:
            if self.mode == SegToPolyMode.GENERATE_GT_FROM_POLY:
                pred_img = self.loader.fetch(prediction.mask_url)
-                ann_img, segments = rasterize_polygons_to_segmentation_mask(
+                ann_img, segments = rasterize_polygons_to_segmentation_mask(  # type: ignore
                    annotations.polygon_annotations
                    + annotations.box_annotations,  # type:ignore
                    pred_img.shape,
nucleus/metrics/segmentation_utils.py CHANGED
@@ -103,7 +103,7 @@ def fast_confusion_matrix(
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
-        minlength=n_class ** 2,
+        minlength=n_class**2,
    ).reshape(n_class, n_class)
    return hist
 
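The only change here is formatting (`n_class ** 2` → `n_class**2`, matching Black's style for simple power operands), but the bincount trick is worth spelling out: each (true, pred) pair is encoded as the single integer `n_class * true + pred`, counted in one pass, then reshaped into an n_class x n_class matrix. A worked micro-example:

```python
import numpy as np

n_class = 3
label_true = np.array([0, 0, 1, 2, 2])
label_pred = np.array([0, 1, 1, 2, 0])

# Encode each (true, pred) pair as one integer in [0, n_class**2).
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
    n_class * label_true[mask].astype(int) + label_pred[mask],
    minlength=n_class**2,
).reshape(n_class, n_class)

print(hist)
# [[1 1 0]    row = true class, column = predicted class
#  [0 1 0]
#  [1 0 1]]
```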
nucleus/scene.py CHANGED
@@ -596,7 +596,10 @@ class VideoScene(ABC):
        ), "No list of items is accepted when uploading a video_location unless you are using privacy mode"
 
    def add_item(
-        self, item: DatasetItem, index: int = None, update: bool = False
+        self,
+        item: DatasetItem,
+        index: Optional[int] = None,
+        update: bool = False,
    ) -> None:
        """Adds DatasetItem to the specified index for videos uploaded as an array of images.
 
nucleus/slice.py CHANGED
@@ -7,10 +7,11 @@ from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
 import requests
 
 from nucleus.annotation import Annotation
-from nucleus.async_job import AsyncJob
+from nucleus.async_job import AsyncJob, EmbeddingsExportJob
 from nucleus.constants import EXPORT_FOR_TRAINING_KEY, EXPORTED_ROWS, ITEMS_KEY
 from nucleus.dataset_item import DatasetItem
 from nucleus.errors import NucleusAPIError
+from nucleus.prediction import Prediction
 from nucleus.prediction import from_json as prediction_from_json
 from nucleus.scene import Scene
 from nucleus.utils import (
@@ -335,7 +336,7 @@ class Slice:
 
    def append(
        self,
-        reference_ids: List[str] = None,
+        reference_ids: Optional[List[str]] = None,
    ) -> dict:
        """Appends existing DatasetItems from a Dataset to a Slice.
 
@@ -458,6 +459,35 @@
        )
        return convert_export_payload(api_payload[EXPORTED_ROWS], True)
 
+    def export_raw_json(
+        self,
+    ) -> List[Union[DatasetItem, Annotation, Prediction, Scene]]:
+        """Exports object slices in a raw JSON format. Note that it currently does not support item-level slices.
+
+        For each object or match in an object slice, this method exports the following information:
+        - The item that contains the object.
+        - The prediction and/or annotation (both, if the slice is based on IOU matches).
+        - If the object is part of a scene, it includes scene-level attributes in the export.
+
+        Returns:
+            An iterable where each element is a dictionary containing JSON-formatted data.
+            ::
+
+                List[{
+                    "item": DatasetItem (as JSON),
+                    "annotation": BoxAnnotation/CuboidAnnotation (as JSON)
+                    "prediction": BoxPrediction/CuboidPrediction (as JSON)
+                    "scene": Scene (as JSON)
+                    }
+                }]
+        """
+        api_payload = self._client.make_request(
+            payload=None,
+            route=f"slice/{self.id}/export_raw_json",
+            requests_command=requests.get,
+        )
+        return api_payload
+
    def export_predictions_generator(
        self, model
    ) -> Iterable[Dict[str, Union[DatasetItem, Dict[str, List[Annotation]]]]]:
@@ -570,17 +600,33 @@
 
    def export_embeddings(
        self,
-    ) -> List[Dict[str, Union[str, List[float]]]]:
+        asynchronous: bool = True,
+    ) -> Union[List[Dict[str, Union[str, List[float]]]], EmbeddingsExportJob]:
        """Fetches a pd.DataFrame-ready list of slice embeddings.
 
+        Parameters:
+            asynchronous: Whether or not to process the export asynchronously (and
+                return an :class:`EmbeddingsExportJob` object). Default is True.
+
        Returns:
-            A list where each element is a columnar mapping::
+            If synchronous, a list where each element is a columnar mapping::
 
                List[{
                    "reference_id": str,
                    "embedding_vector": List[float]
                }]
+
+            Otherwise, returns an :class:`EmbeddingsExportJob` object.
        """
+        if asynchronous:
+            api_payload = self._client.make_request(
+                payload=None,
+                route=f"dataset/{self.id}/async_export_embeddings",
+                requests_command=requests.post,
+            )
+
+            return EmbeddingsExportJob.from_json(api_payload, self._client)
+
        api_payload = self._client.make_request(
            payload=None,
            route=f"slice/{self.id}/embeddings",
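`Slice` gains the same asynchronous embeddings flow plus a raw JSON export for object slices. A hedged sketch; the API key and slice ID are placeholders, and the `get_slice` accessor is the client method as I understand it rather than something shown in this diff:

```python
import nucleus

client = nucleus.NucleusClient("YOUR_SCALE_API_KEY")  # placeholder key
slc = client.get_slice("slc_456")  # hypothetical slice ID

# Object-slice export: one record per object/match, with its item,
# annotation and/or prediction, and scene attributes where applicable.
records = slc.export_raw_json()

# Embeddings now default to the async job flow, mirroring Dataset.
job = slc.export_embeddings()
urls = job.result_urls()
```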
nucleus/test_launch_integration.py CHANGED
@@ -144,8 +144,8 @@ _OUTLINE_COLOR = (0, 255, 0, 255)
 def visualize_box_launch_bundle(
    img_file: str,
    load_predict_fn: Callable,
-    load_model_fn: Callable = None,
-    model: Any = None,
+    load_model_fn: Optional[Callable] = None,
+    model: Optional[Any] = None,
    show_image: bool = False,
    max_annotations: int = 5,
 ) -> Image:
@@ -194,8 +194,8 @@ def visualize_box_launch_bundle(
 def run_category_launch_bundle(
    img_file: str,
    load_predict_fn: Callable,
-    load_model_fn: Callable = None,
-    model: Any = None,
+    load_model_fn: Optional[Callable] = None,
+    model: Optional[Any] = None,
 ):
    """
    Run this function locally to test if your image categorization model returns a format consumable by Launch + Nucleus
@@ -218,8 +218,8 @@ def run_category_launch_bundle(
 def visualize_line_launch_bundle(
    img_file: str,
    load_predict_fn: Callable,
-    load_model_fn: Callable = None,
-    model: Any = None,
+    load_model_fn: Optional[Callable] = None,
+    model: Optional[Any] = None,
    show_image: bool = False,
    max_annotations: int = 5,
 ) -> Image:
@@ -266,8 +266,8 @@ def visualize_line_launch_bundle(
 def visualize_polygon_launch_bundle(
    img_file: str,
    load_predict_fn: Callable,
-    load_model_fn: Callable = None,
-    model: Any = None,
+    load_model_fn: Optional[Callable] = None,
+    model: Optional[Any] = None,
    show_image: bool = False,
    max_annotations: int = 5,
 ) -> Image:
nucleus/utils.py CHANGED
@@ -221,6 +221,7 @@ def format_scale_task_info_response(response: dict) -> Union[Dict, List[Dict]]:
    return ret
 
 
+# pylint: disable=too-many-branches
 def convert_export_payload(api_payload, has_predictions: bool = False):
    """Helper function to convert raw JSON to API objects
 
@@ -239,33 +240,66 @@ def convert_export_payload(api_payload, has_predictions: bool = False):
        if row.get(SEGMENTATION_TYPE) is not None:
            segmentation = row[SEGMENTATION_TYPE]
            segmentation[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[SEGMENTATION_TYPE] = SegmentationAnnotation.from_json(
-                segmentation
-            )
+            if not has_predictions:
+                annotations[
+                    SEGMENTATION_TYPE
+                ] = SegmentationAnnotation.from_json(segmentation)
+            else:
+                annotations[
+                    SEGMENTATION_TYPE
+                ] = SegmentationPrediction.from_json(segmentation)
        for polygon in row[POLYGON_TYPE]:
            polygon[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[POLYGON_TYPE].append(
-                PolygonAnnotation.from_json(polygon)
-            )
+            if not has_predictions:
+                annotations[POLYGON_TYPE].append(
+                    PolygonAnnotation.from_json(polygon)
+                )
+            else:
+                annotations[POLYGON_TYPE].append(
+                    PolygonPrediction.from_json(polygon)
+                )
        for line in row[LINE_TYPE]:
            line[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[LINE_TYPE].append(LineAnnotation.from_json(line))
+            if not has_predictions:
+                annotations[LINE_TYPE].append(LineAnnotation.from_json(line))
+            else:
+                annotations[LINE_TYPE].append(LinePrediction.from_json(line))
        for keypoints in row[KEYPOINTS_TYPE]:
            keypoints[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[KEYPOINTS_TYPE].append(
-                KeypointsAnnotation.from_json(keypoints)
-            )
+            if not has_predictions:
+                annotations[KEYPOINTS_TYPE].append(
+                    KeypointsAnnotation.from_json(keypoints)
+                )
+            else:
+                annotations[KEYPOINTS_TYPE].append(
+                    KeypointsPrediction.from_json(keypoints)
+                )
        for box in row[BOX_TYPE]:
            box[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[BOX_TYPE].append(BoxAnnotation.from_json(box))
+            if not has_predictions:
+                annotations[BOX_TYPE].append(BoxAnnotation.from_json(box))
+            else:
+                annotations[BOX_TYPE].append(BoxPrediction.from_json(box))
        for cuboid in row[CUBOID_TYPE]:
            cuboid[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[CUBOID_TYPE].append(CuboidAnnotation.from_json(cuboid))
+            if not has_predictions:
+                annotations[CUBOID_TYPE].append(
+                    CuboidAnnotation.from_json(cuboid)
+                )
+            else:
+                annotations[CUBOID_TYPE].append(
+                    CuboidPrediction.from_json(cuboid)
+                )
        for category in row[CATEGORY_TYPE]:
            category[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
-            annotations[CATEGORY_TYPE].append(
-                CategoryAnnotation.from_json(category)
-            )
+            if not has_predictions:
+                annotations[CATEGORY_TYPE].append(
+                    CategoryAnnotation.from_json(category)
+                )
+            else:
+                annotations[CATEGORY_TYPE].append(
+                    CategoryPrediction.from_json(category)
+                )
        for multicategory in row[MULTICATEGORY_TYPE]:
            multicategory[REFERENCE_ID_KEY] = row[ITEM_KEY][REFERENCE_ID_KEY]
            annotations[MULTICATEGORY_TYPE].append(
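The `convert_export_payload` rewrite threads `has_predictions` through every geometry type, so prediction exports deserialize into `*Prediction` objects instead of `*Annotation` objects. A reduced sketch of the per-type branch, assuming the `from_json` classes imported in nucleus/utils.py; this helper is illustrative, not the library's actual function:

```python
from nucleus import BoxAnnotation, BoxPrediction

def parse_box(box: dict, has_predictions: bool):
    # Ground-truth exports build Annotation objects; prediction exports
    # (has_predictions=True) build the matching Prediction objects.
    if not has_predictions:
        return BoxAnnotation.from_json(box)
    return BoxPrediction.from_json(box)
```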
scale_nucleus-0.15.10b0.dist-info/METADATA → scale_nucleus-0.16.2.dist-info/METADATA RENAMED
@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: scale-nucleus
-Version: 0.15.10b0
+Version: 0.16.2
 Summary: The official Python client library for Nucleus, the Data Platform for AI
 Home-page: https://scale.com/nucleus
 License: MIT
 Author: Scale AI Nucleus Team
 Author-email: nucleusapi@scaleapi.com
-Requires-Python: >=3.6.2,<4.0
+Requires-Python: >=3.7,<4.0
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.7
@@ -19,11 +19,8 @@ Provides-Extra: metrics
 Requires-Dist: Pillow (>=7.1.2)
 Requires-Dist: Shapely (>=1.8.0) ; extra == "metrics"
 Requires-Dist: aiohttp (>=3.7.4,<4.0.0)
-Requires-Dist: astroid (<=2.12) ; python_full_version <= "3.7.0"
 Requires-Dist: click (>=7.1.2,<9.0)
-Requires-Dist: dataclasses (>=0.7,<0.8) ; python_full_version >= "3.6.1" and python_version < "3.7"
 Requires-Dist: nest-asyncio (>=1.5.1,<2.0.0)
-Requires-Dist: numpy (>=1.19.5) ; python_version >= "3.6" and python_version < "4.0"
 Requires-Dist: numpy (>=1.19.5) ; python_version >= "3.7" and python_version < "3.10"
 Requires-Dist: numpy (>=1.22.0) ; python_version >= "3.10"
 Requires-Dist: pydantic (>=1.8.2,<2.0.0)
@@ -33,8 +30,8 @@ Requires-Dist: rasterio (>=1.2.0) ; extra == "metrics"
 Requires-Dist: requests (>=2.23.0,<3.0.0)
 Requires-Dist: rich (>=10.15.2)
 Requires-Dist: scale-launch (>=0.1.0) ; (python_version >= "3.7" and python_version < "4.0") and (extra == "launch")
-Requires-Dist: scikit-learn (>=0.24.0)
-Requires-Dist: scipy (>=1.4.1)
+Requires-Dist: scikit-learn (>=0.24.0) ; extra == "metrics"
+Requires-Dist: scipy (>=1.4.1) ; extra == "metrics"
 Requires-Dist: shellingham (>=1.4.0,<2.0.0)
 Requires-Dist: tqdm (>=4.41.0,<5.0.0)
 Project-URL: Documentation, https://dashboard.scale.com/nucleus/docs/api
scale_nucleus-0.15.10b0.dist-info/RECORD → scale_nucleus-0.16.2.dist-info/RECORD RENAMED
@@ -10,14 +10,14 @@ cli/nu.py,sha256=0f71zPq4fe3I1ghhiSRQi39ENhAzoLPdhz_vh8vxSI8,2074
 cli/reference.py,sha256=RuHVhmGTZNe0MfwpL96YjJdaH0OJzg98rz4xeIu4hJU,256
 cli/slices.py,sha256=nxq_Zg1m5oXuhz0ibyHkElvyVWt1AcE9tG-fN4CQxF8,1397
 cli/tests.py,sha256=NiwEVGuF08_jlCiKEIjKhwq55NvyU4xvPEJW5MJmdZg,4590
-nucleus/__init__.py,sha256=OOEIVY83YFAlgez7L2QfZqSnv7aLLraxI7OD9nu8Rf0,44176
+nucleus/__init__.py,sha256=JkJfKvNJbv-uK1wJTcgHrdsk5kroIrL_D3BCWs5gnSE,44351
 nucleus/annotation.py,sha256=hEA6nbVrKaR3ZuJpbVzdNy0bWzbzD9snjc6D_hEVtjo,42673
 nucleus/annotation_uploader.py,sha256=hv2Qr5WzwFohlWAnFG9nJ9uFHGhm4sFDl09oPssgVs0,9248
-nucleus/async_job.py,sha256=juxzn3xB8HwGDxolANX8Ryo4xM5QCOCaOembCdxyFaQ,4745
+nucleus/async_job.py,sha256=QX8ZAm5Q5uZsfBqZvLLRj32mW7otwJZteYGktvtSwEI,6886
 nucleus/async_utils.py,sha256=ZUlhjhxdNVZ842TJl32hRcXDkS09vR8S6jwvroZBhsA,6373
 nucleus/autocurate.py,sha256=kI0vRqad_An8SN5JX6sSdGP_vNHJI2Pq4NINHuhNf2U,1080
 nucleus/camera_params.py,sha256=fl17aaSAZDAJIWo6F2HFvM6HKGcQh9fXvo4t3RzGMc4,3726
-nucleus/connection.py,sha256=xl6Dz8io57GOn02Fbg5btnlMslEgNbijpht7JekZoD4,2862
+nucleus/connection.py,sha256=U7G7h_ufcTYBwqehhKWk899-TTvbRNFzXo7khtb5fy0,2900
 nucleus/constants.py,sha256=kU_Gkw-91O_NFNEqtkD-1UTbkyZiTGGZcJNe84fH26Y,4582
 nucleus/data_transfer_object/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nucleus/data_transfer_object/dataset_details.py,sha256=1YGvfKkPSqDrXK_y5mBXyRThY07tU-nwOCYTkYCSl6k,214
@@ -25,19 +25,19 @@ nucleus/data_transfer_object/dataset_info.py,sha256=5P_gpvAyaqXxj2ZQuzLkGN2XROaN
 nucleus/data_transfer_object/dataset_size.py,sha256=oe-dXaMLpsQRDcJQRZ9Ja8JTagYz4dviZuTognEylp0,111
 nucleus/data_transfer_object/job_status.py,sha256=BbIo22-27Ue2-pEeVUvb-M7e-FyDw6rshkeJ6YRZ6VA,1665
 nucleus/data_transfer_object/scenes_list.py,sha256=iTHE6vA47bRB6ciyEU4LArUXEXco4ArnGvZTGTeK8xs,432
-nucleus/dataset.py,sha256=lT7ZGqTvt_PtSvt5eshuCrwtY29OYH2A7Jze6PaAA8o,78756
-nucleus/dataset_item.py,sha256=nfLbfcjnf3_8BS300h3Ss6P8vYSa9Yvx59kp9mcT12Q,9622
+nucleus/dataset.py,sha256=rkgPmQJOpPi8fu6e3bD4Jhqftj3Qm9y6tQegEmr_uq4,79499
+nucleus/dataset_item.py,sha256=_ZsTBJ9I7IN5YTos1TVs8R_vMoiXgRFCNiNNQevKvvE,9617
 nucleus/dataset_item_uploader.py,sha256=oT1HRFPu9J3iu0raKdtLS4fz7q1pO3fttjabgFGBuxc,6696
 nucleus/deprecation_warning.py,sha256=5C9dVusR5UkUQnW2MrRkIXCfbc8ULc7xOaB134agNKk,976
 nucleus/errors.py,sha256=xzJ3TBMee1SHbo1PY1z4B63Z_m8S3yHttrHZ9-Ry8Fk,2902
-nucleus/job.py,sha256=dJQz7LuBK7epc1COFI183UyC6X98l7HHCSPuqwKtcGA,1732
-nucleus/logger.py,sha256=acoFtszu4T-fj_Op4rwlXNNaLPrQ8Kw2ofaYusBHO8I,208
+nucleus/job.py,sha256=aZ0hBHDbwmDxgk3Um3oP2S01vX5fgRtbsuY2421SB_w,1775
+nucleus/logger.py,sha256=jmOMr32JfJyue3ZTdHScx-WG3WIf8pPOndlz9n4jQLA,223
 nucleus/metadata_manager.py,sha256=dWii8BPNNgpC3rRhJ3UmbgrmeDJm1zC_-8Oh6vGP8g4,2808
 nucleus/metrics/__init__.py,sha256=bve48T1quN_vh3zhpW1nbv8ZfFBBiT9k6d8cipu-1k0,894
 nucleus/metrics/base.py,sha256=cwBE1xm-2SN6ULfRwajLwNrC8oaLU4cGxP686EP4KE8,7757
 nucleus/metrics/categorization_metrics.py,sha256=sGM7Zyz1DLSUEFbaqBFEf4oNXlWvc465dMCFVI5LhcI,11934
-nucleus/metrics/cuboid_metrics.py,sha256=pZyNqvcNK8jG0sxxsZr1nKpOGiQtiuvR9nRay-gAoKo,12826
-nucleus/metrics/cuboid_utils.py,sha256=-cOfKS85_izR_OS3j3ON-kh3uRF6Bk2ySfUXFsUFpeg,11439
+nucleus/metrics/cuboid_metrics.py,sha256=ZjlFNa8kknx2mJzEiJ8ZvTlvOSW0BjZzzfJiv3WHm-g,12461
+nucleus/metrics/cuboid_utils.py,sha256=ZiQhlrCCsnWIu03X2xG-OaVTIKjmoyQfH5yUBPE0P80,11301
 nucleus/metrics/custom_types.py,sha256=6oWmlajz229crwEm2ImvC5vOhBXjt-HhVqj41D5uSwk,485
 nucleus/metrics/errors.py,sha256=Cu2W5tSsIHpazryd-9FXzu46OLC12Wk6gQT66hxQDis,304
 nucleus/metrics/filtering.py,sha256=X1y8JGKIKYUEVnoIKELuMJJTv6kcN_ZF4BPMCLIIVus,25502
@@ -45,10 +45,10 @@ nucleus/metrics/filters.py,sha256=SegJ2UdYSSnRJpoiqguVpGrDSGfyCK7OiREziunZw54,11
 nucleus/metrics/metric_utils.py,sha256=1EJf3ezTQAzh4mgECrxWyiUHzUjDqA_5hV9eqZISxRU,971
 nucleus/metrics/polygon_metrics.py,sha256=mrsGc7714B944NDhLZ6pa0FbyWltuJEkd6ZVccLibdU,29277
 nucleus/metrics/polygon_utils.py,sha256=olAwKQhkBjVpEWCqvC7sEvXBzYQ44v9SYp9nlPBKgeY,12931
-nucleus/metrics/segmentation_loader.py,sha256=u0u8KpYlOzbxKKz-dkboj3g7crpa0pY6MISbNH9t2-A,833
+nucleus/metrics/segmentation_loader.py,sha256=SdEhEYB5azCWp3iR8UaW-MXB23O-NQSTkEzGIJKNeCg,860
 nucleus/metrics/segmentation_metrics.py,sha256=rvjfFeyK-4ZEIgxl6nelYyDkAr767WjNTnVFkcQHDh8,29556
-nucleus/metrics/segmentation_to_poly_metrics.py,sha256=ChN1g8FJwBvhVUt9TStL07ccSGIibPgWMS02Zcg3djw,29091
-nucleus/metrics/segmentation_utils.py,sha256=hNv4M_ajoiPpvVmG9hisfz4qcH-cOMseFTWQz03KBdc,10370
+nucleus/metrics/segmentation_to_poly_metrics.py,sha256=92SuotttylxsTgebm3476wN21EJM19MT4rnjmiOlb68,29107
+nucleus/metrics/segmentation_utils.py,sha256=AkqCbyim67K9DA0VQYOUpYHe8vOwSvanFqWB1oOz1sU,10368
 nucleus/model.py,sha256=4J9MH_byHVEi8XlAZ6qFYHj_N7xc6wySs__KUwjq9og,8744
 nucleus/model_run.py,sha256=WtGy8cD86M_6aVNAp0ELZgunwbztNeOO8nv8ZSpbwhY,9280
 nucleus/package_not_installed.py,sha256=1ae0aqKAM3KrB0C-5MuPPXoz9tLWJUKtP1UZ-vw9Zik,1117
@@ -57,13 +57,13 @@ nucleus/prediction.py,sha256=vLI0_ExaazVTrVhFru4mIWS_rX1xRFkg_El-5EAoaOQ,31092
 nucleus/pydantic_base.py,sha256=EQER8_XqG4B-RC4aggUfukJa6f5OZ7ej92AsDWWIWPc,682
 nucleus/quaternion.py,sha256=TAnwj4arQXoTeofFgZMdZsCyxAMnu23N6to0F1WFNwk,1111
 nucleus/retry_strategy.py,sha256=daKZqjZYCh87WtXoVUuR9BZu2TTE-CtOFEYZ-d6xVMY,312
-nucleus/scene.py,sha256=yCAhsLZ83IfrLYkPbE8anYGMtIR68ZN_A97aqSN8J0g,28541
-nucleus/slice.py,sha256=sQhBm9peXlYAvRIE-nCupcDqWcX4jB2CR8_PxepUKj8,24871
-nucleus/test_launch_integration.py,sha256=ywJajyUSrgoRyDxryQOJAxPWgOnBbrAlaWUm9hcUaPo,9875
+nucleus/scene.py,sha256=uNafI3aCs7vhszMEOvu7kruX6oqhBJ_cKe0KNC0QUN8,28576
+nucleus/slice.py,sha256=KAtcWcRM2sb86gvGCe9wP4OMh7-nGfYyrw8VFrIMyuQ,26801
+nucleus/test_launch_integration.py,sha256=MVGuKc0Hp8KVlE-5JNaPrFDeZWQaVEnvyr314IwsX80,9955
 nucleus/track.py,sha256=ROmOyzYZKrHVTnLBhnk-qEBtklD_EDsSnRcGYE8xG4E,3247
 nucleus/upload_response.py,sha256=wR_pfZCBju1vGiGqbVgk8zhM6GhD3ebYxyGBm8y0GvY,3287
 nucleus/url_utils.py,sha256=EZ3vy1FYTGvXRVNyq43Wif-ypS8LFoDrYMYJre_DVuQ,790
-nucleus/utils.py,sha256=5xO3uSg-pXdu4xYc8Wjkb9PgRsGciiDea6T7G6vfCXg,12762
+nucleus/utils.py,sha256=-9f0URffHYZDvCB_Gh2ghVMPs8l-oE_xnmjOZf-IrQw,14067
 nucleus/validate/__init__.py,sha256=UZx1tJHCRCosPXKdjFGaeifHOIf9R6ncYMcq77Gom54,786
 nucleus/validate/client.py,sha256=c8iF-fi7CEyKSoutzwJD0rhE6DwDDKFF-xC0wITCTjE,8219
 nucleus/validate/constants.py,sha256=EoR0BLKqQroyVx6TpirbcMKunOnREdtqL9OUvpejgAk,1089
@@ -82,8 +82,8 @@ nucleus/validate/scenario_test.py,sha256=pCmM157dblSciZCDTw-f47Fpy3OUZFgXmokdhIL
 nucleus/validate/scenario_test_evaluation.py,sha256=Q0WzaEE9uUbPVc4EHlCoKjhJcqMNt4QbyiiJx12VOR0,4075
 nucleus/validate/scenario_test_metric.py,sha256=AhVFOB1ULwBqlZ2X_Au1TXy4iQELljtzR4ZpeLB35So,1209
 nucleus/validate/utils.py,sha256=VjdIJj9Pii4z4L6xbvClAc7ra_J7cX0vWB_J2X6yrGE,185
-scale_nucleus-0.15.10b0.dist-info/LICENSE,sha256=jaTGyQSQIZeWMo5iyYqgbAYHR9Bdy7nOzgE-Up3m_-g,1075
-scale_nucleus-0.15.10b0.dist-info/WHEEL,sha256=vVCvjcmxuUltf8cYhJ0sJMRDLr1XsPuxEId8YDzbyCY,88
-scale_nucleus-0.15.10b0.dist-info/entry_points.txt,sha256=fmqEzh6NZQyg9eFMILnWabKT8OWQTMSCdDzMiVq2zYs,32
-scale_nucleus-0.15.10b0.dist-info/METADATA,sha256=fnDzV4eF98NVeekA32DoDTPZkFwu-uETE-cj32Q4eBk,8035
-scale_nucleus-0.15.10b0.dist-info/RECORD,,
+scale_nucleus-0.16.2.dist-info/LICENSE,sha256=jaTGyQSQIZeWMo5iyYqgbAYHR9Bdy7nOzgE-Up3m_-g,1075
+scale_nucleus-0.16.2.dist-info/METADATA,sha256=oQtA3lLS6aXVH3B9Jg9yzQxGF1gbPE421LNWh9S0Keo,7822
+scale_nucleus-0.16.2.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+scale_nucleus-0.16.2.dist-info/entry_points.txt,sha256=fmqEzh6NZQyg9eFMILnWabKT8OWQTMSCdDzMiVq2zYs,32
+scale_nucleus-0.16.2.dist-info/RECORD,,
scale_nucleus-0.15.10b0.dist-info/WHEEL → scale_nucleus-0.16.2.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.4.0
+Generator: poetry-core 1.6.1
 Root-Is-Purelib: true
 Tag: py3-none-any