scale-nucleus 0.12b1__py3-none-any.whl → 0.14.14b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. cli/slices.py +14 -28
  2. nucleus/__init__.py +211 -18
  3. nucleus/annotation.py +28 -5
  4. nucleus/connection.py +9 -1
  5. nucleus/constants.py +9 -3
  6. nucleus/dataset.py +197 -59
  7. nucleus/dataset_item.py +11 -1
  8. nucleus/job.py +1 -1
  9. nucleus/metrics/__init__.py +2 -1
  10. nucleus/metrics/base.py +34 -56
  11. nucleus/metrics/categorization_metrics.py +6 -2
  12. nucleus/metrics/cuboid_utils.py +4 -6
  13. nucleus/metrics/errors.py +4 -0
  14. nucleus/metrics/filtering.py +369 -19
  15. nucleus/metrics/polygon_utils.py +3 -3
  16. nucleus/metrics/segmentation_loader.py +30 -0
  17. nucleus/metrics/segmentation_metrics.py +256 -195
  18. nucleus/metrics/segmentation_to_poly_metrics.py +229 -105
  19. nucleus/metrics/segmentation_utils.py +239 -8
  20. nucleus/model.py +66 -10
  21. nucleus/model_run.py +1 -1
  22. nucleus/{shapely_not_installed.py → package_not_installed.py} +3 -3
  23. nucleus/payload_constructor.py +4 -0
  24. nucleus/prediction.py +6 -3
  25. nucleus/scene.py +7 -0
  26. nucleus/slice.py +160 -16
  27. nucleus/utils.py +51 -12
  28. nucleus/validate/__init__.py +1 -0
  29. nucleus/validate/client.py +57 -8
  30. nucleus/validate/constants.py +1 -0
  31. nucleus/validate/data_transfer_objects/eval_function.py +22 -0
  32. nucleus/validate/data_transfer_objects/scenario_test_evaluations.py +13 -5
  33. nucleus/validate/eval_functions/available_eval_functions.py +33 -20
  34. nucleus/validate/eval_functions/config_classes/segmentation.py +2 -46
  35. nucleus/validate/scenario_test.py +71 -13
  36. nucleus/validate/scenario_test_evaluation.py +21 -21
  37. nucleus/validate/utils.py +1 -1
  38. {scale_nucleus-0.12b1.dist-info → scale_nucleus-0.14.14b0.dist-info}/LICENSE +0 -0
  39. {scale_nucleus-0.12b1.dist-info → scale_nucleus-0.14.14b0.dist-info}/METADATA +13 -11
  40. {scale_nucleus-0.12b1.dist-info → scale_nucleus-0.14.14b0.dist-info}/RECORD +42 -41
  41. {scale_nucleus-0.12b1.dist-info → scale_nucleus-0.14.14b0.dist-info}/WHEEL +1 -1
  42. {scale_nucleus-0.12b1.dist-info → scale_nucleus-0.14.14b0.dist-info}/entry_points.txt +0 -0
cli/slices.py CHANGED
@@ -23,12 +23,9 @@ def slices(ctx, web):
 @slices.command("list")
 def list_slices():
     """List all available Slices"""
-    with Live(
-        Spinner("dots4", text="Finding your Slices!"),
-        vertical_overflow="visible",
-    ) as live:
-        client = init_client()
-        datasets = client.datasets
+    client = init_client()
+    console = Console()
+    with console.status("Finding your Slices!", spinner="dots4"):
         table = Table(
             Column("id", overflow="fold", min_width=24),
             "name",
@@ -37,26 +34,15 @@ def list_slices():
             title=":cake: Slices",
             title_justify="left",
         )
-        errors = {}
-        for ds in datasets:
-            try:
-                ds_slices = ds.slices
-                if ds_slices:
-                    for slc_id in ds_slices:
-                        slice_url = nucleus_url(f"{ds.id}/{slc_id}")
-                        slice_info = client.get_slice(slc_id).info()
-                        table.add_row(
-                            slc_id, slice_info["name"], ds.name, slice_url
-                        )
-                    live.update(table)
-            except NucleusAPIError as e:
-                errors[ds.id] = e
-
-        error_tree = Tree(
-            ":x: Encountered the following errors while fetching information"
-        )
-        for ds_id, error in errors.items():
-            dataset_branch = error_tree.add(f"Dataset: {ds_id}")
-            dataset_branch.add(f"Error: {error}")
+        datasets = client.datasets
+        id_to_datasets = {d.id: d for d in datasets}
+        all_slices = client.slices
+        for s in all_slices:
+            table.add_row(
+                s.id,
+                s.name,
+                id_to_datasets[s.dataset_id].name,
+                nucleus_url(f"{s.dataset_id}/{s.id}"),
+            )
 
-    Console().print(error_tree)
+    console.print(table)
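The rewritten command drops the `Live`/`Spinner` pair in favor of Rich's `console.status` context manager, and fills the table from a single bulk `client.slices` call instead of one `get_slice` request per dataset. A minimal sketch of the new rendering pattern, assuming only the `rich` package (the row data here is made up):

```
# Sketch: Rich status spinner + table, as used by the new `nucleus slices list`.
from rich.console import Console
from rich.table import Column, Table

console = Console()
with console.status("Finding your Slices!", spinner="dots4"):
    table = Table(
        Column("id", overflow="fold", min_width=24),
        "name",
        title=":cake: Slices",
        title_justify="left",
    )
    # Stand-in rows; the real command adds one row per slice from client.slices.
    for slice_id, name in [("slc_1", "night-images"), ("slc_2", "rainy-scenes")]:
        table.add_row(slice_id, name)

console.print(table)  # printed once the status spinner exits
```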
nucleus/__init__.py CHANGED
@@ -40,7 +40,7 @@ __all__ = [
 
 import os
 import warnings
-from typing import Dict, List, Optional, Sequence, Union
+from typing import Any, Dict, List, Optional, Sequence, Union
 
 import pkg_resources
 import pydantic
@@ -91,6 +91,7 @@ from .constants import (
     KEEP_HISTORY_KEY,
     MESSAGE_KEY,
     MODEL_RUN_ID_KEY,
+    MODEL_TAGS_KEY,
     NAME_KEY,
     NUCLEUS_ENDPOINT,
     PREDICTIONS_IGNORED_KEY,
@@ -218,6 +219,7 @@ class NucleusClient:
                 reference_id=model["ref_id"],
                 metadata=model["metadata"] or None,
                 client=self,
+                tags=model.get(MODEL_TAGS_KEY, []),
             )
             for model in model_objects["models"]
         ]
@@ -233,6 +235,12 @@ class NucleusClient:
         """
         return self.list_jobs()
 
+    @property
+    def slices(self) -> List[Slice]:
+        response = self.make_request({}, "slice/", requests.get)
+        slices = [Slice.from_request(info, self) for info in response]
+        return slices
+
     @deprecated(msg="Use the NucleusClient.models property in the future.")
     def list_models(self) -> List[Model]:
         return self.models
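The new `slices` property fetches every slice visible to the API key with a single GET to `slice/`. A hedged usage sketch (the API key is a placeholder; the printed fields match those used by the CLI change above):

```
from nucleus import NucleusClient

client = NucleusClient("YOUR_NUCLEUS_API_KEY")  # placeholder key

# One request returns Slice objects for all datasets; no per-dataset calls.
for s in client.slices:
    print(s.id, s.name, s.dataset_id)
```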
@@ -439,7 +447,8 @@ class NucleusClient:
         Deletes a dataset by ID.
 
         All items, annotations, and predictions associated with the dataset will
-        be deleted as well.
+        be deleted as well. Note that if this dataset is linked to a Scale or Rapid
+        labeling project, the project itself will not be deleted.
 
         Parameters:
             dataset_id: The ID of the dataset to delete.
@@ -484,6 +493,7 @@ class NucleusClient:
         reference_id: str,
         metadata: Optional[Dict] = None,
         bundle_name: Optional[str] = None,
+        tags: Optional[List[str]] = None,
     ) -> Model:
         """Adds a :class:`Model` to Nucleus.
 
@@ -495,13 +505,15 @@ class NucleusClient:
             metadata: An arbitrary dictionary of additional data about this model
                 that can be stored and retrieved. For example, you can store information
                 about the hyperparameters used in training this model.
+            bundle_name: Optional name of bundle attached to this model
+            tags: Optional list of tags to attach to this model
 
         Returns:
             :class:`Model`: The newly created model as an object.
         """
         response = self.make_request(
             construct_model_creation_payload(
-                name, reference_id, metadata, bundle_name
+                name, reference_id, metadata, bundle_name, tags
             ),
             "models/add",
         )
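Since `create_model` now accepts `tags`, they can be attached at creation time rather than after the fact. A small sketch (all values are illustrative):

```
model = client.create_model(
    name="resnet50-baseline",
    reference_id="resnet50-baseline-v1",
    metadata={"train_dataset": "ds_123"},
    tags=["baseline", "q3-experiments"],  # new parameter in this release
)
```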
@@ -516,6 +528,197 @@ class NucleusClient:
             metadata=metadata,
             bundle_name=bundle_name,
             client=self,
+            tags=tags,
+        )
+
+    def create_launch_model(
+        self,
+        name: str,
+        reference_id: str,
+        bundle_args: Dict[str, Any],
+        metadata: Optional[Dict] = None,
+    ) -> Model:
+        """
+        Adds a :class:`Model` to Nucleus, as well as a Launch bundle from a given function.
+
+        Parameters:
+            name: A human-readable name for the model.
+            reference_id: Unique, user-controlled ID for the model. This can be
+                used, for example, to link to an external storage of models which
+                may have its own id scheme.
+            bundle_args: Dict of kwargs for the creation of a Launch bundle;
+                more details on the keys below.
+            metadata: An arbitrary dictionary of additional data about this model
+                that can be stored and retrieved. For example, you can store information
+                about the hyperparameters used in training this model.
+
+        Returns:
+            :class:`Model`: The newly created model as an object.
+
+        Details on `bundle_args`:
+            Grabs an s3 signed url and uploads a model bundle to Scale Launch.
+
+            A model bundle consists of exactly one of {predict_fn_or_cls}, {load_predict_fn + model}, or {load_predict_fn + load_model_fn}.
+            Pre/post-processing code can be included inside load_predict_fn/model or in the predict_fn_or_cls call.
+
+        Keys for `bundle_args`:
+            model_bundle_name: Name of the model bundle you want to create. This acts as a unique identifier.
+            predict_fn_or_cls: Function or a Callable class that runs end-to-end (pre/post processing and model inference) on the call,
+                i.e. `predict_fn_or_cls(REQUEST) -> RESPONSE`.
+            model: Typically a trained neural network, e.g. a Pytorch module.
+            load_predict_fn: Function that, when called with model, returns a function that carries out inference,
+                i.e. `load_predict_fn(model) -> func; func(REQUEST) -> RESPONSE`.
+            load_model_fn: Function that, when run, loads a model, e.g. a Pytorch module,
+                i.e. `load_predict_fn(load_model_fn()) -> func; func(REQUEST) -> RESPONSE`.
+            bundle_url: Only for self-hosted mode. Desired location of the bundle.
+                Overrides any value given by self.bundle_location_fn.
+            requirements: A list of python package requirements, e.g.
+                ["tensorflow==2.3.0", "tensorflow-hub==0.11.0"]. If no list has been passed, will default to the currently
+                imported list of packages.
+            app_config: Either a dictionary that represents a YAML file's contents or a local path to a YAML file.
+            env_params: A dictionary that dictates environment information, e.g.
+                the use of pytorch or tensorflow, and which cuda/cudnn versions to use.
+                Specifically, the dictionary should contain the following keys:
+                "framework_type": either "tensorflow" or "pytorch".
+                "pytorch_version": Version of pytorch, e.g. "1.5.1", "1.7.0", etc. Only applicable if framework_type is pytorch.
+                "cuda_version": Version of cuda used, e.g. "11.0".
+                "cudnn_version": Version of cudnn used, e.g. "cudnn8-devel".
+                "tensorflow_version": Version of tensorflow, e.g. "2.3.0". Only applicable if framework_type is tensorflow.
+            globals_copy: Dictionary of the global symbol table. Normally provided by the `globals()` built-in function.
+        """
+        from launch import LaunchClient
+
+        launch_client = LaunchClient(api_key=self.api_key)
+
+        model_exists = any(model.name == name for model in self.list_models())
+        bundle_exists = any(
+            bundle.name == name + "-nucleus-autogen"
+            for bundle in launch_client.list_model_bundles()
+        )
+
+        if bundle_exists or model_exists:
+            raise ModelCreationError(
+                "Bundle with the given name already exists, please try a different name"
+            )
+
+        kwargs = {
+            "model_bundle_name": name + "-nucleus-autogen",
+            **bundle_args,
+        }
+
+        bundle = launch_client.create_model_bundle(**kwargs)
+        return self.create_model(
+            name,
+            reference_id,
+            metadata,
+            bundle.name,
+        )
+
+    def create_launch_model_from_dir(
+        self,
+        name: str,
+        reference_id: str,
+        bundle_from_dir_args: Dict[str, Any],
+        metadata: Optional[Dict] = None,
+    ) -> Model:
+        """
+        Adds a :class:`Model` to Nucleus, as well as a Launch bundle from a directory.
+
+        Parameters:
+            name: A human-readable name for the model.
+            reference_id: Unique, user-controlled ID for the model. This can be
+                used, for example, to link to an external storage of models which
+                may have its own id scheme.
+            bundle_from_dir_args: Dict of kwargs for the creation of a bundle from a directory;
+                more details on the keys below.
+            metadata: An arbitrary dictionary of additional data about this model
+                that can be stored and retrieved. For example, you can store information
+                about the hyperparameters used in training this model.
+
+        Returns:
+            :class:`Model`: The newly created model as an object.
+
+        Details on `bundle_from_dir_args`:
+            Packages up code from one or more local filesystem folders and uploads them as a bundle to Scale Launch.
+            In this mode, a bundle is just local code instead of a serialized object.
+
+            For example, if you have a directory structure like so, and your current working directory is also `my_root`:
+
+            ```
+            my_root/
+                my_module1/
+                    __init__.py
+                    ...files and directories
+                    my_inference_file.py
+                my_module2/
+                    __init__.py
+                    ...files and directories
+            ```
+
+            then calling `create_model_bundle_from_dirs` with `base_paths=["my_module1", "my_module2"]` essentially
+            creates a zip file without the root directory, e.g.:
+
+            ```
+            my_module1/
+                __init__.py
+                ...files and directories
+                my_inference_file.py
+            my_module2/
+                __init__.py
+                ...files and directories
+            ```
+
+            and these contents will be unzipped relative to the server-side `PYTHONPATH`. Bear these points in mind when
+            referencing Python module paths for this bundle. For instance, if `my_inference_file.py` has `def f(...)`
+            as the desired inference loading function, then the `load_predict_fn_module_path` argument should be
+            `my_module1.my_inference_file.f`.
+
+        Keys for `bundle_from_dir_args`:
+            model_bundle_name: Name of the model bundle you want to create. This acts as a unique identifier.
+            base_paths: The paths on the local filesystem where the bundle code lives.
+            requirements_path: A path on the local filesystem where a requirements.txt file lives.
+            env_params: A dictionary that dictates environment information, e.g.
+                the use of pytorch or tensorflow, and which cuda/cudnn versions to use.
+                Specifically, the dictionary should contain the following keys:
+                "framework_type": either "tensorflow" or "pytorch".
+                "pytorch_version": Version of pytorch, e.g. "1.5.1", "1.7.0", etc. Only applicable if framework_type is pytorch.
+                "cuda_version": Version of cuda used, e.g. "11.0".
+                "cudnn_version": Version of cudnn used, e.g. "cudnn8-devel".
+                "tensorflow_version": Version of tensorflow, e.g. "2.3.0". Only applicable if framework_type is tensorflow.
+            load_predict_fn_module_path: A python module path for a function that, when called with the output of
+                load_model_fn_module_path, returns a function that carries out inference.
+            load_model_fn_module_path: A python module path for a function that returns a model. The output feeds into
+                the function located at load_predict_fn_module_path.
+            app_config: Either a dictionary that represents a YAML file's contents or a local path to a YAML file.
+        """
+        from launch import LaunchClient
+
+        launch_client = LaunchClient(api_key=self.api_key)
+
+        model_exists = any(model.name == name for model in self.list_models())
+        bundle_exists = any(
+            bundle.name == name + "-nucleus-autogen"
+            for bundle in launch_client.list_model_bundles()
+        )
+
+        if bundle_exists or model_exists:
+            raise ModelCreationError(
+                "Bundle with the given name already exists, please try a different name"
+            )
+
+        kwargs = {
+            "model_bundle_name": name + "-nucleus-autogen",
+            **bundle_from_dir_args,
+        }
+
+        bundle = launch_client.create_model_bundle_from_dir(**kwargs)
+
+        return self.create_model(
+            name,
+            reference_id,
+            metadata,
+            bundle.name,
         )
 
     @deprecated(
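Putting the documented `bundle_args` keys together, a hedged sketch of `create_launch_model` (the predict function and environment values are placeholders, and `model_bundle_name` is filled in by the client as `name + "-nucleus-autogen"`, so it is not passed here):

```
def predict_fn(request):
    # Placeholder end-to-end inference: echoes the request back.
    return {"prediction": request}

model = client.create_launch_model(
    name="echo-model",
    reference_id="echo-model-v1",
    bundle_args={
        "predict_fn_or_cls": predict_fn,
        "requirements": [],  # empty list; default is the currently imported packages
        "env_params": {
            "framework_type": "pytorch",
            "pytorch_version": "1.7.0",
            "cuda_version": "11.0",
            "cudnn_version": "cudnn8-devel",
        },
        "globals_copy": globals(),
    },
)
```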
@@ -656,7 +859,7 @@ class NucleusClient:
 
     @deprecated("Prefer calling Dataset.delete_annotations instead.")
     def delete_annotations(
-        self, dataset_id: str, reference_ids: list = None, keep_history=False
+        self, dataset_id: str, reference_ids: list = None, keep_history=True
    ) -> AsyncJob:
         dataset = self.get_dataset(dataset_id)
         return dataset.delete_annotations(reference_ids, keep_history)
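Note the silent behavioral change here: `keep_history` flips its default from `False` to `True`. Callers who depended on the old default should now pass it explicitly; a brief sketch with an illustrative dataset ID:

```
# Pin keep_history explicitly so behavior is stable across client versions.
job = client.delete_annotations(
    dataset_id="ds_abc123",
    reference_ids=["img_1", "img_2"],
    keep_history=False,  # was the default before this release
)
```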
@@ -824,6 +1027,7 @@ class NucleusClient:
         payload: Optional[dict],
         route: str,
         requests_command=requests.post,
+        return_raw_response: bool = False,
     ) -> dict:
         """Makes a request to a Nucleus API endpoint.
 
@@ -833,11 +1037,11 @@ class NucleusClient:
             payload: Given request payload.
             route: Route for the request.
             requests_command: ``requests.post``, ``requests.get``, or ``requests.delete``.
+            return_raw_response: return the request's response object entirely
 
         Returns:
-            Response payload as JSON dict.
+            Response payload as JSON dict or request object.
         """
-        print(payload, route)
         if payload is None:
             payload = {}
         if requests_command is requests.get:
@@ -846,18 +1050,7 @@ class NucleusClient:
                     "Received defined payload with GET request! Will ignore payload"
                 )
             payload = None
-        return self._connection.make_request(payload, route, requests_command)  # type: ignore
-
-    def handle_bad_response(
-        self,
-        endpoint,
-        requests_command,
-        requests_response=None,
-        aiohttp_response=None,
-    ):
-        self._connection.handle_bad_response(
-            endpoint, requests_command, requests_response, aiohttp_response
-        )
+        return self._connection.make_request(payload, route, requests_command, return_raw_response)  # type: ignore
 
     def _set_api_key(self, api_key):
         """Fetch API key from environment variable NUCLEUS_API_KEY if not set"""
nucleus/annotation.py CHANGED
@@ -105,7 +105,7 @@ class BoxAnnotation(Annotation):  # pylint: disable=R0902
             reference_id="image_1",
             annotation_id="image_1_car_box_1",
             metadata={"vehicle_color": "red"},
-            embedding_vector=[0.1423, 1.432, ...3.829],
+            embedding_vector=[0.1423, 1.432, ..., 3.829],
         )
 
     Parameters:
@@ -180,6 +180,19 @@ class BoxAnnotation(Annotation):  # pylint: disable=R0902
             EMBEDDING_VECTOR_KEY: self.embedding_vector,
         }
 
+    def __eq__(self, other):
+        return (
+            self.label == other.label
+            and self.x == other.x
+            and self.y == other.y
+            and self.width == other.width
+            and self.height == other.height
+            and self.reference_id == other.reference_id
+            and self.annotation_id == other.annotation_id
+            and sorted(self.metadata.items()) == sorted(other.metadata.items())
+            and self.embedding_vector == other.embedding_vector
+        )
+
 
 @dataclass
 class Point:
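The new `__eq__` makes `BoxAnnotation` comparisons field-by-field, with metadata compared via sorted items. A quick illustration (values arbitrary):

```
from nucleus import BoxAnnotation

a = BoxAnnotation(label="car", x=0, y=0, width=10, height=10,
                  reference_id="image_1", metadata={"color": "red", "make": "vw"})
b = BoxAnnotation(label="car", x=0, y=0, width=10, height=10,
                  reference_id="image_1", metadata={"make": "vw", "color": "red"})

assert a == b  # metadata insertion order does not affect equality
```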
@@ -297,7 +310,7 @@ class PolygonAnnotation(Annotation):
             reference_id="image_2",
             annotation_id="image_2_bus_polygon_1",
             metadata={"vehicle_color": "yellow"},
-            embedding_vector=[0.1423, 1.432, ...3.829],
+            embedding_vector=[0.1423, 1.432, ..., 3.829],
         )
 
     Parameters:
@@ -426,7 +439,7 @@ class KeypointsAnnotation(Annotation):
             label="face",
             keypoints=[Keypoint(100, 100), Keypoint(120, 120), Keypoint(visible=False), Keypoint(0, 0)],
             names=["point1", "point2", "point3", "point4"],
-            skeleton=[[0, 1], [1, 2], [1, 3], [2, 4]],
+            skeleton=[[0, 1], [1, 2], [1, 3], [2, 3]],
             reference_id="image_2",
             annotation_id="image_2_face_keypoints_1",
             metadata={"face_direction": "forward"},
@@ -473,11 +486,17 @@ class KeypointsAnnotation(Annotation):
                 )
             seen.add(name)
 
+        max_segment_index = len(self.keypoints) - 1
         for segment in self.skeleton:
             if len(segment) != 2:
                 raise ValueError(
                     "The keypoints skeleton must contain a list of line segments with exactly 2 indices"
                 )
+            for index in segment:
+                if index > max_segment_index:
+                    raise ValueError(
+                        f"The skeleton index {index} is not a valid keypoint index"
+                    )
 
     @classmethod
     def from_json(cls, payload: dict):
672
691
  from nucleus import SegmentationAnnotation
673
692
 
674
693
  segmentation = SegmentationAnnotation(
675
- mask_url="s3://your-bucket-name/segmentation-masks/image_2_mask_id1.png",
694
+ mask_url="s3://your-bucket-name/segmentation-masks/image_2_mask_id_1.png",
676
695
  annotations=[
677
696
  Segment(label="grass", index="1"),
678
697
  Segment(label="road", index="2"),
@@ -685,7 +704,8 @@ class SegmentationAnnotation(Annotation):
685
704
 
686
705
  Parameters:
687
706
  mask_url (str): A URL pointing to the segmentation prediction mask which is
688
- accessible to Scale, or a local path. The mask is an HxW int8 array saved in PNG format,
707
+ accessible to Scale. This "URL" can also be a path to a local file.
708
+ The mask is an HxW int8 array saved in PNG format,
689
709
  with each pixel value ranging from [0, N), where N is the number of
690
710
  possible classes (for semantic segmentation) or instances (for instance
691
711
  segmentation).
@@ -920,6 +940,9 @@ class AnnotationList:
920
940
  ), f"Unexpected annotation type: {type(annotation)}"
921
941
  self.segmentation_annotations.append(annotation)
922
942
 
943
+ def items(self):
944
+ return self.__dict__.items()
945
+
923
946
  def __len__(self):
924
947
  return (
925
948
  len(self.box_annotations)
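The small `items()` helper exposes each per-type annotation list as a `(field name, list)` pair, enabling generic iteration over an `AnnotationList`. A hedged sketch:

```
from nucleus.annotation import AnnotationList

annotation_list = AnnotationList()  # all per-type lists default to empty

# items() mirrors dict.items() over the dataclass fields, e.g.
# ("box_annotations", [...]), ("segmentation_annotations", [...]), ...
for field_name, annotations in annotation_list.items():
    print(f"{field_name}: {len(annotations)}")
```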
nucleus/connection.py CHANGED
@@ -40,7 +40,11 @@ class Connection:
         return self.make_request(payload, route, requests_command=requests.put)
 
     def make_request(
-        self, payload: dict, route: str, requests_command=requests.post
+        self,
+        payload: dict,
+        route: str,
+        requests_command=requests.post,
+        return_raw_response: bool = False,
     ) -> dict:
         """
         Makes a request to Nucleus endpoint and logs a warning if not
@@ -49,6 +53,7 @@ class Connection:
         :param payload: given payload
         :param route: route for the request
         :param requests_command: requests.post, requests.get, requests.delete
+        :param return_raw_response: return the request's response object entirely
         :return: response JSON
         """
         endpoint = f"{self.endpoint}/{route}"
@@ -73,6 +78,9 @@ class Connection:
         if not response.ok:
             self.handle_bad_response(endpoint, requests_command, response)
 
+        if return_raw_response:
+            return response
+
         return response.json()
 
     def handle_bad_response(
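With `return_raw_response` threaded through `Connection.make_request` (and exposed on `NucleusClient.make_request` above), callers can inspect the raw `requests.Response` rather than the decoded JSON. A hedged sketch, assuming an initialized client:

```
import requests

# Returns the underlying requests.Response instead of response.json().
response = client.make_request(
    {}, "slice/", requests_command=requests.get, return_raw_response=True
)
print(response.status_code, response.headers.get("content-type"))
data = response.json()  # the JSON body remains available on demand
```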
nucleus/constants.py CHANGED
@@ -27,6 +27,8 @@ ANNOTATION_UPDATE_KEY = "update"
 AUTOTAGS_KEY = "autotags"
 AUTOTAG_SCORE_THRESHOLD = "score_threshold"
 EXPORTED_ROWS = "exportedRows"
+EXPORTED_SCALE_TASK_INFO_ROWS = "exportedScaleTaskInfoRows"
+EXPORT_FOR_TRAINING_KEY = "data"
 CAMERA_MODEL_KEY = "camera_model"
 CAMERA_PARAMS_KEY = "camera_params"
 CLASS_PDF_KEY = "class_pdf"
@@ -81,7 +83,6 @@ JOB_CREATION_TIME_KEY = "job_creation_time"
 KEYPOINTS_KEY = "keypoints"
 KEYPOINTS_NAMES_KEY = "names"
 KEYPOINTS_SKELETON_KEY = "skeleton"
-LAST_PAGE = "lastPage"
 LABEL_KEY = "label"
 LABELS_KEY = "labels"
 MASK_URL_KEY = "mask_url"
@@ -89,6 +90,7 @@ MAX_PAYLOAD_SIZE = 0x1FFFFFE8  # Set to max string size since we currently conve
 MESSAGE_KEY = "message"
 METADATA_KEY = "metadata"
 MODEL_BUNDLE_NAME_KEY = "bundle_name"
+MODEL_TAGS_KEY = "tags"
 MODEL_ID_KEY = "model_id"
 MODEL_RUN_ID_KEY = "model_run_id"
 NAME_KEY = "name"
@@ -96,8 +98,9 @@ NEW_ITEMS = "new_items"
 NUCLEUS_ENDPOINT = "https://api.scale.com/v1/nucleus"
 NUM_SENSORS_KEY = "num_sensors"
 ORIGINAL_IMAGE_URL_KEY = "original_image_url"
-PAGE_SIZE = "pageSize"
-PAGE_TOKEN = "pageToken"
+PAGE_SIZE_KEY = "pageSize"
+PAGE_TOKEN_KEY = "pageToken"
+NEXT_TOKEN_KEY = "nextPageToken"
 P1_KEY = "p1"
 P2_KEY = "p2"
 POINTCLOUD_KEY = "pointcloud"
@@ -105,11 +108,14 @@ POINTCLOUD_LOCATION_KEY = "pointcloud_location"
 POINTCLOUD_URL_KEY = "pointcloud_url"
 POSITION_KEY = "position"
 PREDICTIONS_IGNORED_KEY = "predictions_ignored"
+PREDICTIONS_KEY = "predictions"
 PREDICTIONS_PROCESSED_KEY = "predictions_processed"
 REFERENCE_IDS_KEY = "reference_ids"
 REFERENCE_ID_KEY = "reference_id"
 BACKEND_REFERENCE_ID_KEY = "ref_id"  # TODO(355762): Our backend returns this instead of the "proper" key sometimes.
 REQUEST_ID_KEY = "requestId"
+SCALE_TASK_INFO_KEY = "scale_task_info"
+SCENE_KEY = "scene"
 SCENES_KEY = "scenes"
 SERIALIZED_REQUEST_KEY = "serialized_request"
 SEGMENTATIONS_KEY = "segmentations"
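The pagination constants pick up a `_KEY` suffix and a new `NEXT_TOKEN_KEY`, while `LAST_PAGE` disappears, suggesting a move from last-page flags to cursor tokens. A speculative sketch of the loop these keys imply (the route and the "items" field are hypothetical, not a documented API):

```
from nucleus.constants import NEXT_TOKEN_KEY, PAGE_SIZE_KEY, PAGE_TOKEN_KEY

def fetch_all_pages(client, route):
    """Illustrative cursor pagination: request until no next-page token."""
    results, token = [], None
    while True:
        payload = {PAGE_SIZE_KEY: 100, PAGE_TOKEN_KEY: token}
        response = client.make_request(payload, route)
        results.extend(response.get("items", []))  # "items" is hypothetical
        token = response.get(NEXT_TOKEN_KEY)
        if not token:
            return results
```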