databricks-sdk 0.53.0__py3-none-any.whl → 0.54.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of databricks-sdk might be problematic. Click here for more details.

@@ -338,6 +338,15 @@ class CleanRoomAssetNotebook:
338
338
  """Base 64 representation of the notebook contents. This is the same format as returned by
339
339
  :method:workspace/export with the format of **HTML**."""
340
340
 
341
+ review_state: Optional[CleanRoomNotebookReviewNotebookReviewState] = None
342
+ """top-level status derived from all reviews"""
343
+
344
+ reviews: Optional[List[CleanRoomNotebookReview]] = None
345
+ """All existing approvals or rejections"""
346
+
347
+ runner_collaborator_aliases: Optional[List[str]] = None
348
+ """collaborators that can run the notebook"""
349
+
341
350
  def as_dict(self) -> dict:
342
351
  """Serializes the CleanRoomAssetNotebook into a dictionary suitable for use as a JSON request body."""
343
352
  body = {}
@@ -345,6 +354,12 @@ class CleanRoomAssetNotebook:
345
354
  body["etag"] = self.etag
346
355
  if self.notebook_content is not None:
347
356
  body["notebook_content"] = self.notebook_content
357
+ if self.review_state is not None:
358
+ body["review_state"] = self.review_state.value
359
+ if self.reviews:
360
+ body["reviews"] = [v.as_dict() for v in self.reviews]
361
+ if self.runner_collaborator_aliases:
362
+ body["runner_collaborator_aliases"] = [v for v in self.runner_collaborator_aliases]
348
363
  return body
349
364
 
350
365
  def as_shallow_dict(self) -> dict:
@@ -354,12 +369,24 @@ class CleanRoomAssetNotebook:
354
369
  body["etag"] = self.etag
355
370
  if self.notebook_content is not None:
356
371
  body["notebook_content"] = self.notebook_content
372
+ if self.review_state is not None:
373
+ body["review_state"] = self.review_state
374
+ if self.reviews:
375
+ body["reviews"] = self.reviews
376
+ if self.runner_collaborator_aliases:
377
+ body["runner_collaborator_aliases"] = self.runner_collaborator_aliases
357
378
  return body
358
379
 
359
380
  @classmethod
360
381
  def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetNotebook:
361
382
  """Deserializes the CleanRoomAssetNotebook from a dictionary."""
362
- return cls(etag=d.get("etag", None), notebook_content=d.get("notebook_content", None))
383
+ return cls(
384
+ etag=d.get("etag", None),
385
+ notebook_content=d.get("notebook_content", None),
386
+ review_state=_enum(d, "review_state", CleanRoomNotebookReviewNotebookReviewState),
387
+ reviews=_repeated_dict(d, "reviews", CleanRoomNotebookReview),
388
+ runner_collaborator_aliases=d.get("runner_collaborator_aliases", None),
389
+ )
363
390
 
364
391
 
365
392
  class CleanRoomAssetStatusEnum(Enum):
@@ -585,6 +612,78 @@ class CleanRoomCollaborator:
585
612
  )
586
613
 
587
614
 
615
+ @dataclass
616
+ class CleanRoomNotebookReview:
617
+ comment: Optional[str] = None
618
+ """review comment"""
619
+
620
+ created_at_millis: Optional[int] = None
621
+ """timestamp of when the review was submitted"""
622
+
623
+ review_state: Optional[CleanRoomNotebookReviewNotebookReviewState] = None
624
+ """review outcome"""
625
+
626
+ review_sub_reason: Optional[CleanRoomNotebookReviewNotebookReviewSubReason] = None
627
+ """specified when the review was not explicitly made by a user"""
628
+
629
+ reviewer_collaborator_alias: Optional[str] = None
630
+ """collaborator alias of the reviewer"""
631
+
632
+ def as_dict(self) -> dict:
633
+ """Serializes the CleanRoomNotebookReview into a dictionary suitable for use as a JSON request body."""
634
+ body = {}
635
+ if self.comment is not None:
636
+ body["comment"] = self.comment
637
+ if self.created_at_millis is not None:
638
+ body["created_at_millis"] = self.created_at_millis
639
+ if self.review_state is not None:
640
+ body["review_state"] = self.review_state.value
641
+ if self.review_sub_reason is not None:
642
+ body["review_sub_reason"] = self.review_sub_reason.value
643
+ if self.reviewer_collaborator_alias is not None:
644
+ body["reviewer_collaborator_alias"] = self.reviewer_collaborator_alias
645
+ return body
646
+
647
+ def as_shallow_dict(self) -> dict:
648
+ """Serializes the CleanRoomNotebookReview into a shallow dictionary of its immediate attributes."""
649
+ body = {}
650
+ if self.comment is not None:
651
+ body["comment"] = self.comment
652
+ if self.created_at_millis is not None:
653
+ body["created_at_millis"] = self.created_at_millis
654
+ if self.review_state is not None:
655
+ body["review_state"] = self.review_state
656
+ if self.review_sub_reason is not None:
657
+ body["review_sub_reason"] = self.review_sub_reason
658
+ if self.reviewer_collaborator_alias is not None:
659
+ body["reviewer_collaborator_alias"] = self.reviewer_collaborator_alias
660
+ return body
661
+
662
+ @classmethod
663
+ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomNotebookReview:
664
+ """Deserializes the CleanRoomNotebookReview from a dictionary."""
665
+ return cls(
666
+ comment=d.get("comment", None),
667
+ created_at_millis=d.get("created_at_millis", None),
668
+ review_state=_enum(d, "review_state", CleanRoomNotebookReviewNotebookReviewState),
669
+ review_sub_reason=_enum(d, "review_sub_reason", CleanRoomNotebookReviewNotebookReviewSubReason),
670
+ reviewer_collaborator_alias=d.get("reviewer_collaborator_alias", None),
671
+ )
672
+
673
+
674
+ class CleanRoomNotebookReviewNotebookReviewState(Enum):
675
+
676
+ APPROVED = "APPROVED"
677
+ PENDING = "PENDING"
678
+ REJECTED = "REJECTED"
679
+
680
+
681
+ class CleanRoomNotebookReviewNotebookReviewSubReason(Enum):
682
+
683
+ AUTO_APPROVED = "AUTO_APPROVED"
684
+ BACKFILLED = "BACKFILLED"
685
+
686
+
588
687
  @dataclass
589
688
  class CleanRoomNotebookTaskRun:
590
689
  """Stores information about a single task run."""
@@ -594,12 +693,18 @@ class CleanRoomNotebookTaskRun:
594
693
  LIST API. if the task was run within the same workspace the API is being called. If the task run
595
694
  was in a different workspace under the same metastore, only the workspace_id is included."""
596
695
 
696
+ notebook_etag: Optional[str] = None
697
+ """Etag of the notebook executed in this task run, used to identify the notebook version."""
698
+
597
699
  notebook_job_run_state: Optional[jobs.CleanRoomTaskRunState] = None
598
700
  """State of the task run."""
599
701
 
600
702
  notebook_name: Optional[str] = None
601
703
  """Asset name of the notebook executed in this task run."""
602
704
 
705
+ notebook_updated_at: Optional[int] = None
706
+ """The timestamp of when the notebook was last updated."""
707
+
603
708
  output_schema_expiration_time: Optional[int] = None
604
709
  """Expiration time of the output schema of the task run (if any), in epoch milliseconds."""
605
710
 
@@ -617,10 +722,14 @@ class CleanRoomNotebookTaskRun:
617
722
  body = {}
618
723
  if self.collaborator_job_run_info:
619
724
  body["collaborator_job_run_info"] = self.collaborator_job_run_info.as_dict()
725
+ if self.notebook_etag is not None:
726
+ body["notebook_etag"] = self.notebook_etag
620
727
  if self.notebook_job_run_state:
621
728
  body["notebook_job_run_state"] = self.notebook_job_run_state.as_dict()
622
729
  if self.notebook_name is not None:
623
730
  body["notebook_name"] = self.notebook_name
731
+ if self.notebook_updated_at is not None:
732
+ body["notebook_updated_at"] = self.notebook_updated_at
624
733
  if self.output_schema_expiration_time is not None:
625
734
  body["output_schema_expiration_time"] = self.output_schema_expiration_time
626
735
  if self.output_schema_name is not None:
@@ -636,10 +745,14 @@ class CleanRoomNotebookTaskRun:
636
745
  body = {}
637
746
  if self.collaborator_job_run_info:
638
747
  body["collaborator_job_run_info"] = self.collaborator_job_run_info
748
+ if self.notebook_etag is not None:
749
+ body["notebook_etag"] = self.notebook_etag
639
750
  if self.notebook_job_run_state:
640
751
  body["notebook_job_run_state"] = self.notebook_job_run_state
641
752
  if self.notebook_name is not None:
642
753
  body["notebook_name"] = self.notebook_name
754
+ if self.notebook_updated_at is not None:
755
+ body["notebook_updated_at"] = self.notebook_updated_at
643
756
  if self.output_schema_expiration_time is not None:
644
757
  body["output_schema_expiration_time"] = self.output_schema_expiration_time
645
758
  if self.output_schema_name is not None:
@@ -655,8 +768,10 @@ class CleanRoomNotebookTaskRun:
655
768
  """Deserializes the CleanRoomNotebookTaskRun from a dictionary."""
656
769
  return cls(
657
770
  collaborator_job_run_info=_from_dict(d, "collaborator_job_run_info", CollaboratorJobRunInfo),
771
+ notebook_etag=d.get("notebook_etag", None),
658
772
  notebook_job_run_state=_from_dict(d, "notebook_job_run_state", jobs.CleanRoomTaskRunState),
659
773
  notebook_name=d.get("notebook_name", None),
774
+ notebook_updated_at=d.get("notebook_updated_at", None),
660
775
  output_schema_expiration_time=d.get("output_schema_expiration_time", None),
661
776
  output_schema_name=d.get("output_schema_name", None),
662
777
  run_duration=d.get("run_duration", None),
@@ -729,7 +729,8 @@ class ClusterAttributes:
729
729
 
730
730
  cluster_name: Optional[str] = None
731
731
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
732
- creation, the cluster name will be an empty string."""
732
+ creation, the cluster name will be an empty string. For job clusters, the cluster name is
733
+ automatically set based on the job and job run IDs."""
733
734
 
734
735
  custom_tags: Optional[Dict[str, str]] = None
735
736
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
@@ -1118,7 +1119,8 @@ class ClusterDetails:
1118
1119
 
1119
1120
  cluster_name: Optional[str] = None
1120
1121
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
1121
- creation, the cluster name will be an empty string."""
1122
+ creation, the cluster name will be an empty string. For job clusters, the cluster name is
1123
+ automatically set based on the job and job run IDs."""
1122
1124
 
1123
1125
  cluster_source: Optional[ClusterSource] = None
1124
1126
  """Determines whether the cluster was created by a user through the UI, created by the Databricks
@@ -2300,7 +2302,8 @@ class ClusterSpec:
2300
2302
 
2301
2303
  cluster_name: Optional[str] = None
2302
2304
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
2303
- creation, the cluster name will be an empty string."""
2305
+ creation, the cluster name will be an empty string. For job clusters, the cluster name is
2306
+ automatically set based on the job and job run IDs."""
2304
2307
 
2305
2308
  custom_tags: Optional[Dict[str, str]] = None
2306
2309
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
@@ -2803,7 +2806,8 @@ class CreateCluster:
2803
2806
 
2804
2807
  cluster_name: Optional[str] = None
2805
2808
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
2806
- creation, the cluster name will be an empty string."""
2809
+ creation, the cluster name will be an empty string. For job clusters, the cluster name is
2810
+ automatically set based on the job and job run IDs."""
2807
2811
 
2808
2812
  custom_tags: Optional[Dict[str, str]] = None
2809
2813
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
@@ -4117,7 +4121,8 @@ class EditCluster:
4117
4121
 
4118
4122
  cluster_name: Optional[str] = None
4119
4123
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
4120
- creation, the cluster name will be an empty string."""
4124
+ creation, the cluster name will be an empty string. For job clusters, the cluster name is
4125
+ automatically set based on the job and job run IDs."""
4121
4126
 
4122
4127
  custom_tags: Optional[Dict[str, str]] = None
4123
4128
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
@@ -4499,10 +4504,6 @@ class EditInstancePool:
4499
4504
  min_idle_instances: Optional[int] = None
4500
4505
  """Minimum number of idle instances to keep in the instance pool"""
4501
4506
 
4502
- node_type_flexibility: Optional[NodeTypeFlexibility] = None
4503
- """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
4504
- when attempting to launch a cluster if the node type id is not available."""
4505
-
4506
4507
  def as_dict(self) -> dict:
4507
4508
  """Serializes the EditInstancePool into a dictionary suitable for use as a JSON request body."""
4508
4509
  body = {}
@@ -4518,8 +4519,6 @@ class EditInstancePool:
4518
4519
  body["max_capacity"] = self.max_capacity
4519
4520
  if self.min_idle_instances is not None:
4520
4521
  body["min_idle_instances"] = self.min_idle_instances
4521
- if self.node_type_flexibility:
4522
- body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
4523
4522
  if self.node_type_id is not None:
4524
4523
  body["node_type_id"] = self.node_type_id
4525
4524
  return body
@@ -4539,8 +4538,6 @@ class EditInstancePool:
4539
4538
  body["max_capacity"] = self.max_capacity
4540
4539
  if self.min_idle_instances is not None:
4541
4540
  body["min_idle_instances"] = self.min_idle_instances
4542
- if self.node_type_flexibility:
4543
- body["node_type_flexibility"] = self.node_type_flexibility
4544
4541
  if self.node_type_id is not None:
4545
4542
  body["node_type_id"] = self.node_type_id
4546
4543
  return body
@@ -4555,7 +4552,6 @@ class EditInstancePool:
4555
4552
  instance_pool_name=d.get("instance_pool_name", None),
4556
4553
  max_capacity=d.get("max_capacity", None),
4557
4554
  min_idle_instances=d.get("min_idle_instances", None),
4558
- node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
4559
4555
  node_type_id=d.get("node_type_id", None),
4560
4556
  )
4561
4557
 
@@ -4782,9 +4778,7 @@ class EnforceClusterComplianceResponse:
4782
4778
  @dataclass
4783
4779
  class Environment:
4784
4780
  """The environment entity used to preserve serverless environment side panel, jobs' environment for
4785
- non-notebook task, and DLT's environment for classic and serverless pipelines. (Note: DLT uses a
4786
- copied version of the Environment proto below, at
4787
- //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In this minimal
4781
+ non-notebook task, and DLT's environment for classic and serverless pipelines. In this minimal
4788
4782
  environment spec, only pip dependencies are supported."""
4789
4783
 
4790
4784
  client: str
@@ -4800,6 +4794,13 @@ class Environment:
4800
4794
  Databricks), <vcs project url> E.g. dependencies: ["foo==0.0.1", "-r
4801
4795
  /Workspace/test/requirements.txt"]"""
4802
4796
 
4797
+ environment_version: Optional[str] = None
4798
+ """We renamed `client` to `environment_version` in notebook exports. This field is meant solely so
4799
+ that imported notebooks with `environment_version` can be deserialized correctly, in a
4800
+ backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it
4801
+ will be deserialized correctly). Do NOT use this field for any other purpose, e.g. notebook
4802
+ storage. This field is not yet exposed to customers (e.g. in the jobs API)."""
4803
+
4803
4804
  jar_dependencies: Optional[List[str]] = None
4804
4805
  """List of jar dependencies, should be string representing volume paths. For example:
4805
4806
  `/Volumes/path/to/test.jar`."""
@@ -4811,6 +4812,8 @@ class Environment:
4811
4812
  body["client"] = self.client
4812
4813
  if self.dependencies:
4813
4814
  body["dependencies"] = [v for v in self.dependencies]
4815
+ if self.environment_version is not None:
4816
+ body["environment_version"] = self.environment_version
4814
4817
  if self.jar_dependencies:
4815
4818
  body["jar_dependencies"] = [v for v in self.jar_dependencies]
4816
4819
  return body
@@ -4822,6 +4825,8 @@ class Environment:
4822
4825
  body["client"] = self.client
4823
4826
  if self.dependencies:
4824
4827
  body["dependencies"] = self.dependencies
4828
+ if self.environment_version is not None:
4829
+ body["environment_version"] = self.environment_version
4825
4830
  if self.jar_dependencies:
4826
4831
  body["jar_dependencies"] = self.jar_dependencies
4827
4832
  return body
@@ -4832,6 +4837,7 @@ class Environment:
4832
4837
  return cls(
4833
4838
  client=d.get("client", None),
4834
4839
  dependencies=d.get("dependencies", None),
4840
+ environment_version=d.get("environment_version", None),
4835
4841
  jar_dependencies=d.get("jar_dependencies", None),
4836
4842
  )
4837
4843
 
@@ -5497,10 +5503,6 @@ class GetInstancePool:
5497
5503
  min_idle_instances: Optional[int] = None
5498
5504
  """Minimum number of idle instances to keep in the instance pool"""
5499
5505
 
5500
- node_type_flexibility: Optional[NodeTypeFlexibility] = None
5501
- """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
5502
- when attempting to launch a cluster if the node type id is not available."""
5503
-
5504
5506
  node_type_id: Optional[str] = None
5505
5507
  """This field encodes, through a single value, the resources available to each of the Spark nodes
5506
5508
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -5551,8 +5553,6 @@ class GetInstancePool:
5551
5553
  body["max_capacity"] = self.max_capacity
5552
5554
  if self.min_idle_instances is not None:
5553
5555
  body["min_idle_instances"] = self.min_idle_instances
5554
- if self.node_type_flexibility:
5555
- body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
5556
5556
  if self.node_type_id is not None:
5557
5557
  body["node_type_id"] = self.node_type_id
5558
5558
  if self.preloaded_docker_images:
@@ -5594,8 +5594,6 @@ class GetInstancePool:
5594
5594
  body["max_capacity"] = self.max_capacity
5595
5595
  if self.min_idle_instances is not None:
5596
5596
  body["min_idle_instances"] = self.min_idle_instances
5597
- if self.node_type_flexibility:
5598
- body["node_type_flexibility"] = self.node_type_flexibility
5599
5597
  if self.node_type_id is not None:
5600
5598
  body["node_type_id"] = self.node_type_id
5601
5599
  if self.preloaded_docker_images:
@@ -5626,7 +5624,6 @@ class GetInstancePool:
5626
5624
  instance_pool_name=d.get("instance_pool_name", None),
5627
5625
  max_capacity=d.get("max_capacity", None),
5628
5626
  min_idle_instances=d.get("min_idle_instances", None),
5629
- node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
5630
5627
  node_type_id=d.get("node_type_id", None),
5631
5628
  preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
5632
5629
  preloaded_spark_versions=d.get("preloaded_spark_versions", None),
@@ -6461,10 +6458,6 @@ class InstancePoolAndStats:
6461
6458
  min_idle_instances: Optional[int] = None
6462
6459
  """Minimum number of idle instances to keep in the instance pool"""
6463
6460
 
6464
- node_type_flexibility: Optional[NodeTypeFlexibility] = None
6465
- """For Fleet-pool V2, this object contains the information about the alternate node type ids to use
6466
- when attempting to launch a cluster if the node type id is not available."""
6467
-
6468
6461
  node_type_id: Optional[str] = None
6469
6462
  """This field encodes, through a single value, the resources available to each of the Spark nodes
6470
6463
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -6515,8 +6508,6 @@ class InstancePoolAndStats:
6515
6508
  body["max_capacity"] = self.max_capacity
6516
6509
  if self.min_idle_instances is not None:
6517
6510
  body["min_idle_instances"] = self.min_idle_instances
6518
- if self.node_type_flexibility:
6519
- body["node_type_flexibility"] = self.node_type_flexibility.as_dict()
6520
6511
  if self.node_type_id is not None:
6521
6512
  body["node_type_id"] = self.node_type_id
6522
6513
  if self.preloaded_docker_images:
@@ -6558,8 +6549,6 @@ class InstancePoolAndStats:
6558
6549
  body["max_capacity"] = self.max_capacity
6559
6550
  if self.min_idle_instances is not None:
6560
6551
  body["min_idle_instances"] = self.min_idle_instances
6561
- if self.node_type_flexibility:
6562
- body["node_type_flexibility"] = self.node_type_flexibility
6563
6552
  if self.node_type_id is not None:
6564
6553
  body["node_type_id"] = self.node_type_id
6565
6554
  if self.preloaded_docker_images:
@@ -6590,7 +6579,6 @@ class InstancePoolAndStats:
6590
6579
  instance_pool_name=d.get("instance_pool_name", None),
6591
6580
  max_capacity=d.get("max_capacity", None),
6592
6581
  min_idle_instances=d.get("min_idle_instances", None),
6593
- node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility),
6594
6582
  node_type_id=d.get("node_type_id", None),
6595
6583
  preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
6596
6584
  preloaded_spark_versions=d.get("preloaded_spark_versions", None),
@@ -8053,28 +8041,6 @@ class NodeType:
8053
8041
  )
8054
8042
 
8055
8043
 
8056
- @dataclass
8057
- class NodeTypeFlexibility:
8058
- """For Fleet-V2 using classic clusters, this object contains the information about the alternate
8059
- node type ids to use when attempting to launch a cluster. It can be used with both the driver
8060
- and worker node types."""
8061
-
8062
- def as_dict(self) -> dict:
8063
- """Serializes the NodeTypeFlexibility into a dictionary suitable for use as a JSON request body."""
8064
- body = {}
8065
- return body
8066
-
8067
- def as_shallow_dict(self) -> dict:
8068
- """Serializes the NodeTypeFlexibility into a shallow dictionary of its immediate attributes."""
8069
- body = {}
8070
- return body
8071
-
8072
- @classmethod
8073
- def from_dict(cls, d: Dict[str, Any]) -> NodeTypeFlexibility:
8074
- """Deserializes the NodeTypeFlexibility from a dictionary."""
8075
- return cls()
8076
-
8077
-
8078
8044
  @dataclass
8079
8045
  class PendingInstanceError:
8080
8046
  """Error message of a failed pending instances"""
@@ -9404,7 +9370,8 @@ class UpdateClusterResource:
9404
9370
 
9405
9371
  cluster_name: Optional[str] = None
9406
9372
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
9407
- creation, the cluster name will be an empty string."""
9373
+ creation, the cluster name will be an empty string. For job clusters, the cluster name is
9374
+ automatically set based on the job and job run IDs."""
9408
9375
 
9409
9376
  custom_tags: Optional[Dict[str, str]] = None
9410
9377
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
@@ -10374,7 +10341,8 @@ class ClustersAPI:
10374
10341
  of executor logs is `$destination/$clusterId/executor`.
10375
10342
  :param cluster_name: str (optional)
10376
10343
  Cluster name requested by the user. This doesn't have to be unique. If not specified at creation,
10377
- the cluster name will be an empty string.
10344
+ the cluster name will be an empty string. For job clusters, the cluster name is automatically set
10345
+ based on the job and job run IDs.
10378
10346
  :param custom_tags: Dict[str,str] (optional)
10379
10347
  Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
10380
10348
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -10766,7 +10734,8 @@ class ClustersAPI:
10766
10734
  of executor logs is `$destination/$clusterId/executor`.
10767
10735
  :param cluster_name: str (optional)
10768
10736
  Cluster name requested by the user. This doesn't have to be unique. If not specified at creation,
10769
- the cluster name will be an empty string.
10737
+ the cluster name will be an empty string. For job clusters, the cluster name is automatically set
10738
+ based on the job and job run IDs.
10770
10739
  :param custom_tags: Dict[str,str] (optional)
10771
10740
  Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
10772
10741
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -12227,7 +12196,6 @@ class InstancePoolsAPI:
12227
12196
  idle_instance_autotermination_minutes: Optional[int] = None,
12228
12197
  max_capacity: Optional[int] = None,
12229
12198
  min_idle_instances: Optional[int] = None,
12230
- node_type_flexibility: Optional[NodeTypeFlexibility] = None,
12231
12199
  ):
12232
12200
  """Edit an existing instance pool.
12233
12201
 
@@ -12260,9 +12228,6 @@ class InstancePoolsAPI:
12260
12228
  upsize requests.
12261
12229
  :param min_idle_instances: int (optional)
12262
12230
  Minimum number of idle instances to keep in the instance pool
12263
- :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional)
12264
- For Fleet-pool V2, this object contains the information about the alternate node type ids to use
12265
- when attempting to launch a cluster if the node type id is not available.
12266
12231
 
12267
12232
 
12268
12233
  """
@@ -12279,8 +12244,6 @@ class InstancePoolsAPI:
12279
12244
  body["max_capacity"] = max_capacity
12280
12245
  if min_idle_instances is not None:
12281
12246
  body["min_idle_instances"] = min_idle_instances
12282
- if node_type_flexibility is not None:
12283
- body["node_type_flexibility"] = node_type_flexibility.as_dict()
12284
12247
  if node_type_id is not None:
12285
12248
  body["node_type_id"] = node_type_id
12286
12249
  headers = {
@@ -12440,8 +12403,10 @@ class InstanceProfilesAPI:
12440
12403
  ):
12441
12404
  """Register an instance profile.
12442
12405
 
12443
- In the UI, you can select the instance profile when launching clusters. This API is only available to
12444
- admin users.
12406
+ Registers an instance profile in Databricks. In the UI, you can then give users the permission to use
12407
+ this instance profile when launching clusters.
12408
+
12409
+ This API is only available to admin users.
12445
12410
 
12446
12411
  :param instance_profile_arn: str
12447
12412
  The AWS ARN of the instance profile to register with Databricks. This field is required.
@@ -1255,6 +1255,9 @@ class MessageErrorType(Enum):
1255
1255
  COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION = "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION"
1256
1256
  COULD_NOT_GET_UC_SCHEMA_EXCEPTION = "COULD_NOT_GET_UC_SCHEMA_EXCEPTION"
1257
1257
  DEPLOYMENT_NOT_FOUND_EXCEPTION = "DEPLOYMENT_NOT_FOUND_EXCEPTION"
1258
+ DESCRIBE_QUERY_INVALID_SQL_ERROR = "DESCRIBE_QUERY_INVALID_SQL_ERROR"
1259
+ DESCRIBE_QUERY_TIMEOUT = "DESCRIBE_QUERY_TIMEOUT"
1260
+ DESCRIBE_QUERY_UNEXPECTED_FAILURE = "DESCRIBE_QUERY_UNEXPECTED_FAILURE"
1258
1261
  FUNCTIONS_NOT_AVAILABLE_EXCEPTION = "FUNCTIONS_NOT_AVAILABLE_EXCEPTION"
1259
1262
  FUNCTION_ARGUMENTS_INVALID_EXCEPTION = "FUNCTION_ARGUMENTS_INVALID_EXCEPTION"
1260
1263
  FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION = "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION"
@@ -1267,9 +1270,13 @@ class MessageErrorType(Enum):
1267
1270
  ILLEGAL_PARAMETER_DEFINITION_EXCEPTION = "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION"
1268
1271
  INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION = "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION"
1269
1272
  INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION = "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION"
1273
+ INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION"
1270
1274
  INVALID_CHAT_COMPLETION_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_JSON_EXCEPTION"
1271
1275
  INVALID_COMPLETION_REQUEST_EXCEPTION = "INVALID_COMPLETION_REQUEST_EXCEPTION"
1272
1276
  INVALID_FUNCTION_CALL_EXCEPTION = "INVALID_FUNCTION_CALL_EXCEPTION"
1277
+ INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION = "INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION"
1278
+ INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION = "INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION"
1279
+ INVALID_SQL_UNKNOWN_TABLE_EXCEPTION = "INVALID_SQL_UNKNOWN_TABLE_EXCEPTION"
1273
1280
  INVALID_TABLE_IDENTIFIER_EXCEPTION = "INVALID_TABLE_IDENTIFIER_EXCEPTION"
1274
1281
  LOCAL_CONTEXT_EXCEEDED_EXCEPTION = "LOCAL_CONTEXT_EXCEEDED_EXCEPTION"
1275
1282
  MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION = "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION"