databricks-sdk 0.29.0__py3-none-any.whl → 0.31.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of databricks-sdk might be problematic.
- databricks/sdk/__init__.py +89 -21
- databricks/sdk/config.py +61 -75
- databricks/sdk/core.py +16 -9
- databricks/sdk/credentials_provider.py +15 -15
- databricks/sdk/data_plane.py +65 -0
- databricks/sdk/errors/overrides.py +8 -0
- databricks/sdk/errors/platform.py +5 -0
- databricks/sdk/mixins/files.py +12 -4
- databricks/sdk/service/apps.py +977 -0
- databricks/sdk/service/billing.py +602 -218
- databricks/sdk/service/catalog.py +324 -34
- databricks/sdk/service/compute.py +766 -81
- databricks/sdk/service/dashboards.py +628 -18
- databricks/sdk/service/iam.py +99 -88
- databricks/sdk/service/jobs.py +332 -23
- databricks/sdk/service/marketplace.py +2 -122
- databricks/sdk/service/oauth2.py +127 -70
- databricks/sdk/service/pipelines.py +72 -52
- databricks/sdk/service/serving.py +303 -750
- databricks/sdk/service/settings.py +423 -4
- databricks/sdk/service/sharing.py +235 -25
- databricks/sdk/service/sql.py +2328 -544
- databricks/sdk/useragent.py +151 -0
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.31.0.dist-info}/METADATA +36 -16
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.31.0.dist-info}/RECORD +30 -27
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.31.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.31.0.dist-info}/LICENSE +0 -0
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.31.0.dist-info}/NOTICE +0 -0
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.31.0.dist-info}/top_level.txt +0 -0
@@ -690,6 +690,35 @@ class ClusterAttributes:
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))
 
 
+@dataclass
+class ClusterCompliance:
+    cluster_id: str
+    """Canonical unique identifier for a cluster."""
+
+    is_compliant: Optional[bool] = None
+    """Whether this cluster is in compliance with the latest version of its policy."""
+
+    violations: Optional[Dict[str, str]] = None
+    """An object containing key-value mappings representing the first 200 policy validation errors. The
+    keys indicate the path where the policy validation error is occurring. The values indicate an
+    error message describing the policy validation error."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ClusterCompliance into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
+        if self.is_compliant is not None: body['is_compliant'] = self.is_compliant
+        if self.violations: body['violations'] = self.violations
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ClusterCompliance:
+        """Deserializes the ClusterCompliance from a dictionary."""
+        return cls(cluster_id=d.get('cluster_id', None),
+                   is_compliant=d.get('is_compliant', None),
+                   violations=d.get('violations', None))
+
+
 @dataclass
 class ClusterDetails:
     autoscale: Optional[AutoScale] = None
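A quick sketch of the new dataclass's serialization helpers, assuming the 0.31.0 wheel is installed (the cluster ID is a placeholder):

from databricks.sdk.service.compute import ClusterCompliance

# Round-trip through the JSON-body helpers added in this release.
c = ClusterCompliance(cluster_id='1234-567890-abcdefgh',  # placeholder ID
                      is_compliant=False,
                      violations={'spark_version': 'Value must be one of: 13.3.x-scala2.12'})
assert ClusterCompliance.from_dict(c.as_dict()) == c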
@@ -1377,6 +1406,40 @@ class ClusterPolicyPermissionsRequest:
                    cluster_policy_id=d.get('cluster_policy_id', None))
 
 
+@dataclass
+class ClusterSettingsChange:
+    """Represents a change to the cluster settings required for the cluster to become compliant with
+    its policy."""
+
+    field: Optional[str] = None
+    """The field where this change would be made."""
+
+    new_value: Optional[str] = None
+    """The new value of this field after enforcing policy compliance (either a number, a boolean, or a
+    string) converted to a string. This is intended to be read by a human. The typed new value of
+    this field can be retrieved by reading the settings field in the API response."""
+
+    previous_value: Optional[str] = None
+    """The previous value of this field before enforcing policy compliance (either a number, a boolean,
+    or a string) converted to a string. This is intended to be read by a human. The type of the
+    field can be retrieved by reading the settings field in the API response."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ClusterSettingsChange into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.field is not None: body['field'] = self.field
+        if self.new_value is not None: body['new_value'] = self.new_value
+        if self.previous_value is not None: body['previous_value'] = self.previous_value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ClusterSettingsChange:
+        """Deserializes the ClusterSettingsChange from a dictionary."""
+        return cls(field=d.get('field', None),
+                   new_value=d.get('new_value', None),
+                   previous_value=d.get('previous_value', None))
+
+
 @dataclass
 class ClusterSize:
     autoscale: Optional[AutoScale] = None
@@ -2106,10 +2169,6 @@ class CreateInstancePoolResponse:
 
 @dataclass
 class CreatePolicy:
-    name: str
-    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
-    100 characters."""
-
     definition: Optional[str] = None
     """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -2126,6 +2185,10 @@ class CreatePolicy:
     """Max number of clusters per user that can be active using this policy. If not present, there is
     no max limit."""
 
+    name: Optional[str] = None
+    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
+    100 characters."""
+
     policy_family_definition_overrides: Optional[str] = None
     """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
     document must be passed as a string and cannot be embedded in the requests.
@@ -2891,10 +2954,6 @@ class EditPolicy:
     policy_id: str
     """The ID of the policy to update."""
 
-    name: str
-    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
-    100 characters."""
-
     definition: Optional[str] = None
     """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -2911,6 +2970,10 @@ class EditPolicy:
     """Max number of clusters per user that can be active using this policy. If not present, there is
     no max limit."""
 
+    name: Optional[str] = None
+    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
+    100 characters."""
+
     policy_family_definition_overrides: Optional[str] = None
     """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
     document must be passed as a string and cannot be embedded in the requests.
@@ -2982,6 +3045,52 @@ class EditResponse:
         return cls()
 
 
+@dataclass
+class EnforceClusterComplianceRequest:
+    cluster_id: str
+    """The ID of the cluster you want to enforce policy compliance on."""
+
+    validate_only: Optional[bool] = None
+    """If set, previews the changes that would be made to a cluster to enforce compliance but does not
+    update the cluster."""
+
+    def as_dict(self) -> dict:
+        """Serializes the EnforceClusterComplianceRequest into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
+        if self.validate_only is not None: body['validate_only'] = self.validate_only
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> EnforceClusterComplianceRequest:
+        """Deserializes the EnforceClusterComplianceRequest from a dictionary."""
+        return cls(cluster_id=d.get('cluster_id', None), validate_only=d.get('validate_only', None))
+
+
+@dataclass
+class EnforceClusterComplianceResponse:
+    changes: Optional[List[ClusterSettingsChange]] = None
+    """A list of changes that have been made to the cluster settings for the cluster to become
+    compliant with its policy."""
+
+    has_changes: Optional[bool] = None
+    """Whether any changes have been made to the cluster settings for the cluster to become compliant
+    with its policy."""
+
+    def as_dict(self) -> dict:
+        """Serializes the EnforceClusterComplianceResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.changes: body['changes'] = [v.as_dict() for v in self.changes]
+        if self.has_changes is not None: body['has_changes'] = self.has_changes
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> EnforceClusterComplianceResponse:
+        """Deserializes the EnforceClusterComplianceResponse from a dictionary."""
+        return cls(changes=_repeated_dict(d, 'changes', ClusterSettingsChange),
+                   has_changes=d.get('has_changes', None))
+
+
 @dataclass
 class Environment:
     """The environment entity used to preserve serverless environment side panel and jobs' environment
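These two dataclasses back the enforce-compliance endpoint added later in this diff. A minimal usage sketch, assuming the workspace client exposes the new service as policy_compliance_for_clusters (per the __init__.py changes) and using a placeholder cluster ID:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# validate_only=True previews the changes without touching the cluster.
resp = w.policy_compliance_for_clusters.enforce_compliance(
    cluster_id='1234-567890-abcdefgh',  # placeholder ID
    validate_only=True)
if resp.has_changes:
    for change in resp.changes:
        print(f'{change.field}: {change.previous_value} -> {change.new_value}')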
@@ -3251,6 +3360,30 @@ class GcsStorageInfo:
         return cls(destination=d.get('destination', None))
 
 
+@dataclass
+class GetClusterComplianceResponse:
+    is_compliant: Optional[bool] = None
+    """Whether the cluster is compliant with its policy or not. Clusters could be out of compliance if
+    the policy was updated after the cluster was last edited."""
+
+    violations: Optional[Dict[str, str]] = None
+    """An object containing key-value mappings representing the first 200 policy validation errors. The
+    keys indicate the path where the policy validation error is occurring. The values indicate an
+    error message describing the policy validation error."""
+
+    def as_dict(self) -> dict:
+        """Serializes the GetClusterComplianceResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.is_compliant is not None: body['is_compliant'] = self.is_compliant
+        if self.violations: body['violations'] = self.violations
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> GetClusterComplianceResponse:
+        """Deserializes the GetClusterComplianceResponse from a dictionary."""
+        return cls(is_compliant=d.get('is_compliant', None), violations=d.get('violations', None))
+
+
 @dataclass
 class GetClusterPermissionLevelsResponse:
     permission_levels: Optional[List[ClusterPermissionsDescription]] = None
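A matching read-only sketch for checking a single cluster's compliance status; the get_compliance method name is an assumption based on this response type and the service docstring at the end of this diff:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# Assumed method name; returns a GetClusterComplianceResponse.
status = w.policy_compliance_for_clusters.get_compliance(cluster_id='1234-567890-abcdefgh')
if not status.is_compliant:
    print(status.violations)  # up to 200 path -> error-message pairs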
@@ -4461,11 +4594,8 @@ class Library:
     """Specification of a CRAN library to be installed as part of the library"""
 
     egg: Optional[str] = None
-    """URI of the egg library to install.
-
-    "/Volumes/path/to/library.egg" }` or `{ "egg": "s3://my-bucket/library.egg" }`. If S3 is used,
-    please make sure the cluster has read access on the library. You may need to launch the cluster
-    with an IAM role to access the S3 URI."""
+    """Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is
+    not supported in Databricks Runtime 14.0 and above."""
 
     jar: Optional[str] = None
     """URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
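Since eggs are now flagged as deprecated (and unsupported on Databricks Runtime 14.0 and above), a wheel is the drop-in replacement; the volume path below is a placeholder:

from databricks.sdk.service.compute import Library

# Before: Library(egg='/Volumes/path/to/library.egg')  -- deprecated
lib = Library(whl='/Volumes/main/default/libs/mylib-1.0-py3-none-any.whl')  # placeholder path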
@@ -4603,21 +4733,132 @@ class ListAvailableZonesResponse:
         return cls(default_zone=d.get('default_zone', None), zones=d.get('zones', None))
 
 
+@dataclass
+class ListClusterCompliancesResponse:
+    clusters: Optional[List[ClusterCompliance]] = None
+    """A list of clusters and their policy compliance statuses."""
+
+    next_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the next page of results. If the value is
+    "", it means no further results for the request."""
+
+    prev_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the previous page of results. If the
+    value is "", it means no further results for the request."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ListClusterCompliancesResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
+        if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
+        if self.prev_page_token is not None: body['prev_page_token'] = self.prev_page_token
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ListClusterCompliancesResponse:
+        """Deserializes the ListClusterCompliancesResponse from a dictionary."""
+        return cls(clusters=_repeated_dict(d, 'clusters', ClusterCompliance),
+                   next_page_token=d.get('next_page_token', None),
+                   prev_page_token=d.get('prev_page_token', None))
+
+
+@dataclass
+class ListClustersFilterBy:
+    cluster_sources: Optional[List[ClusterSource]] = None
+    """The source of cluster creation."""
+
+    cluster_states: Optional[List[State]] = None
+    """The current state of the clusters."""
+
+    is_pinned: Optional[bool] = None
+    """Whether the clusters are pinned or not."""
+
+    policy_id: Optional[str] = None
+    """The ID of the cluster policy used to create the cluster if applicable."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ListClustersFilterBy into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster_sources: body['cluster_sources'] = [v.value for v in self.cluster_sources]
+        if self.cluster_states: body['cluster_states'] = [v.value for v in self.cluster_states]
+        if self.is_pinned is not None: body['is_pinned'] = self.is_pinned
+        if self.policy_id is not None: body['policy_id'] = self.policy_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ListClustersFilterBy:
+        """Deserializes the ListClustersFilterBy from a dictionary."""
+        return cls(cluster_sources=_repeated_enum(d, 'cluster_sources', ClusterSource),
+                   cluster_states=_repeated_enum(d, 'cluster_states', State),
+                   is_pinned=d.get('is_pinned', None),
+                   policy_id=d.get('policy_id', None))
+
+
 @dataclass
 class ListClustersResponse:
     clusters: Optional[List[ClusterDetails]] = None
     """<needs content added>"""
 
+    next_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the next page of results. If the value is
+    "", it means no further results for the request."""
+
+    prev_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the previous page of results. If the
+    value is "", it means no further results for the request."""
+
     def as_dict(self) -> dict:
         """Serializes the ListClustersResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
+        if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
+        if self.prev_page_token is not None: body['prev_page_token'] = self.prev_page_token
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, any]) -> ListClustersResponse:
         """Deserializes the ListClustersResponse from a dictionary."""
-        return cls(clusters=_repeated_dict(d, 'clusters', ClusterDetails))
+        return cls(clusters=_repeated_dict(d, 'clusters', ClusterDetails),
+                   next_page_token=d.get('next_page_token', None),
+                   prev_page_token=d.get('prev_page_token', None))
+
+
+@dataclass
+class ListClustersSortBy:
+    direction: Optional[ListClustersSortByDirection] = None
+    """The direction to sort by."""
+
+    field: Optional[ListClustersSortByField] = None
+    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+    precedence: cluster state, pinned or unpinned, then cluster name."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ListClustersSortBy into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.direction is not None: body['direction'] = self.direction.value
+        if self.field is not None: body['field'] = self.field.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ListClustersSortBy:
+        """Deserializes the ListClustersSortBy from a dictionary."""
+        return cls(direction=_enum(d, 'direction', ListClustersSortByDirection),
+                   field=_enum(d, 'field', ListClustersSortByField))
+
+
+class ListClustersSortByDirection(Enum):
+    """The direction to sort by."""
+
+    ASC = 'ASC'
+    DESC = 'DESC'
+
+
+class ListClustersSortByField(Enum):
+    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+    precedence: cluster state, pinned or unpinned, then cluster name."""
+
+    CLUSTER_NAME = 'CLUSTER_NAME'
+    DEFAULT = 'DEFAULT'
 
 
 @dataclass
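The new filter and sort dataclasses compose into the clusters.list call shown further down in this diff. A construction sketch (the specific values are illustrative):

from databricks.sdk.service.compute import (ClusterSource, ListClustersFilterBy,
                                            ListClustersSortBy, ListClustersSortByDirection,
                                            ListClustersSortByField, State)

# Only pinned, running, UI-created clusters, sorted by name ascending.
filter_by = ListClustersFilterBy(cluster_sources=[ClusterSource.UI],
                                 cluster_states=[State.RUNNING],
                                 is_pinned=True)
sort_by = ListClustersSortBy(field=ListClustersSortByField.CLUSTER_NAME,
                             direction=ListClustersSortByDirection.ASC)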
@@ -4705,13 +4946,13 @@ class ListPoliciesResponse:
 
 @dataclass
 class ListPolicyFamiliesResponse:
-    policy_families: List[PolicyFamily]
-    """List of policy families."""
-
     next_page_token: Optional[str] = None
     """A token that can be used to get the next page of results. If not present, there are no more
     results to show."""
 
+    policy_families: Optional[List[PolicyFamily]] = None
+    """List of policy families."""
+
     def as_dict(self) -> dict:
         """Serializes the ListPolicyFamiliesResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -4733,6 +4974,7 @@ class ListSortColumn(Enum):
 
 
 class ListSortOrder(Enum):
+    """A generic ordering enum for list-based queries."""
 
     ASC = 'ASC'
     DESC = 'DESC'
@@ -5059,6 +5301,8 @@ class PinClusterResponse:
 
 @dataclass
 class Policy:
+    """Describes a Cluster Policy entity."""
+
     created_at_timestamp: Optional[int] = None
     """Creation time. The timestamp (in millisecond) when this Cluster Policy was created."""
 
@@ -5100,7 +5344,11 @@ class Policy:
     [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
 
     policy_family_id: Optional[str] = None
-    """ID of the policy family.
+    """ID of the policy family. The cluster policy's policy definition inherits the policy family's
+    policy definition.
+
+    Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize
+    the policy definition."""
 
     policy_id: Optional[str] = None
     """Canonical unique identifier for the Cluster Policy."""
@@ -5140,20 +5388,20 @@ class Policy:
 
 @dataclass
 class PolicyFamily:
-    policy_family_id: str
-    """ID of the policy family."""
-
-    name: str
-    """Name of the policy family."""
-
-    description: str
-    """Human-readable description of the purpose of the policy family."""
-
-    definition: str
+    definition: Optional[str] = None
     """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
     [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
 
+    description: Optional[str] = None
+    """Human-readable description of the purpose of the policy family."""
+
+    name: Optional[str] = None
+    """Name of the policy family."""
+
+    policy_family_id: Optional[str] = None
+    """Unique identifier for the policy family."""
+
     def as_dict(self) -> dict:
         """Serializes the PolicyFamily into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -5793,6 +6041,260 @@ class UnpinClusterResponse:
         return cls()
 
 
+@dataclass
+class UpdateCluster:
+    cluster_id: str
+    """ID of the cluster."""
+
+    update_mask: str
+    """Specifies which fields of the cluster will be updated. This is required in the POST request. The
+    update mask should be supplied as a single string. To specify multiple fields, separate them
+    with commas (no spaces). To delete a field from a cluster configuration, add it to the
+    `update_mask` string but omit it from the `cluster` object."""
+
+    cluster: Optional[UpdateClusterResource] = None
+    """The cluster to be updated."""
+
+    def as_dict(self) -> dict:
+        """Serializes the UpdateCluster into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster: body['cluster'] = self.cluster.as_dict()
+        if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
+        if self.update_mask is not None: body['update_mask'] = self.update_mask
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> UpdateCluster:
+        """Deserializes the UpdateCluster from a dictionary."""
+        return cls(cluster=_from_dict(d, 'cluster', UpdateClusterResource),
+                   cluster_id=d.get('cluster_id', None),
+                   update_mask=d.get('update_mask', None))
+
+
+@dataclass
+class UpdateClusterResource:
+    autoscale: Optional[AutoScale] = None
+    """Parameters needed in order to automatically scale clusters up and down based on load. Note:
+    autoscaling works best with DB runtime versions 3.0 or later."""
+
+    autotermination_minutes: Optional[int] = None
+    """Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
+    this cluster will not be automatically terminated. If specified, the threshold must be between
+    10 and 10000 minutes. Users can also set this value to 0 to explicitly disable automatic
+    termination."""
+
+    aws_attributes: Optional[AwsAttributes] = None
+    """Attributes related to clusters running on Amazon Web Services. If not specified at cluster
+    creation, a set of default values will be used."""
+
+    azure_attributes: Optional[AzureAttributes] = None
+    """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
+    a set of default values will be used."""
+
+    cluster_log_conf: Optional[ClusterLogConf] = None
+    """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
+    destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
+    If the conf is given, the logs will be delivered to the destination every `5 mins`. The
+    destination of driver logs is `$destination/$clusterId/driver`, while the destination of
+    executor logs is `$destination/$clusterId/executor`."""
+
+    cluster_name: Optional[str] = None
+    """Cluster name requested by the user. This doesn't have to be unique. If not specified at
+    creation, the cluster name will be an empty string."""
+
+    custom_tags: Optional[Dict[str, str]] = None
+    """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
+    instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
+
+    - Currently, Databricks allows at most 45 custom tags
+
+    - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster
+    tags"""
+
+    data_security_mode: Optional[DataSecurityMode] = None
+    """Data security mode decides what data governance model to use when accessing data from a cluster.
+
+    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
+    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
+    used by a single user specified in `single_user_name`. Most programming languages, cluster
+    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
+    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
+    cannot see each other's data and credentials. Most data governance features are supported in
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
+    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+    doesn’t have UC nor passthrough enabled."""
+
+    docker_image: Optional[DockerImage] = None
+
+    driver_instance_pool_id: Optional[str] = None
+    """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
+    uses the instance pool with id (instance_pool_id) if the driver pool is not assigned."""
+
+    driver_node_type_id: Optional[str] = None
+    """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
+    type will be set as the same value as `node_type_id` defined above."""
+
+    enable_elastic_disk: Optional[bool] = None
+    """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
+    space when its Spark workers are running low on disk space. This feature requires specific AWS
+    permissions to function correctly - refer to the User Guide for more details."""
+
+    enable_local_disk_encryption: Optional[bool] = None
+    """Whether to enable LUKS on cluster VMs' local disks"""
+
+    gcp_attributes: Optional[GcpAttributes] = None
+    """Attributes related to clusters running on Google Cloud Platform. If not specified at cluster
+    creation, a set of default values will be used."""
+
+    init_scripts: Optional[List[InitScriptInfo]] = None
+    """The configuration for storing init scripts. Any number of destinations can be specified. The
+    scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified,
+    init script logs are sent to `<destination>/<cluster-ID>/init_scripts`."""
+
+    instance_pool_id: Optional[str] = None
+    """The optional ID of the instance pool to which the cluster belongs."""
+
+    node_type_id: Optional[str] = None
+    """This field encodes, through a single value, the resources available to each of the Spark nodes
+    in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
+    compute intensive workloads. A list of available node types can be retrieved by using the
+    :method:clusters/listNodeTypes API call."""
+
+    num_workers: Optional[int] = None
+    """Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
+    `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
+
+    Note: When reading the properties of a cluster, this field reflects the desired number of
+    workers rather than the actual current number of workers. For instance, if a cluster is resized
+    from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
+    workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
+    new nodes are provisioned."""
+
+    policy_id: Optional[str] = None
+    """The ID of the cluster policy used to create the cluster if applicable."""
+
+    runtime_engine: Optional[RuntimeEngine] = None
+    """Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime
+    engine is inferred from spark_version."""
+
+    single_user_name: Optional[str] = None
+    """Single user name if data_security_mode is `SINGLE_USER`"""
+
+    spark_conf: Optional[Dict[str, str]] = None
+    """An object containing a set of optional, user-specified Spark configuration key-value pairs.
+    Users can also pass in a string of extra JVM options to the driver and the executors via
+    `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively."""
+
+    spark_env_vars: Optional[Dict[str, str]] = None
+    """An object containing a set of optional, user-specified environment variable key-value pairs.
+    Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`)
+    while launching the driver and workers.
+
+    In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them
+    to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default
+    databricks managed environmental variables are included as well.
+
+    Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS":
+    "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS
+    -Dspark.shuffle.service.enabled=true"}`"""
+
+    spark_version: Optional[str] = None
+    """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
+    be retrieved by using the :method:clusters/sparkVersions API call."""
+
+    ssh_public_keys: Optional[List[str]] = None
+    """SSH public key contents that will be added to each Spark node in this cluster. The corresponding
+    private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
+    be specified."""
+
+    workload_type: Optional[WorkloadType] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.autoscale: body['autoscale'] = self.autoscale.as_dict()
+        if self.autotermination_minutes is not None:
+            body['autotermination_minutes'] = self.autotermination_minutes
+        if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+        if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+        if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
+        if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
+        if self.custom_tags: body['custom_tags'] = self.custom_tags
+        if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
+        if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
+        if self.driver_instance_pool_id is not None:
+            body['driver_instance_pool_id'] = self.driver_instance_pool_id
+        if self.driver_node_type_id is not None: body['driver_node_type_id'] = self.driver_node_type_id
+        if self.enable_elastic_disk is not None: body['enable_elastic_disk'] = self.enable_elastic_disk
+        if self.enable_local_disk_encryption is not None:
+            body['enable_local_disk_encryption'] = self.enable_local_disk_encryption
+        if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
+        if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
+        if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
+        if self.num_workers is not None: body['num_workers'] = self.num_workers
+        if self.policy_id is not None: body['policy_id'] = self.policy_id
+        if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine.value
+        if self.single_user_name is not None: body['single_user_name'] = self.single_user_name
+        if self.spark_conf: body['spark_conf'] = self.spark_conf
+        if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
+        if self.spark_version is not None: body['spark_version'] = self.spark_version
+        if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> UpdateClusterResource:
+        """Deserializes the UpdateClusterResource from a dictionary."""
+        return cls(autoscale=_from_dict(d, 'autoscale', AutoScale),
+                   autotermination_minutes=d.get('autotermination_minutes', None),
+                   aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
+                   azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
+                   cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
+                   cluster_name=d.get('cluster_name', None),
+                   custom_tags=d.get('custom_tags', None),
+                   data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
+                   docker_image=_from_dict(d, 'docker_image', DockerImage),
+                   driver_instance_pool_id=d.get('driver_instance_pool_id', None),
+                   driver_node_type_id=d.get('driver_node_type_id', None),
+                   enable_elastic_disk=d.get('enable_elastic_disk', None),
+                   enable_local_disk_encryption=d.get('enable_local_disk_encryption', None),
+                   gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
+                   init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
+                   instance_pool_id=d.get('instance_pool_id', None),
+                   node_type_id=d.get('node_type_id', None),
+                   num_workers=d.get('num_workers', None),
+                   policy_id=d.get('policy_id', None),
+                   runtime_engine=_enum(d, 'runtime_engine', RuntimeEngine),
+                   single_user_name=d.get('single_user_name', None),
+                   spark_conf=d.get('spark_conf', None),
+                   spark_env_vars=d.get('spark_env_vars', None),
+                   spark_version=d.get('spark_version', None),
+                   ssh_public_keys=d.get('ssh_public_keys', None),
+                   workload_type=_from_dict(d, 'workload_type', WorkloadType))
+
+
+@dataclass
+class UpdateClusterResponse:
+
+    def as_dict(self) -> dict:
+        """Serializes the UpdateClusterResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> UpdateClusterResponse:
+        """Deserializes the UpdateClusterResponse from a dictionary."""
+        return cls()
+
+
 @dataclass
 class UpdateResponse:
 
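To illustrate the update_mask semantics documented above — listing a field in the mask while omitting it from the cluster object deletes it — here is a request-construction sketch with a placeholder cluster ID:

from databricks.sdk.service.compute import UpdateCluster, UpdateClusterResource

# Pin the cluster at 4 fixed workers: set num_workers, and delete any
# autoscale block by naming it in the mask but omitting it from the resource.
req = UpdateCluster(cluster_id='1234-567890-abcdefgh',  # placeholder ID
                    update_mask='num_workers,autoscale',
                    cluster=UpdateClusterResource(num_workers=4))
print(req.as_dict())  # the JSON body the update endpoint expects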
@@ -5881,21 +6383,18 @@ class ClusterPoliciesAPI:
         self._api = api_client
 
     def create(self,
-               name: str,
                *,
                definition: Optional[str] = None,
                description: Optional[str] = None,
                libraries: Optional[List[Library]] = None,
                max_clusters_per_user: Optional[int] = None,
+               name: Optional[str] = None,
                policy_family_definition_overrides: Optional[str] = None,
                policy_family_id: Optional[str] = None) -> CreatePolicyResponse:
         """Create a new policy.
 
         Creates a new policy with prescribed settings.
 
-        :param name: str
-          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
-          characters.
         :param definition: str (optional)
           Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -5908,6 +6407,9 @@ class ClusterPoliciesAPI:
         :param max_clusters_per_user: int (optional)
           Max number of clusters per user that can be active using this policy. If not present, there is no
           max limit.
+        :param name: str (optional)
+          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
+          characters.
         :param policy_family_definition_overrides: str (optional)
           Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
           document must be passed as a string and cannot be embedded in the requests.
@@ -5957,12 +6459,12 @@ class ClusterPoliciesAPI:
 
     def edit(self,
              policy_id: str,
-             name: str,
              *,
             definition: Optional[str] = None,
             description: Optional[str] = None,
             libraries: Optional[List[Library]] = None,
             max_clusters_per_user: Optional[int] = None,
+             name: Optional[str] = None,
             policy_family_definition_overrides: Optional[str] = None,
             policy_family_id: Optional[str] = None):
         """Update a cluster policy.
@@ -5972,9 +6474,6 @@ class ClusterPoliciesAPI:
 
         :param policy_id: str
           The ID of the policy to update.
-        :param name: str
-          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
-          characters.
         :param definition: str (optional)
           Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -5987,6 +6486,9 @@ class ClusterPoliciesAPI:
         :param max_clusters_per_user: int (optional)
           Max number of clusters per user that can be active using this policy. If not present, there is no
           max limit.
+        :param name: str (optional)
+          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
+          characters.
         :param policy_family_definition_overrides: str (optional)
           Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
           document must be passed as a string and cannot be embedded in the requests.
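The net effect of these two signature changes is a small breaking change for positional callers: name moved behind the * and became optional. A before/after sketch (the policy definition is illustrative):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# 0.29.0: w.cluster_policies.create('My Policy', definition=...)   # positional name
# 0.31.0: name is keyword-only and optional.
policy = w.cluster_policies.create(
    name='My Policy',
    definition='{"spark_version": {"type": "fixed", "value": "13.3.x-scala2.12"}}')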
@@ -6024,7 +6526,7 @@ class ClusterPoliciesAPI:
         Get a cluster policy entity. Creation and editing is available to admins only.
 
         :param policy_id: str
-          Canonical unique identifier for the
+          Canonical unique identifier for the Cluster Policy.
 
         :returns: :class:`Policy`
         """
@@ -6174,9 +6676,8 @@ class ClustersAPI:
     restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive
     analysis.
 
-    IMPORTANT: Databricks retains cluster configuration information for
-
-    an all-purpose cluster configuration even after it has been terminated for more than 30 days, an
+    IMPORTANT: Databricks retains cluster configuration information for terminated clusters for 30 days. To
+    keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an
     administrator can pin a cluster to the cluster list."""
 
     def __init__(self, api_client):
@@ -6263,7 +6764,7 @@ class ClustersAPI:
         if owner_username is not None: body['owner_username'] = owner_username
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/change-owner', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/change-owner', body=body, headers=headers)
 
     def create(self,
                spark_version: str,
@@ -6462,7 +6963,7 @@ class ClustersAPI:
         if workload_type is not None: body['workload_type'] = workload_type.as_dict()
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/create', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/create', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=CreateClusterResponse.from_dict(op_response),
                     cluster_id=op_response['cluster_id'])
@@ -6546,7 +7047,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/delete', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/delete', body=body, headers=headers)
         return Wait(self.wait_get_cluster_terminated,
                     response=DeleteClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -6756,7 +7257,7 @@ class ClustersAPI:
         if workload_type is not None: body['workload_type'] = workload_type.as_dict()
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/edit', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/edit', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=EditClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -6867,7 +7368,7 @@ class ClustersAPI:
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
         while True:
-            json = self._api.do('POST', '/api/2.0/clusters/events', body=body, headers=headers)
+            json = self._api.do('POST', '/api/2.1/clusters/events', body=body, headers=headers)
             if 'events' in json:
                 for v in json['events']:
                     yield ClusterEvent.from_dict(v)
@@ -6891,7 +7392,7 @@ class ClustersAPI:
         if cluster_id is not None: query['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/get', query=query, headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/get', query=query, headers=headers)
         return ClusterDetails.from_dict(res)
 
     def get_permission_levels(self, cluster_id: str) -> GetClusterPermissionLevelsResponse:
@@ -6928,33 +7429,46 @@ class ClustersAPI:
         res = self._api.do('GET', f'/api/2.0/permissions/clusters/{cluster_id}', headers=headers)
         return ClusterPermissions.from_dict(res)
 
-    def list(self,
-             ...
+    def list(self,
+             *,
+             filter_by: Optional[ListClustersFilterBy] = None,
+             page_size: Optional[int] = None,
+             page_token: Optional[str] = None,
+             sort_by: Optional[ListClustersSortBy] = None) -> Iterator[ClusterDetails]:
+        """List clusters.
+
+        Return information about all pinned and active clusters, and all clusters terminated within the last
+        30 days. Clusters terminated prior to this period are not included.
+
+        :param filter_by: :class:`ListClustersFilterBy` (optional)
+          Filters to apply to the list of clusters.
+        :param page_size: int (optional)
+          Use this field to specify the maximum number of results to be returned by the server. The server may
+          further constrain the maximum number of results returned in a single page.
+        :param page_token: str (optional)
+          Use next_page_token or prev_page_token returned from the previous request to list the next or
+          previous page of clusters respectively.
+        :param sort_by: :class:`ListClustersSortBy` (optional)
+          Sort the list of clusters by a specific criteria.
 
         :returns: Iterator over :class:`ClusterDetails`
         """
 
         query = {}
-        if ...
+        if filter_by is not None: query['filter_by'] = filter_by.as_dict()
+        if page_size is not None: query['page_size'] = page_size
+        if page_token is not None: query['page_token'] = page_token
+        if sort_by is not None: query['sort_by'] = sort_by.as_dict()
         headers = {'Accept': 'application/json', }
 
-        ...
+        while True:
+            json = self._api.do('GET', '/api/2.1/clusters/list', query=query, headers=headers)
+            if 'clusters' in json:
+                for v in json['clusters']:
+                    yield ClusterDetails.from_dict(v)
+            if 'next_page_token' not in json or not json['next_page_token']:
+                return
+            query['page_token'] = json['next_page_token']
 
     def list_node_types(self) -> ListNodeTypesResponse:
         """List node types.
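Callers don't need to handle the tokens themselves: the generated iterator follows next_page_token across pages, so existing for-loops keep working. A usage sketch combining the filter/sort objects built earlier:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# page_size is a hint; the server may cap it. Pagination is transparent.
for details in w.clusters.list(filter_by=filter_by, sort_by=sort_by, page_size=50):
    print(details.cluster_id, details.state)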
@@ -6966,7 +7480,7 @@ class ClustersAPI:
 
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/list-node-types', headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/list-node-types', headers=headers)
         return ListNodeTypesResponse.from_dict(res)
 
     def list_zones(self) -> ListAvailableZonesResponse:
@@ -6980,7 +7494,7 @@ class ClustersAPI:
 
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/list-zones', headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/list-zones', headers=headers)
         return ListAvailableZonesResponse.from_dict(res)
 
     def permanent_delete(self, cluster_id: str):
@@ -7001,7 +7515,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/permanent-delete', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/permanent-delete', body=body, headers=headers)
 
     def pin(self, cluster_id: str):
         """Pin cluster.
@@ -7018,7 +7532,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/pin', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/pin', body=body, headers=headers)
 
     def resize(self,
                cluster_id: str,
@@ -7055,7 +7569,7 @@ class ClustersAPI:
         if num_workers is not None: body['num_workers'] = num_workers
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/resize', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/resize', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=ResizeClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -7089,7 +7603,7 @@ class ClustersAPI:
         if restart_user is not None: body['restart_user'] = restart_user
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/restart', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/restart', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=RestartClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -7134,7 +7648,7 @@ class ClustersAPI:
 
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/spark-versions', headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/spark-versions', headers=headers)
         return GetSparkVersionsResponse.from_dict(res)
 
     def start(self, cluster_id: str) -> Wait[ClusterDetails]:
@@ -7158,7 +7672,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/start', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/start', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=StartClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -7182,7 +7696,58 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/unpin', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/unpin', body=body, headers=headers)
+
+    def update(self,
+               cluster_id: str,
+               update_mask: str,
+               *,
+               cluster: Optional[UpdateClusterResource] = None) -> Wait[ClusterDetails]:
+        """Update cluster configuration (partial).
+
+        Updates the configuration of a cluster to match the partial set of attributes and size. Denote which
+        fields to update using the `update_mask` field in the request body. A cluster can be updated if it is
+        in a `RUNNING` or `TERMINATED` state. If a cluster is updated while in a `RUNNING` state, it will be
+        restarted so that the new attributes can take effect. If a cluster is updated while in a `TERMINATED`
+        state, it will remain `TERMINATED`. The updated attributes will take effect the next time the cluster
+        is started using the `clusters/start` API. Attempts to update a cluster in any other state will be
+        rejected with an `INVALID_STATE` error code. Clusters created by the Databricks Jobs service cannot be
+        updated.
+
+        :param cluster_id: str
+          ID of the cluster.
+        :param update_mask: str
+          Specifies which fields of the cluster will be updated. This is required in the POST request. The
+          update mask should be supplied as a single string. To specify multiple fields, separate them with
+          commas (no spaces). To delete a field from a cluster configuration, add it to the `update_mask`
+          string but omit it from the `cluster` object.
+        :param cluster: :class:`UpdateClusterResource` (optional)
+          The cluster to be updated.
+
+        :returns:
+          Long-running operation waiter for :class:`ClusterDetails`.
+          See :method:wait_get_cluster_running for more details.
+        """
+        body = {}
+        if cluster is not None: body['cluster'] = cluster.as_dict()
+        if cluster_id is not None: body['cluster_id'] = cluster_id
+        if update_mask is not None: body['update_mask'] = update_mask
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
+
+        op_response = self._api.do('POST', '/api/2.1/clusters/update', body=body, headers=headers)
+        return Wait(self.wait_get_cluster_running,
+                    response=UpdateClusterResponse.from_dict(op_response),
+                    cluster_id=cluster_id)
+
+    def update_and_wait(
+            self,
+            cluster_id: str,
+            update_mask: str,
+            *,
+            cluster: Optional[UpdateClusterResource] = None,
+            timeout=timedelta(minutes=20)) -> ClusterDetails:
+        return self.update(cluster=cluster, cluster_id=cluster_id,
+                           update_mask=update_mask).result(timeout=timeout)
 
     def update_permissions(
         self,
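The new `update` / `update_and_wait` pair performs a partial update: only fields named in `update_mask` change, and a field named in the mask but omitted from `cluster` is deleted from the configuration. A minimal sketch under those semantics (the cluster ID is a hypothetical placeholder):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import UpdateClusterResource

w = WorkspaceClient()

# Change only autotermination_minutes; every other attribute is preserved.
details = w.clusters.update_and_wait(
    cluster_id='0123-456789-abcdefgh',  # hypothetical placeholder
    update_mask='autotermination_minutes',
    cluster=UpdateClusterResource(autotermination_minutes=60))
print(details.state)

Note that a running cluster is restarted by this call, which is why the waiter targets `wait_get_cluster_running`.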
@@ -7209,7 +7774,8 @@ class ClustersAPI:
 
 
 class CommandExecutionAPI:
-    """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters."""
+    """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters. This API
+    only supports (classic) all-purpose clusters. Serverless compute is not supported."""
 
     def __init__(self, api_client):
         self._api = api_client
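For context, the restriction the amended docstring adds applies to the usual create-context/execute flow, sketched below following the SDK's documented pattern (the cluster ID is a hypothetical placeholder and must refer to a classic all-purpose cluster, not serverless compute):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import compute

w = WorkspaceClient()
cluster_id = '0123-456789-abcdefgh'  # hypothetical classic all-purpose cluster

# Create an execution context, run one Python command, then tear down.
ctx = w.command_execution.create(cluster_id=cluster_id,
                                 language=compute.Language.PYTHON).result()
res = w.command_execution.execute(cluster_id=cluster_id,
                                  context_id=ctx.id,
                                  language=compute.Language.PYTHON,
                                  command='print(21 * 2)').result()
print(res.results.data)
w.command_execution.destroy(cluster_id=cluster_id, context_id=ctx.id)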
@@ -8180,6 +8746,116 @@ class LibrariesAPI:
         self._api.do('POST', '/api/2.0/libraries/uninstall', body=body, headers=headers)
 
 
+class PolicyComplianceForClustersAPI:
+    """The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your
+    workspace.
+
+    A cluster is compliant with its policy if its configuration satisfies all its policy rules. Clusters could
+    be out of compliance if their policy was updated after the cluster was last edited.
+
+    The get and list compliance APIs allow you to view the policy compliance status of a cluster. The enforce
+    compliance API allows you to update a cluster to be compliant with the current version of its policy."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def enforce_compliance(self,
+                           cluster_id: str,
+                           *,
+                           validate_only: Optional[bool] = None) -> EnforceClusterComplianceResponse:
+        """Enforce cluster policy compliance.
+
+        Updates a cluster to be compliant with the current version of its policy. A cluster can be updated if
+        it is in a `RUNNING` or `TERMINATED` state.
+
+        If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes
+        can take effect.
+
+        If a cluster is updated while in a `TERMINATED` state, it will remain `TERMINATED`. The next time the
+        cluster is started, the new attributes will take effect.
+
+        Clusters created by the Databricks Jobs, DLT, or Models services cannot be enforced by this API.
+        Instead, use the "Enforce job policy compliance" API to enforce policy compliance on jobs.
+
+        :param cluster_id: str
+          The ID of the cluster you want to enforce policy compliance on.
+        :param validate_only: bool (optional)
+          If set, previews the changes that would be made to a cluster to enforce compliance but does not
+          update the cluster.
+
+        :returns: :class:`EnforceClusterComplianceResponse`
+        """
+        body = {}
+        if cluster_id is not None: body['cluster_id'] = cluster_id
+        if validate_only is not None: body['validate_only'] = validate_only
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
+
+        res = self._api.do('POST',
+                           '/api/2.0/policies/clusters/enforce-compliance',
+                           body=body,
+                           headers=headers)
+        return EnforceClusterComplianceResponse.from_dict(res)
+
+    def get_compliance(self, cluster_id: str) -> GetClusterComplianceResponse:
+        """Get cluster policy compliance.
+
+        Returns the policy compliance status of a cluster. Clusters could be out of compliance if their policy
+        was updated after the cluster was last edited.
+
+        :param cluster_id: str
+          The ID of the cluster to get the compliance status
+
+        :returns: :class:`GetClusterComplianceResponse`
+        """
+
+        query = {}
+        if cluster_id is not None: query['cluster_id'] = cluster_id
+        headers = {'Accept': 'application/json', }
+
+        res = self._api.do('GET', '/api/2.0/policies/clusters/get-compliance', query=query, headers=headers)
+        return GetClusterComplianceResponse.from_dict(res)
+
+    def list_compliance(self,
+                        policy_id: str,
+                        *,
+                        page_size: Optional[int] = None,
+                        page_token: Optional[str] = None) -> Iterator[ClusterCompliance]:
+        """List cluster policy compliance.
+
+        Returns the policy compliance status of all clusters that use a given policy. Clusters could be out of
+        compliance if their policy was updated after the cluster was last edited.
+
+        :param policy_id: str
+          Canonical unique identifier for the cluster policy.
+        :param page_size: int (optional)
+          Use this field to specify the maximum number of results to be returned by the server. The server may
+          further constrain the maximum number of results returned in a single page.
+        :param page_token: str (optional)
+          A page token that can be used to navigate to the next page or previous page as returned by
+          `next_page_token` or `prev_page_token`.
+
+        :returns: Iterator over :class:`ClusterCompliance`
+        """
+
+        query = {}
+        if page_size is not None: query['page_size'] = page_size
+        if page_token is not None: query['page_token'] = page_token
+        if policy_id is not None: query['policy_id'] = policy_id
+        headers = {'Accept': 'application/json', }
+
+        while True:
+            json = self._api.do('GET',
+                                '/api/2.0/policies/clusters/list-compliance',
+                                query=query,
+                                headers=headers)
+            if 'clusters' in json:
+                for v in json['clusters']:
+                    yield ClusterCompliance.from_dict(v)
+            if 'next_page_token' not in json or not json['next_page_token']:
+                return
+            query['page_token'] = json['next_page_token']
+
+
 class PolicyFamiliesAPI:
     """View available policy families. A policy family contains a policy definition providing best practices for
     configuring clusters for a particular use case.
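A sketch of how the three new methods compose, assuming the service is exposed on the workspace client as `w.policy_compliance_for_clusters` (the accessor name, the policy ID, and the `has_changes` field on the preview response are assumptions based on the rest of this release):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
policy_id = 'ABC123DEF4567890'  # hypothetical cluster policy ID

# The iterator pages transparently via next_page_token.
pc = w.policy_compliance_for_clusters  # assumed accessor name
for c in pc.list_compliance(policy_id=policy_id, page_size=50):
    if c.is_compliant is False:
        status = pc.get_compliance(cluster_id=c.cluster_id)
        print(c.cluster_id, status.violations)
        # Dry run: preview the enforcement without modifying the cluster.
        preview = pc.enforce_compliance(cluster_id=c.cluster_id, validate_only=True)
        print(preview.has_changes)  # field name assumed from the response schema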
@@ -8194,19 +8870,27 @@ class PolicyFamiliesAPI:
     def __init__(self, api_client):
         self._api = api_client
 
-    def get(self, policy_family_id: str) -> PolicyFamily:
+    def get(self, policy_family_id: str, *, version: Optional[int] = None) -> PolicyFamily:
         """Get policy family information.
 
-        Retrieve the information for a policy family based on its identifier
+        Retrieve the information for a policy family based on its identifier and version
 
         :param policy_family_id: str
+          The family ID about which to retrieve information.
+        :param version: int (optional)
+          The version number for the family to fetch. Defaults to the latest version.
 
         :returns: :class:`PolicyFamily`
         """
 
+        query = {}
+        if version is not None: query['version'] = version
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', f'/api/2.0/policy-families/{policy_family_id}', headers=headers)
+        res = self._api.do('GET',
+                           f'/api/2.0/policy-families/{policy_family_id}',
+                           query=query,
+                           headers=headers)
         return PolicyFamily.from_dict(res)
 
     def list(self,
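A sketch of the new optional `version` parameter (the family ID below is a hypothetical placeholder; omitting `version` preserves the old behavior of fetching the latest definition):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Latest version: the call shape is identical to the pre-0.31 signature.
fam = w.policy_families.get(policy_family_id='personal-vm')
# Pin to a specific definition version via the new query parameter.
fam_v1 = w.policy_families.get(policy_family_id='personal-vm', version=1)
print(fam.name)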
@@ -8215,10 +8899,11 @@ class PolicyFamiliesAPI:
              page_token: Optional[str] = None) -> Iterator[PolicyFamily]:
         """List policy families.
 
-
+        Returns the list of policy definition types available to use at their latest version. This API is
+        paginated.
 
         :param max_results: int (optional)
-
+          Maximum number of policy families to return.
         :param page_token: str (optional)
           A token that can be used to get the next page of results.
|
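And since `list` is paginated, a short consumer sketch; the returned iterator follows the page tokens internally, so callers never handle them directly (the `max_results` value is illustrative):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Walk every policy family; pagination is handled inside the iterator.
for fam in w.policy_families.list(max_results=20):
    print(fam.policy_family_id, fam.name)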