databricks-sdk 0.66.0__py3-none-any.whl → 0.68.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of databricks-sdk has been flagged as potentially problematic; consult the package registry's advisory page for details.

Files changed (47)
  1. databricks/sdk/__init__.py +10 -3
  2. databricks/sdk/_base_client.py +4 -1
  3. databricks/sdk/common/lro.py +17 -0
  4. databricks/sdk/common/types/__init__.py +0 -0
  5. databricks/sdk/common/types/fieldmask.py +39 -0
  6. databricks/sdk/credentials_provider.py +61 -12
  7. databricks/sdk/dbutils.py +5 -1
  8. databricks/sdk/errors/parser.py +8 -3
  9. databricks/sdk/mixins/files.py +1 -0
  10. databricks/sdk/oidc_token_supplier.py +80 -0
  11. databricks/sdk/retries.py +102 -2
  12. databricks/sdk/service/_internal.py +93 -1
  13. databricks/sdk/service/agentbricks.py +1 -1
  14. databricks/sdk/service/apps.py +264 -1
  15. databricks/sdk/service/billing.py +2 -3
  16. databricks/sdk/service/catalog.py +1030 -537
  17. databricks/sdk/service/cleanrooms.py +3 -3
  18. databricks/sdk/service/compute.py +21 -33
  19. databricks/sdk/service/dashboards.py +51 -3
  20. databricks/sdk/service/database.py +99 -8
  21. databricks/sdk/service/dataquality.py +1145 -0
  22. databricks/sdk/service/files.py +2 -1
  23. databricks/sdk/service/iam.py +6 -5
  24. databricks/sdk/service/iamv2.py +1 -1
  25. databricks/sdk/service/jobs.py +6 -9
  26. databricks/sdk/service/marketplace.py +3 -1
  27. databricks/sdk/service/ml.py +3 -1
  28. databricks/sdk/service/oauth2.py +1 -1
  29. databricks/sdk/service/pipelines.py +5 -6
  30. databricks/sdk/service/provisioning.py +544 -655
  31. databricks/sdk/service/qualitymonitorv2.py +1 -1
  32. databricks/sdk/service/serving.py +59 -1
  33. databricks/sdk/service/settings.py +5 -2
  34. databricks/sdk/service/settingsv2.py +1 -1
  35. databricks/sdk/service/sharing.py +12 -3
  36. databricks/sdk/service/sql.py +305 -70
  37. databricks/sdk/service/tags.py +1 -1
  38. databricks/sdk/service/vectorsearch.py +3 -1
  39. databricks/sdk/service/workspace.py +70 -17
  40. databricks/sdk/version.py +1 -1
  41. {databricks_sdk-0.66.0.dist-info → databricks_sdk-0.68.0.dist-info}/METADATA +4 -2
  42. databricks_sdk-0.68.0.dist-info/RECORD +83 -0
  43. databricks_sdk-0.66.0.dist-info/RECORD +0 -79
  44. {databricks_sdk-0.66.0.dist-info → databricks_sdk-0.68.0.dist-info}/WHEEL +0 -0
  45. {databricks_sdk-0.66.0.dist-info → databricks_sdk-0.68.0.dist-info}/licenses/LICENSE +0 -0
  46. {databricks_sdk-0.66.0.dist-info → databricks_sdk-0.68.0.dist-info}/licenses/NOTICE +0 -0
  47. {databricks_sdk-0.66.0.dist-info → databricks_sdk-0.68.0.dist-info}/top_level.txt +0 -0
@@ -10,8 +10,10 @@ from datetime import timedelta
10
10
  from enum import Enum
11
11
  from typing import Any, Callable, Dict, Iterator, List, Optional
12
12
 
13
+ from databricks.sdk.service._internal import (Wait, _enum, _from_dict,
14
+ _repeated_dict, _repeated_enum)
15
+
13
16
  from ..errors import OperationFailed
14
- from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum
15
17
 
16
18
  _LOG = logging.getLogger("databricks.sdk")
17
19
 
@@ -1080,9 +1082,6 @@ class AlertV2Subscription:
1080
1082
 
1081
1083
  @dataclass
1082
1084
  class BaseChunkInfo:
1083
- """Describes metadata for a particular chunk, within a result set; this structure is used both
1084
- within a manifest, and when fetching individual chunk data or links."""
1085
-
1086
1085
  byte_count: Optional[int] = None
1087
1086
  """The number of bytes in the result chunk. This field is not available when using `INLINE`
1088
1087
  disposition."""
@@ -1686,8 +1685,6 @@ class CreateVisualizationRequestVisualization:
1686
1685
 
1687
1686
 
1688
1687
  class CreateWarehouseRequestWarehouseType(Enum):
1689
- """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
1690
- and also set the field `enable_serverless_compute` to `true`."""
1691
1688
 
1692
1689
  CLASSIC = "CLASSIC"
1693
1690
  PRO = "PRO"
@@ -2251,8 +2248,6 @@ class Disposition(Enum):
2251
2248
 
2252
2249
 
2253
2250
  class EditWarehouseRequestWarehouseType(Enum):
2254
- """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
2255
- and also set the field `enable_serverless_compute` to `true`."""
2256
2251
 
2257
2252
  CLASSIC = "CLASSIC"
2258
2253
  PRO = "PRO"
@@ -2341,6 +2336,7 @@ class EndpointHealth:
2341
2336
  """Deprecated. split into summary and details for security"""
2342
2337
 
2343
2338
  status: Optional[Status] = None
2339
+ """Health status of the endpoint."""
2344
2340
 
2345
2341
  summary: Optional[str] = None
2346
2342
  """A short summary of the health status in case of degraded/failed warehouses."""
@@ -2434,7 +2430,7 @@ class EndpointInfo:
2434
2430
  max_num_clusters: Optional[int] = None
2435
2431
  """Maximum number of clusters that the autoscaler will create to handle concurrent queries.
2436
2432
 
2437
- Supported values: - Must be >= min_num_clusters - Must be <= 30.
2433
+ Supported values: - Must be >= min_num_clusters - Must be <= 40.
2438
2434
 
2439
2435
  Defaults to min_clusters if unset."""
2440
2436
 
@@ -2463,8 +2459,10 @@ class EndpointInfo:
2463
2459
  """ODBC parameters for the SQL warehouse"""
2464
2460
 
2465
2461
  spot_instance_policy: Optional[SpotInstancePolicy] = None
2462
+ """Configurations whether the endpoint should use spot instances."""
2466
2463
 
2467
2464
  state: Optional[State] = None
2465
+ """state of the endpoint"""
2468
2466
 
2469
2467
  tags: Optional[EndpointTags] = None
2470
2468
  """A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS
@@ -2594,8 +2592,6 @@ class EndpointInfo:
2594
2592
 
2595
2593
 
2596
2594
  class EndpointInfoWarehouseType(Enum):
2597
- """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
2598
- and also set the field `enable_serverless_compute` to `true`."""
2599
2595
 
2600
2596
  CLASSIC = "CLASSIC"
2601
2597
  PRO = "PRO"
@@ -2725,6 +2721,9 @@ class ExternalLink:
2725
2721
  which point a new `external_link` must be requested."""
2726
2722
 
2727
2723
  external_link: Optional[str] = None
2724
+ """A URL pointing to a chunk of result data, hosted by an external service, with a short expiration
2725
+ time (<= 15 minutes). As this URL contains a temporary credential, it should be considered
2726
+ sensitive and the client should not expose this URL in a log."""
2728
2727
 
2729
2728
  http_headers: Optional[Dict[str, str]] = None
2730
2729
  """HTTP headers that must be included with a GET request to the `external_link`. Each header is
@@ -2735,7 +2734,7 @@ class ExternalLink:
2735
2734
  next_chunk_index: Optional[int] = None
2736
2735
  """When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are
2737
2736
  no more chunks. The next chunk can be fetched with a
2738
- :method:statementexecution/getStatementResultChunkN request."""
2737
+ :method:statementexecution/getstatementresultchunkn request."""
2739
2738
 
2740
2739
  next_chunk_internal_link: Optional[str] = None
2741
2740
  """When fetching, provides a link to fetch the _next_ chunk. If absent, indicates there are no more
@@ -3048,7 +3047,7 @@ class GetWarehouseResponse:
3048
3047
  max_num_clusters: Optional[int] = None
3049
3048
  """Maximum number of clusters that the autoscaler will create to handle concurrent queries.
3050
3049
 
3051
- Supported values: - Must be >= min_num_clusters - Must be <= 30.
3050
+ Supported values: - Must be >= min_num_clusters - Must be <= 40.
3052
3051
 
3053
3052
  Defaults to min_clusters if unset."""
3054
3053
 
@@ -3077,8 +3076,10 @@ class GetWarehouseResponse:
3077
3076
  """ODBC parameters for the SQL warehouse"""
3078
3077
 
3079
3078
  spot_instance_policy: Optional[SpotInstancePolicy] = None
3079
+ """Configurations whether the endpoint should use spot instances."""
3080
3080
 
3081
3081
  state: Optional[State] = None
3082
+ """state of the endpoint"""
3082
3083
 
3083
3084
  tags: Optional[EndpointTags] = None
3084
3085
  """A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS
@@ -3087,6 +3088,8 @@ class GetWarehouseResponse:
3087
3088
  Supported values: - Number of tags < 45."""
3088
3089
 
3089
3090
  warehouse_type: Optional[GetWarehouseResponseWarehouseType] = None
3091
+ """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
3092
+ and also set the field `enable_serverless_compute` to `true`."""
3090
3093
 
3091
3094
  def as_dict(self) -> dict:
3092
3095
  """Serializes the GetWarehouseResponse into a dictionary suitable for use as a JSON request body."""
@@ -3206,8 +3209,6 @@ class GetWarehouseResponse:
3206
3209
 
3207
3210
 
3208
3211
  class GetWarehouseResponseWarehouseType(Enum):
3209
- """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
3210
- and also set the field `enable_serverless_compute` to `true`."""
3211
3212
 
3212
3213
  CLASSIC = "CLASSIC"
3213
3214
  PRO = "PRO"
@@ -3226,6 +3227,9 @@ class GetWorkspaceWarehouseConfigResponse:
3226
3227
  """Spark confs for external hive metastore configuration JSON serialized size must be less than <=
3227
3228
  512K"""
3228
3229
 
3230
+ enable_serverless_compute: Optional[bool] = None
3231
+ """Enable Serverless compute for SQL warehouses"""
3232
+
3229
3233
  enabled_warehouse_types: Optional[List[WarehouseTypePair]] = None
3230
3234
  """List of Warehouse Types allowed in this workspace (limits allowed value of the type field in
3231
3235
  CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be
@@ -3240,7 +3244,8 @@ class GetWorkspaceWarehouseConfigResponse:
3240
3244
  """GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage"""
3241
3245
 
3242
3246
  instance_profile_arn: Optional[str] = None
3243
- """AWS Only: Instance profile used to pass IAM role to the cluster"""
3247
+ """AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This
3248
+ configuration is also applied to the workspace's serverless compute for notebooks and jobs."""
3244
3249
 
3245
3250
  security_policy: Optional[GetWorkspaceWarehouseConfigResponseSecurityPolicy] = None
3246
3251
  """Security policy for warehouses"""
@@ -3257,6 +3262,8 @@ class GetWorkspaceWarehouseConfigResponse:
3257
3262
  body["config_param"] = self.config_param.as_dict()
3258
3263
  if self.data_access_config:
3259
3264
  body["data_access_config"] = [v.as_dict() for v in self.data_access_config]
3265
+ if self.enable_serverless_compute is not None:
3266
+ body["enable_serverless_compute"] = self.enable_serverless_compute
3260
3267
  if self.enabled_warehouse_types:
3261
3268
  body["enabled_warehouse_types"] = [v.as_dict() for v in self.enabled_warehouse_types]
3262
3269
  if self.global_param:
@@ -3280,6 +3287,8 @@ class GetWorkspaceWarehouseConfigResponse:
3280
3287
  body["config_param"] = self.config_param
3281
3288
  if self.data_access_config:
3282
3289
  body["data_access_config"] = self.data_access_config
3290
+ if self.enable_serverless_compute is not None:
3291
+ body["enable_serverless_compute"] = self.enable_serverless_compute
3283
3292
  if self.enabled_warehouse_types:
3284
3293
  body["enabled_warehouse_types"] = self.enabled_warehouse_types
3285
3294
  if self.global_param:
@@ -3301,6 +3310,7 @@ class GetWorkspaceWarehouseConfigResponse:
3301
3310
  channel=_from_dict(d, "channel", Channel),
3302
3311
  config_param=_from_dict(d, "config_param", RepeatedEndpointConfPairs),
3303
3312
  data_access_config=_repeated_dict(d, "data_access_config", EndpointConfPair),
3313
+ enable_serverless_compute=d.get("enable_serverless_compute", None),
3304
3314
  enabled_warehouse_types=_repeated_dict(d, "enabled_warehouse_types", WarehouseTypePair),
3305
3315
  global_param=_from_dict(d, "global_param", RepeatedEndpointConfPairs),
3306
3316
  google_service_account=d.get("google_service_account", None),
@@ -3311,7 +3321,7 @@ class GetWorkspaceWarehouseConfigResponse:
3311
3321
 
3312
3322
 
3313
3323
  class GetWorkspaceWarehouseConfigResponseSecurityPolicy(Enum):
3314
- """Security policy for warehouses"""
3324
+ """Security policy to be used for warehouses"""
3315
3325
 
3316
3326
  DATA_ACCESS_CONTROL = "DATA_ACCESS_CONTROL"
3317
3327
  NONE = "NONE"
@@ -4264,12 +4274,18 @@ class ListVisualizationsForQueryResponse:
4264
4274
 
4265
4275
  @dataclass
4266
4276
  class ListWarehousesResponse:
4277
+ next_page_token: Optional[str] = None
4278
+ """A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted,
4279
+ there are no subsequent pages."""
4280
+
4267
4281
  warehouses: Optional[List[EndpointInfo]] = None
4268
4282
  """A list of warehouses and their configurations."""
4269
4283
 
4270
4284
  def as_dict(self) -> dict:
4271
4285
  """Serializes the ListWarehousesResponse into a dictionary suitable for use as a JSON request body."""
4272
4286
  body = {}
4287
+ if self.next_page_token is not None:
4288
+ body["next_page_token"] = self.next_page_token
4273
4289
  if self.warehouses:
4274
4290
  body["warehouses"] = [v.as_dict() for v in self.warehouses]
4275
4291
  return body
@@ -4277,6 +4293,8 @@ class ListWarehousesResponse:
4277
4293
  def as_shallow_dict(self) -> dict:
4278
4294
  """Serializes the ListWarehousesResponse into a shallow dictionary of its immediate attributes."""
4279
4295
  body = {}
4296
+ if self.next_page_token is not None:
4297
+ body["next_page_token"] = self.next_page_token
4280
4298
  if self.warehouses:
4281
4299
  body["warehouses"] = self.warehouses
4282
4300
  return body
@@ -4284,7 +4302,9 @@ class ListWarehousesResponse:
4284
4302
  @classmethod
4285
4303
  def from_dict(cls, d: Dict[str, Any]) -> ListWarehousesResponse:
4286
4304
  """Deserializes the ListWarehousesResponse from a dictionary."""
4287
- return cls(warehouses=_repeated_dict(d, "warehouses", EndpointInfo))
4305
+ return cls(
4306
+ next_page_token=d.get("next_page_token", None), warehouses=_repeated_dict(d, "warehouses", EndpointInfo)
4307
+ )
4288
4308
 
4289
4309
 
4290
4310
  @dataclass
@@ -5551,6 +5571,12 @@ class RestoreResponse:
5551
5571
 
5552
5572
  @dataclass
5553
5573
  class ResultData:
5574
+ """Contains the result data of a single chunk when using `INLINE` disposition. When using
5575
+ `EXTERNAL_LINKS` disposition, the array `external_links` is used instead to provide URLs to the
5576
+ result data in cloud storage. Exactly one of these alternatives is used. (While the
5577
+ `external_links` array prepares the API to return multiple links in a single response. Currently
5578
+ only a single link is returned.)"""
5579
+
5554
5580
  byte_count: Optional[int] = None
5555
5581
  """The number of bytes in the result chunk. This field is not available when using `INLINE`
5556
5582
  disposition."""
@@ -5567,7 +5593,7 @@ class ResultData:
5567
5593
  next_chunk_index: Optional[int] = None
5568
5594
  """When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are
5569
5595
  no more chunks. The next chunk can be fetched with a
5570
- :method:statementexecution/getStatementResultChunkN request."""
5596
+ :method:statementexecution/getstatementresultchunkn request."""
5571
5597
 
5572
5598
  next_chunk_internal_link: Optional[str] = None
5573
5599
  """When fetching, provides a link to fetch the _next_ chunk. If absent, indicates there are no more
@@ -5855,7 +5881,7 @@ class SetResponse:
5855
5881
 
5856
5882
 
5857
5883
  class SetWorkspaceWarehouseConfigRequestSecurityPolicy(Enum):
5858
- """Security policy for warehouses"""
5884
+ """Security policy to be used for warehouses"""
5859
5885
 
5860
5886
  DATA_ACCESS_CONTROL = "DATA_ACCESS_CONTROL"
5861
5887
  NONE = "NONE"
@@ -5881,7 +5907,20 @@ class SetWorkspaceWarehouseConfigResponse:
5881
5907
 
5882
5908
 
5883
5909
  class SpotInstancePolicy(Enum):
5884
- """Configurations whether the warehouse should use spot instances."""
5910
+ """EndpointSpotInstancePolicy configures whether the endpoint should use spot instances.
5911
+
5912
+ The breakdown of how the EndpointSpotInstancePolicy converts to per cloud configurations is:
5913
+
5914
+ +-------+--------------------------------------+--------------------------------+ | Cloud |
5915
+ COST_OPTIMIZED | RELIABILITY_OPTIMIZED |
5916
+ +-------+--------------------------------------+--------------------------------+ | AWS | On
5917
+ Demand Driver with Spot Executors | On Demand Driver and Executors | | AZURE | On Demand Driver
5918
+ and Executors | On Demand Driver and Executors |
5919
+ +-------+--------------------------------------+--------------------------------+
5920
+
5921
+ While including "spot" in the enum name may limit the the future extensibility of this field
5922
+ because it limits this enum to denoting "spot or not", this is the field that PM recommends
5923
+ after discussion with customers per SC-48783."""
5885
5924
 
5886
5925
  COST_OPTIMIZED = "COST_OPTIMIZED"
5887
5926
  POLICY_UNSPECIFIED = "POLICY_UNSPECIFIED"
@@ -5907,7 +5946,7 @@ class StartWarehouseResponse:
5907
5946
 
5908
5947
 
5909
5948
  class State(Enum):
5910
- """State of the warehouse"""
5949
+ """* State of a warehouse."""
5911
5950
 
5912
5951
  DELETED = "DELETED"
5913
5952
  DELETING = "DELETING"
@@ -6011,11 +6050,6 @@ class StatementResponse:
6011
6050
 
6012
6051
 
6013
6052
  class StatementState(Enum):
6014
- """Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running -
6015
- `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution
6016
- failed; reason for failure described in accomanying error message - `CANCELED`: user canceled;
6017
- can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`:
6018
- execution successful, and statement closed; result no longer available for fetch"""
6019
6053
 
6020
6054
  CANCELED = "CANCELED"
6021
6055
  CLOSED = "CLOSED"
@@ -6032,6 +6066,11 @@ class StatementStatus:
6032
6066
  error: Optional[ServiceError] = None
6033
6067
 
6034
6068
  state: Optional[StatementState] = None
6069
+ """Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running -
6070
+ `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution
6071
+ failed; reason for failure described in accompanying error message - `CANCELED`: user canceled;
6072
+ can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`:
6073
+ execution successful, and statement closed; result no longer available for fetch"""
6035
6074
 
6036
6075
  def as_dict(self) -> dict:
6037
6076
  """Serializes the StatementStatus into a dictionary suitable for use as a JSON request body."""
@@ -6058,12 +6097,10 @@ class StatementStatus:
6058
6097
 
6059
6098
 
6060
6099
  class Status(Enum):
6061
- """Health status of the warehouse."""
6062
6100
 
6063
6101
  DEGRADED = "DEGRADED"
6064
6102
  FAILED = "FAILED"
6065
6103
  HEALTHY = "HEALTHY"
6066
- STATUS_UNSPECIFIED = "STATUS_UNSPECIFIED"
6067
6104
 
6068
6105
 
6069
6106
  @dataclass
@@ -6214,20 +6251,35 @@ class TerminationReason:
6214
6251
 
6215
6252
 
6216
6253
  class TerminationReasonCode(Enum):
6217
- """status code indicating why the cluster was terminated"""
6254
+ """The status code indicating why the cluster was terminated"""
6218
6255
 
6219
6256
  ABUSE_DETECTED = "ABUSE_DETECTED"
6257
+ ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE"
6258
+ ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT"
6259
+ ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY"
6260
+ ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS"
6261
+ ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS"
6262
+ ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS"
6263
+ ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS"
6264
+ ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS"
6265
+ ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS"
6220
6266
  ATTACH_PROJECT_FAILURE = "ATTACH_PROJECT_FAILURE"
6221
6267
  AWS_AUTHORIZATION_FAILURE = "AWS_AUTHORIZATION_FAILURE"
6268
+ AWS_INACCESSIBLE_KMS_KEY_FAILURE = "AWS_INACCESSIBLE_KMS_KEY_FAILURE"
6269
+ AWS_INSTANCE_PROFILE_UPDATE_FAILURE = "AWS_INSTANCE_PROFILE_UPDATE_FAILURE"
6222
6270
  AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE = "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE"
6223
6271
  AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE = "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE"
6272
+ AWS_INVALID_KEY_PAIR = "AWS_INVALID_KEY_PAIR"
6273
+ AWS_INVALID_KMS_KEY_STATE = "AWS_INVALID_KMS_KEY_STATE"
6224
6274
  AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE = "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE"
6225
6275
  AWS_REQUEST_LIMIT_EXCEEDED = "AWS_REQUEST_LIMIT_EXCEEDED"
6276
+ AWS_RESOURCE_QUOTA_EXCEEDED = "AWS_RESOURCE_QUOTA_EXCEEDED"
6226
6277
  AWS_UNSUPPORTED_FAILURE = "AWS_UNSUPPORTED_FAILURE"
6227
6278
  AZURE_BYOK_KEY_PERMISSION_FAILURE = "AZURE_BYOK_KEY_PERMISSION_FAILURE"
6228
6279
  AZURE_EPHEMERAL_DISK_FAILURE = "AZURE_EPHEMERAL_DISK_FAILURE"
6229
6280
  AZURE_INVALID_DEPLOYMENT_TEMPLATE = "AZURE_INVALID_DEPLOYMENT_TEMPLATE"
6230
6281
  AZURE_OPERATION_NOT_ALLOWED_EXCEPTION = "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION"
6282
+ AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE = "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE"
6231
6283
  AZURE_QUOTA_EXCEEDED_EXCEPTION = "AZURE_QUOTA_EXCEEDED_EXCEPTION"
6232
6284
  AZURE_RESOURCE_MANAGER_THROTTLING = "AZURE_RESOURCE_MANAGER_THROTTLING"
6233
6285
  AZURE_RESOURCE_PROVIDER_THROTTLING = "AZURE_RESOURCE_PROVIDER_THROTTLING"
@@ -6236,65 +6288,150 @@ class TerminationReasonCode(Enum):
6236
6288
  AZURE_VNET_CONFIGURATION_FAILURE = "AZURE_VNET_CONFIGURATION_FAILURE"
6237
6289
  BOOTSTRAP_TIMEOUT = "BOOTSTRAP_TIMEOUT"
6238
6290
  BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION = "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION"
6291
+ BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG"
6292
+ BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED"
6293
+ BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE"
6294
+ CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED = "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED"
6295
+ CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE"
6296
+ CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED"
6239
6297
  CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE"
6298
+ CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED = "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED"
6240
6299
  CLOUD_PROVIDER_LAUNCH_FAILURE = "CLOUD_PROVIDER_LAUNCH_FAILURE"
6300
+ CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG"
6241
6301
  CLOUD_PROVIDER_RESOURCE_STOCKOUT = "CLOUD_PROVIDER_RESOURCE_STOCKOUT"
6302
+ CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG"
6242
6303
  CLOUD_PROVIDER_SHUTDOWN = "CLOUD_PROVIDER_SHUTDOWN"
6304
+ CLUSTER_OPERATION_THROTTLED = "CLUSTER_OPERATION_THROTTLED"
6305
+ CLUSTER_OPERATION_TIMEOUT = "CLUSTER_OPERATION_TIMEOUT"
6243
6306
  COMMUNICATION_LOST = "COMMUNICATION_LOST"
6244
6307
  CONTAINER_LAUNCH_FAILURE = "CONTAINER_LAUNCH_FAILURE"
6245
6308
  CONTROL_PLANE_REQUEST_FAILURE = "CONTROL_PLANE_REQUEST_FAILURE"
6309
+ CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG = "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG"
6246
6310
  DATABASE_CONNECTION_FAILURE = "DATABASE_CONNECTION_FAILURE"
6311
+ DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
6247
6312
  DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
6313
+ DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
6314
+ DNS_RESOLUTION_ERROR = "DNS_RESOLUTION_ERROR"
6315
+ DOCKER_CONTAINER_CREATION_EXCEPTION = "DOCKER_CONTAINER_CREATION_EXCEPTION"
6248
6316
  DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
6317
+ DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION"
6318
+ DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION"
6319
+ DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE"
6320
+ DRIVER_EVICTION = "DRIVER_EVICTION"
6321
+ DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
6322
+ DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
6323
+ DRIVER_OUT_OF_DISK = "DRIVER_OUT_OF_DISK"
6324
+ DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY"
6325
+ DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE"
6326
+ DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE"
6327
+ DRIVER_UNHEALTHY = "DRIVER_UNHEALTHY"
6249
6328
  DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE"
6250
6329
  DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE"
6330
+ DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED"
6331
+ EOS_SPARK_IMAGE = "EOS_SPARK_IMAGE"
6251
6332
  EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
6333
+ EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
6334
+ GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
6335
+ GCP_DENIED_BY_ORG_POLICY = "GCP_DENIED_BY_ORG_POLICY"
6336
+ GCP_FORBIDDEN = "GCP_FORBIDDEN"
6337
+ GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
6338
+ GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
6339
+ GCP_INSUFFICIENT_CAPACITY = "GCP_INSUFFICIENT_CAPACITY"
6340
+ GCP_IP_SPACE_EXHAUSTED = "GCP_IP_SPACE_EXHAUSTED"
6341
+ GCP_KMS_KEY_PERMISSION_DENIED = "GCP_KMS_KEY_PERMISSION_DENIED"
6342
+ GCP_NOT_FOUND = "GCP_NOT_FOUND"
6252
6343
  GCP_QUOTA_EXCEEDED = "GCP_QUOTA_EXCEEDED"
6344
+ GCP_RESOURCE_QUOTA_EXCEEDED = "GCP_RESOURCE_QUOTA_EXCEEDED"
6345
+ GCP_SERVICE_ACCOUNT_ACCESS_DENIED = "GCP_SERVICE_ACCOUNT_ACCESS_DENIED"
6253
6346
  GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED"
6347
+ GCP_SERVICE_ACCOUNT_NOT_FOUND = "GCP_SERVICE_ACCOUNT_NOT_FOUND"
6348
+ GCP_SUBNET_NOT_READY = "GCP_SUBNET_NOT_READY"
6349
+ GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED = "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED"
6350
+ GKE_BASED_CLUSTER_TERMINATION = "GKE_BASED_CLUSTER_TERMINATION"
6254
6351
  GLOBAL_INIT_SCRIPT_FAILURE = "GLOBAL_INIT_SCRIPT_FAILURE"
6255
6352
  HIVE_METASTORE_PROVISIONING_FAILURE = "HIVE_METASTORE_PROVISIONING_FAILURE"
6256
6353
  IMAGE_PULL_PERMISSION_DENIED = "IMAGE_PULL_PERMISSION_DENIED"
6257
6354
  INACTIVITY = "INACTIVITY"
6355
+ INIT_CONTAINER_NOT_FINISHED = "INIT_CONTAINER_NOT_FINISHED"
6258
6356
  INIT_SCRIPT_FAILURE = "INIT_SCRIPT_FAILURE"
6259
6357
  INSTANCE_POOL_CLUSTER_FAILURE = "INSTANCE_POOL_CLUSTER_FAILURE"
6358
+ INSTANCE_POOL_MAX_CAPACITY_REACHED = "INSTANCE_POOL_MAX_CAPACITY_REACHED"
6359
+ INSTANCE_POOL_NOT_FOUND = "INSTANCE_POOL_NOT_FOUND"
6260
6360
  INSTANCE_UNREACHABLE = "INSTANCE_UNREACHABLE"
6361
+ INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG = "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG"
6362
+ INTERNAL_CAPACITY_FAILURE = "INTERNAL_CAPACITY_FAILURE"
6261
6363
  INTERNAL_ERROR = "INTERNAL_ERROR"
6262
6364
  INVALID_ARGUMENT = "INVALID_ARGUMENT"
6365
+ INVALID_AWS_PARAMETER = "INVALID_AWS_PARAMETER"
6366
+ INVALID_INSTANCE_PLACEMENT_PROTOCOL = "INVALID_INSTANCE_PLACEMENT_PROTOCOL"
6263
6367
  INVALID_SPARK_IMAGE = "INVALID_SPARK_IMAGE"
6368
+ INVALID_WORKER_IMAGE_FAILURE = "INVALID_WORKER_IMAGE_FAILURE"
6369
+ IN_PENALTY_BOX = "IN_PENALTY_BOX"
6264
6370
  IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE"
6265
6371
  JOB_FINISHED = "JOB_FINISHED"
6372
+ K8S_ACTIVE_POD_QUOTA_EXCEEDED = "K8S_ACTIVE_POD_QUOTA_EXCEEDED"
6266
6373
  K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE"
6267
6374
  K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT"
6375
+ LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT"
6376
+ MAINTENANCE_MODE = "MAINTENANCE_MODE"
6268
6377
  METASTORE_COMPONENT_UNHEALTHY = "METASTORE_COMPONENT_UNHEALTHY"
6269
6378
  NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT"
6379
+ NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT"
6380
+ NETWORK_CHECK_CONTROL_PLANE_FAILURE = "NETWORK_CHECK_CONTROL_PLANE_FAILURE"
6381
+ NETWORK_CHECK_DNS_SERVER_FAILURE = "NETWORK_CHECK_DNS_SERVER_FAILURE"
6382
+ NETWORK_CHECK_METADATA_ENDPOINT_FAILURE = "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE"
6383
+ NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE = "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE"
6384
+ NETWORK_CHECK_NIC_FAILURE = "NETWORK_CHECK_NIC_FAILURE"
6385
+ NETWORK_CHECK_STORAGE_FAILURE = "NETWORK_CHECK_STORAGE_FAILURE"
6270
6386
  NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE"
6271
6387
  NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE"
6388
+ NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S"
6389
+ NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG"
6390
+ NO_MATCHED_K8S = "NO_MATCHED_K8S"
6391
+ NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG"
6272
6392
  NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE"
6273
6393
  NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE"
6394
+ POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE"
6395
+ POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE"
6274
6396
  REQUEST_REJECTED = "REQUEST_REJECTED"
6275
6397
  REQUEST_THROTTLED = "REQUEST_THROTTLED"
6398
+ RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED"
6399
+ SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
6400
+ SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED"
6276
6401
  SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
6402
+ SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION"
6277
6403
  SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
6278
6404
  SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
6405
+ SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
6279
6406
  SKIPPED_SLOW_NODES = "SKIPPED_SLOW_NODES"
6280
6407
  SLOW_IMAGE_DOWNLOAD = "SLOW_IMAGE_DOWNLOAD"
6281
6408
  SPARK_ERROR = "SPARK_ERROR"
6282
6409
  SPARK_IMAGE_DOWNLOAD_FAILURE = "SPARK_IMAGE_DOWNLOAD_FAILURE"
6410
+ SPARK_IMAGE_DOWNLOAD_THROTTLED = "SPARK_IMAGE_DOWNLOAD_THROTTLED"
6411
+ SPARK_IMAGE_NOT_FOUND = "SPARK_IMAGE_NOT_FOUND"
6283
6412
  SPARK_STARTUP_FAILURE = "SPARK_STARTUP_FAILURE"
6284
6413
  SPOT_INSTANCE_TERMINATION = "SPOT_INSTANCE_TERMINATION"
6414
+ SSH_BOOTSTRAP_FAILURE = "SSH_BOOTSTRAP_FAILURE"
6285
6415
  STORAGE_DOWNLOAD_FAILURE = "STORAGE_DOWNLOAD_FAILURE"
6416
+ STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG = "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG"
6417
+ STORAGE_DOWNLOAD_FAILURE_SLOW = "STORAGE_DOWNLOAD_FAILURE_SLOW"
6418
+ STORAGE_DOWNLOAD_FAILURE_THROTTLED = "STORAGE_DOWNLOAD_FAILURE_THROTTLED"
6286
6419
  STS_CLIENT_SETUP_FAILURE = "STS_CLIENT_SETUP_FAILURE"
6287
6420
  SUBNET_EXHAUSTED_FAILURE = "SUBNET_EXHAUSTED_FAILURE"
6288
6421
  TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE"
6289
6422
  TRIAL_EXPIRED = "TRIAL_EXPIRED"
6290
6423
  UNEXPECTED_LAUNCH_FAILURE = "UNEXPECTED_LAUNCH_FAILURE"
6424
+ UNEXPECTED_POD_RECREATION = "UNEXPECTED_POD_RECREATION"
6291
6425
  UNKNOWN = "UNKNOWN"
6292
6426
  UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE"
6293
6427
  UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE"
6428
+ USAGE_POLICY_ENTITLEMENT_DENIED = "USAGE_POLICY_ENTITLEMENT_DENIED"
6429
+ USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION"
6294
6430
  USER_REQUEST = "USER_REQUEST"
6295
6431
  WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE"
6296
6432
  WORKSPACE_CANCELLED_ERROR = "WORKSPACE_CANCELLED_ERROR"
6297
6433
  WORKSPACE_CONFIGURATION_ERROR = "WORKSPACE_CONFIGURATION_ERROR"
6434
+ WORKSPACE_UPDATE = "WORKSPACE_UPDATE"
6298
6435
 
6299
6436
 
6300
6437
  class TerminationReasonType(Enum):
@@ -7008,12 +7145,14 @@ class WarehousePermissionsDescription:
7008
7145
 
7009
7146
  @dataclass
7010
7147
  class WarehouseTypePair:
7148
+ """* Configuration values to enable or disable the access to specific warehouse types in the
7149
+ workspace."""
7150
+
7011
7151
  enabled: Optional[bool] = None
7012
7152
  """If set to false the specific warehouse type will not be be allowed as a value for warehouse_type
7013
7153
  in CreateWarehouse and EditWarehouse"""
7014
7154
 
7015
7155
  warehouse_type: Optional[WarehouseTypePairWarehouseType] = None
7016
- """Warehouse type: `PRO` or `CLASSIC`."""
7017
7156
 
7018
7157
  def as_dict(self) -> dict:
7019
7158
  """Serializes the WarehouseTypePair into a dictionary suitable for use as a JSON request body."""
@@ -7042,7 +7181,6 @@ class WarehouseTypePair:
7042
7181
 
7043
7182
 
7044
7183
  class WarehouseTypePairWarehouseType(Enum):
7045
- """Warehouse type: `PRO` or `CLASSIC`."""
7046
7184
 
7047
7185
  CLASSIC = "CLASSIC"
7048
7186
  PRO = "PRO"
@@ -8819,17 +8957,17 @@ class StatementExecutionAPI:
8819
8957
  the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to
8820
8958
  asynchronous mode, or it can be set to `CANCEL`, which cancels the statement.
8821
8959
 
8822
- In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30
8823
- seconds; if the statement execution finishes within this time, the result data is returned directly in the
8824
- response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns
8825
- with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call
8826
- doesn't wait for the statement to finish but returns directly with a statement ID. The status of the
8827
- statement execution can be polled by issuing :method:statementexecution/getStatement with the statement
8828
- ID. Once the execution has succeeded, this call also returns the result and metadata in the response. -
8829
- Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10
8830
- seconds; if the statement execution finishes within this time, the result data is returned directly in the
8831
- response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can
8832
- be used to fetch status and results in the same way as in the asynchronous mode.
8960
+ In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call waits up to
8961
+ 30 seconds; if the statement execution finishes within this time, the result data is returned directly in
8962
+ the response. If the execution takes longer than 30 seconds, the execution is canceled and the call
8963
+ returns with a `CANCELED` state. - **Asynchronous mode** (`wait_timeout=0s` and `on_wait_timeout` is
8964
+ ignored): The call doesn't wait for the statement to finish but returns directly with a statement ID. The
8965
+ status of the statement execution can be polled by issuing :method:statementexecution/getStatement with
8966
+ the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the
8967
+ response. - **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits
8968
+ for up to 10 seconds; if the statement execution finishes within this time, the result data is returned
8969
+ directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The
8970
+ statement ID can be used to fetch status and results in the same way as in the asynchronous mode.
8833
8971
 
8834
8972
  Depending on the size, the result can be split into multiple chunks. If the statement execution is
8835
8973
  successful, the statement response contains a manifest and the first chunk of the result. The manifest
@@ -8884,7 +9022,7 @@ class StatementExecutionAPI:
8884
9022
 
8885
9023
  def cancel_execution(self, statement_id: str):
8886
9024
  """Requests that an executing statement be canceled. Callers must poll for status to see the terminal
8887
- state.
9025
+ state. Cancel response is empty; receiving response indicates successful receipt.
8888
9026
 
8889
9027
  :param statement_id: str
8890
9028
  The statement ID is returned upon successfully submitting a SQL statement, and is a required
@@ -8912,7 +9050,52 @@ class StatementExecutionAPI:
8912
9050
  schema: Optional[str] = None,
8913
9051
  wait_timeout: Optional[str] = None,
8914
9052
  ) -> StatementResponse:
8915
- """Execute a SQL statement
9053
+ """Execute a SQL statement and optionally await its results for a specified time.
9054
+
9055
+ **Use case: small result sets with INLINE + JSON_ARRAY**
9056
+
9057
+ For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of
9058
+ `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.
9059
+
9060
+ **Use case: large result sets with EXTERNAL_LINKS**
9061
+
9062
+ Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets efficiently. The
9063
+ main differences from using `INLINE` disposition are that the result data is accessed with URLs, and
9064
+ that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and `CSV` compared to only
9065
+ `JSON_ARRAY` with `INLINE`.
9066
+
9067
+ ** URLs**
9068
+
9069
+ External links point to data stored within your workspace's internal storage, in the form of a URL.
9070
+ The URLs are valid for only a short period, <= 15 minutes. Alongside each `external_link` is an
9071
+ expiration field indicating the time at which the URL is no longer valid. In `EXTERNAL_LINKS` mode,
9072
+ chunks can be resolved and fetched multiple times and in parallel.
9073
+
9074
+ ----
9075
+
9076
+ ### **Warning: Databricks strongly recommends that you protect the URLs that are returned by the
9077
+ `EXTERNAL_LINKS` disposition.**
9078
+
9079
+ When you use the `EXTERNAL_LINKS` disposition, a short-lived, URL is generated, which can be used to
9080
+ download the results directly from . As a short-lived is embedded in this URL, you should protect the
9081
+ URL.
9082
+
9083
+ Because URLs are already generated with embedded temporary s, you must not set an `Authorization`
9084
+ header in the download requests.
9085
+
9086
+ The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case.
9087
+
9088
+ See also [Security best practices].
9089
+
9090
+ ----
9091
+
9092
+ StatementResponse contains `statement_id` and `status`; other fields might be absent or present
9093
+ depending on context. If the SQL warehouse fails to execute the provided statement, a 200 response is
9094
+ returned with `status.state` set to `FAILED` (in contrast to a failure when accepting the request,
9095
+ which results in a non-200 response). Details of the error can be found at `status.error` in case of
9096
+ execution failures.
9097
+
9098
+ [Security best practices]: https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices
8916
9099
 
8917
9100
  :param statement: str
8918
9101
  The SQL statement to execute. The statement can optionally be parameterized, see `parameters`. The
@@ -8926,12 +9109,32 @@ class StatementExecutionAPI:
8926
9109
  representations and might not match the final size in the requested `format`. If the result was
8927
9110
  truncated due to the byte limit, then `truncated` in the response is set to `true`. When using
8928
9111
  `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is applied if `byte_limit` is not
8929
- explcitly set.
9112
+ explicitly set.
8930
9113
  :param catalog: str (optional)
8931
9114
  Sets default catalog for statement execution, similar to [`USE CATALOG`] in SQL.
8932
9115
 
8933
9116
  [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html
8934
9117
  :param disposition: :class:`Disposition` (optional)
9118
+ The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`.
9119
+
9120
+ Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY`
9121
+ format, in a series of chunks. If a given statement produces a result set with a size larger than 25
9122
+ MiB, that statement execution is aborted, and no result set will be available.
9123
+
9124
+ **NOTE** Byte limits are computed based upon internal representations of the result set data, and
9125
+ might not match the sizes visible in JSON responses.
9126
+
9127
+ Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links:
9128
+ URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition
9129
+ allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The
9130
+ resulting links have two important properties:
9131
+
9132
+ 1. They point to resources _external_ to the Databricks compute; therefore any associated
9133
+ authentication information (typically a personal access token, OAuth token, or similar) _must be
9134
+ removed_ when fetching from these links.
9135
+
9136
+ 2. These are URLs with a specific expiration, indicated in the response. The behavior when
9137
+ attempting to use an expired link is cloud specific.
8935
9138
  :param format: :class:`Format` (optional)
8936
9139
  Statement execution supports three result formats: `JSON_ARRAY` (default), `ARROW_STREAM`, and
8937
9140
  `CSV`.
@@ -8982,13 +9185,13 @@ class StatementExecutionAPI:
8982
9185
 
8983
9186
  For example, the following statement contains two parameters, `my_name` and `my_date`:
8984
9187
 
8985
- SELECT * FROM my_table WHERE name = :my_name AND date = :my_date
9188
+ ``` SELECT * FROM my_table WHERE name = :my_name AND date = :my_date ```
8986
9189
 
8987
9190
  The parameters can be passed in the request body as follows:
8988
9191
 
8989
- { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
9192
+ ` { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
8990
9193
  "parameters": [ { "name": "my_name", "value": "the name" }, { "name": "my_date", "value":
8991
- "2020-01-01", "type": "DATE" } ] }
9194
+ "2020-01-01", "type": "DATE" } ] } `
8992
9195
 
8993
9196
  Currently, positional parameters denoted by a `?` marker are not supported by the Databricks SQL
8994
9197
  Statement Execution API.
@@ -9049,15 +9252,16 @@ class StatementExecutionAPI:
9049
9252
  "Content-Type": "application/json",
9050
9253
  }
9051
9254
 
9052
- res = self._api.do("POST", "/api/2.0/sql/statements/", body=body, headers=headers)
9255
+ res = self._api.do("POST", "/api/2.0/sql/statements", body=body, headers=headers)
9053
9256
  return StatementResponse.from_dict(res)
9054
9257
 
9055
9258
  def get_statement(self, statement_id: str) -> StatementResponse:
9056
- """This request can be used to poll for the statement's status. When the `status.state` field is
9057
- `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the
9058
- statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the
9059
- state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and
9060
- further calls will receive an HTTP 404 response.
9259
+ """This request can be used to poll for the statement's status. StatementResponse contains `statement_id`
9260
+ and `status`; other fields might be absent or present depending on context. When the `status.state`
9261
+ field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data.
9262
+ When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200
9263
+ with the state set. After at least 12 hours in terminal state, the statement is removed from the
9264
+ warehouse and further calls will receive an HTTP 404 response.
9061
9265
 
9062
9266
  **NOTE** This call currently might take up to 5 seconds to get the latest status and result.
9063
9267
 
@@ -9082,6 +9286,7 @@ class StatementExecutionAPI:
9082
9286
  can be used to fetch subsequent chunks. The response structure is identical to the nested `result`
9083
9287
  element described in the :method:statementexecution/getStatement request, and similarly includes the
9084
9288
  `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set.
9289
+ Depending on `disposition`, the response returns chunks of data either inline, or as links.
9085
9290
 
9086
9291
  :param statement_id: str
9087
9292
  The statement ID is returned upon successfully submitting a SQL statement, and is a required
@@ -9192,8 +9397,7 @@ class WarehousesAPI:
9192
9397
  The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it
9193
9398
  is automatically stopped.
9194
9399
 
9195
- Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for
9196
- non-serverless warehouses - 0 indicates no autostop.
9400
+ Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
9197
9401
 
9198
9402
  Defaults to 120 mins
9199
9403
  :param channel: :class:`Channel` (optional)
@@ -9218,7 +9422,7 @@ class WarehousesAPI:
9218
9422
  :param max_num_clusters: int (optional)
9219
9423
  Maximum number of clusters that the autoscaler will create to handle concurrent queries.
9220
9424
 
9221
- Supported values: - Must be >= min_num_clusters - Must be <= 30.
9425
+ Supported values: - Must be >= min_num_clusters - Must be <= 40.
9222
9426
 
9223
9427
  Defaults to min_clusters if unset.
9224
9428
  :param min_num_clusters: int (optional)
@@ -9234,12 +9438,15 @@ class WarehousesAPI:
9234
9438
 
9235
9439
  Supported values: - Must be unique within an org. - Must be less than 100 characters.
9236
9440
  :param spot_instance_policy: :class:`SpotInstancePolicy` (optional)
9441
+ Configurations whether the endpoint should use spot instances.
9237
9442
  :param tags: :class:`EndpointTags` (optional)
9238
9443
  A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes)
9239
9444
  associated with this SQL warehouse.
9240
9445
 
9241
9446
  Supported values: - Number of tags < 45.
9242
9447
  :param warehouse_type: :class:`CreateWarehouseRequestWarehouseType` (optional)
9448
+ Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and
9449
+ also set the field `enable_serverless_compute` to `true`.
9243
9450
 
9244
9451
  :returns:
9245
9452
  Long-running operation waiter for :class:`GetWarehouseResponse`.
@@ -9378,13 +9585,13 @@ class WarehousesAPI:
9378
9585
 
9379
9586
  Defaults to false.
9380
9587
  :param enable_serverless_compute: bool (optional)
9381
- Configures whether the warehouse should use serverless compute.
9588
+ Configures whether the warehouse should use serverless compute
9382
9589
  :param instance_profile_arn: str (optional)
9383
9590
  Deprecated. Instance profile used to pass IAM role to the cluster
9384
9591
  :param max_num_clusters: int (optional)
9385
9592
  Maximum number of clusters that the autoscaler will create to handle concurrent queries.
9386
9593
 
9387
- Supported values: - Must be >= min_num_clusters - Must be <= 30.
9594
+ Supported values: - Must be >= min_num_clusters - Must be <= 40.
9388
9595
 
9389
9596
  Defaults to min_clusters if unset.
9390
9597
  :param min_num_clusters: int (optional)
@@ -9400,12 +9607,15 @@ class WarehousesAPI:
9400
9607
 
9401
9608
  Supported values: - Must be unique within an org. - Must be less than 100 characters.
9402
9609
  :param spot_instance_policy: :class:`SpotInstancePolicy` (optional)
9610
+ Configurations whether the endpoint should use spot instances.
9403
9611
  :param tags: :class:`EndpointTags` (optional)
9404
9612
  A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes)
9405
9613
  associated with this SQL warehouse.
9406
9614
 
9407
9615
  Supported values: - Number of tags < 45.
9408
9616
  :param warehouse_type: :class:`EditWarehouseRequestWarehouseType` (optional)
9617
+ Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and
9618
+ also set the field `enable_serverless_compute` to `true`.
9409
9619
 
9410
9620
  :returns:
9411
9621
  Long-running operation waiter for :class:`GetWarehouseResponse`.
@@ -9444,7 +9654,7 @@ class WarehousesAPI:
9444
9654
  }
9445
9655
 
9446
9656
  op_response = self._api.do("POST", f"/api/2.0/sql/warehouses/{id}/edit", body=body, headers=headers)
9447
- return Wait(self.wait_get_warehouse_running, response=EditWarehouseResponse.from_dict(op_response), id=id)
9657
+ return Wait(self.wait_get_warehouse_running, id=id)
9448
9658
 
9449
9659
  def edit_and_wait(
9450
9660
  self,
@@ -9545,26 +9755,45 @@ class WarehousesAPI:
9545
9755
  res = self._api.do("GET", "/api/2.0/sql/config/warehouses", headers=headers)
9546
9756
  return GetWorkspaceWarehouseConfigResponse.from_dict(res)
9547
9757
 
9548
- def list(self, *, run_as_user_id: Optional[int] = None) -> Iterator[EndpointInfo]:
9758
+ def list(
9759
+ self, *, page_size: Optional[int] = None, page_token: Optional[str] = None, run_as_user_id: Optional[int] = None
9760
+ ) -> Iterator[EndpointInfo]:
9549
9761
  """Lists all SQL warehouses that a user has access to.
9550
9762
 
9763
+ :param page_size: int (optional)
9764
+ The max number of warehouses to return.
9765
+ :param page_token: str (optional)
9766
+ A page token, received from a previous `ListWarehouses` call. Provide this to retrieve the
9767
+ subsequent page; otherwise the first will be retrieved.
9768
+
9769
+ When paginating, all other parameters provided to `ListWarehouses` must match the call that provided
9770
+ the page token.
9551
9771
  :param run_as_user_id: int (optional)
9552
- Service Principal which will be used to fetch the list of warehouses. If not specified, the user
9553
- from the session header is used.
9772
+ Service Principal which will be used to fetch the list of endpoints. If not specified, SQL Gateway
9773
+ will use the user from the session header.
9554
9774
 
9555
9775
  :returns: Iterator over :class:`EndpointInfo`
9556
9776
  """
9557
9777
 
9558
9778
  query = {}
9779
+ if page_size is not None:
9780
+ query["page_size"] = page_size
9781
+ if page_token is not None:
9782
+ query["page_token"] = page_token
9559
9783
  if run_as_user_id is not None:
9560
9784
  query["run_as_user_id"] = run_as_user_id
9561
9785
  headers = {
9562
9786
  "Accept": "application/json",
9563
9787
  }
9564
9788
 
9565
- json = self._api.do("GET", "/api/2.0/sql/warehouses", query=query, headers=headers)
9566
- parsed = ListWarehousesResponse.from_dict(json).warehouses
9567
- return parsed if parsed is not None else []
9789
+ while True:
9790
+ json = self._api.do("GET", "/api/2.0/sql/warehouses", query=query, headers=headers)
9791
+ if "warehouses" in json:
9792
+ for v in json["warehouses"]:
9793
+ yield EndpointInfo.from_dict(v)
9794
+ if "next_page_token" not in json or not json["next_page_token"]:
9795
+ return
9796
+ query["page_token"] = json["next_page_token"]
9568
9797
 
9569
9798
  def set_permissions(
9570
9799
  self, warehouse_id: str, *, access_control_list: Optional[List[WarehouseAccessControlRequest]] = None
@@ -9595,6 +9824,7 @@ class WarehousesAPI:
9595
9824
  channel: Optional[Channel] = None,
9596
9825
  config_param: Optional[RepeatedEndpointConfPairs] = None,
9597
9826
  data_access_config: Optional[List[EndpointConfPair]] = None,
9827
+ enable_serverless_compute: Optional[bool] = None,
9598
9828
  enabled_warehouse_types: Optional[List[WarehouseTypePair]] = None,
9599
9829
  global_param: Optional[RepeatedEndpointConfPairs] = None,
9600
9830
  google_service_account: Optional[str] = None,
@@ -9610,6 +9840,8 @@ class WarehousesAPI:
9610
9840
  Deprecated: Use sql_configuration_parameters
9611
9841
  :param data_access_config: List[:class:`EndpointConfPair`] (optional)
9612
9842
  Spark confs for external hive metastore configuration JSON serialized size must be less than <= 512K
9843
+ :param enable_serverless_compute: bool (optional)
9844
+ Enable Serverless compute for SQL warehouses
9613
9845
  :param enabled_warehouse_types: List[:class:`WarehouseTypePair`] (optional)
9614
9846
  List of Warehouse Types allowed in this workspace (limits allowed value of the type field in
9615
9847
  CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be
@@ -9621,7 +9853,8 @@ class WarehousesAPI:
9621
9853
  :param google_service_account: str (optional)
9622
9854
  GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage
9623
9855
  :param instance_profile_arn: str (optional)
9624
- AWS Only: Instance profile used to pass IAM role to the cluster
9856
+ AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This configuration is
9857
+ also applied to the workspace's serverless compute for notebooks and jobs.
9625
9858
  :param security_policy: :class:`SetWorkspaceWarehouseConfigRequestSecurityPolicy` (optional)
9626
9859
  Security policy for warehouses
9627
9860
  :param sql_configuration_parameters: :class:`RepeatedEndpointConfPairs` (optional)
@@ -9636,6 +9869,8 @@ class WarehousesAPI:
9636
9869
  body["config_param"] = config_param.as_dict()
9637
9870
  if data_access_config is not None:
9638
9871
  body["data_access_config"] = [v.as_dict() for v in data_access_config]
9872
+ if enable_serverless_compute is not None:
9873
+ body["enable_serverless_compute"] = enable_serverless_compute
9639
9874
  if enabled_warehouse_types is not None:
9640
9875
  body["enabled_warehouse_types"] = [v.as_dict() for v in enabled_warehouse_types]
9641
9876
  if global_param is not None:
@@ -9671,7 +9906,7 @@ class WarehousesAPI:
9671
9906
  }
9672
9907
 
9673
9908
  op_response = self._api.do("POST", f"/api/2.0/sql/warehouses/{id}/start", headers=headers)
9674
- return Wait(self.wait_get_warehouse_running, response=StartWarehouseResponse.from_dict(op_response), id=id)
9909
+ return Wait(self.wait_get_warehouse_running, id=id)
9675
9910
 
9676
9911
  def start_and_wait(self, id: str, timeout=timedelta(minutes=20)) -> GetWarehouseResponse:
9677
9912
  return self.start(id=id).result(timeout=timeout)
@@ -9692,7 +9927,7 @@ class WarehousesAPI:
9692
9927
  }
9693
9928
 
9694
9929
  op_response = self._api.do("POST", f"/api/2.0/sql/warehouses/{id}/stop", headers=headers)
9695
- return Wait(self.wait_get_warehouse_stopped, response=StopWarehouseResponse.from_dict(op_response), id=id)
9930
+ return Wait(self.wait_get_warehouse_stopped, id=id)
9696
9931
 
9697
9932
  def stop_and_wait(self, id: str, timeout=timedelta(minutes=20)) -> GetWarehouseResponse:
9698
9933
  return self.stop(id=id).result(timeout=timeout)