databricks-sdk 0.67.0__py3-none-any.whl → 0.69.0__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.

Potentially problematic release. This version of databricks-sdk might be problematic.

Files changed (49)
  1. databricks/sdk/__init__.py +14 -10
  2. databricks/sdk/_base_client.py +4 -1
  3. databricks/sdk/common/lro.py +17 -0
  4. databricks/sdk/common/types/__init__.py +0 -0
  5. databricks/sdk/common/types/fieldmask.py +39 -0
  6. databricks/sdk/config.py +62 -14
  7. databricks/sdk/credentials_provider.py +61 -12
  8. databricks/sdk/dbutils.py +5 -1
  9. databricks/sdk/errors/parser.py +8 -3
  10. databricks/sdk/mixins/files.py +1156 -111
  11. databricks/sdk/mixins/files_utils.py +293 -0
  12. databricks/sdk/oidc_token_supplier.py +80 -0
  13. databricks/sdk/retries.py +102 -2
  14. databricks/sdk/service/_internal.py +93 -1
  15. databricks/sdk/service/agentbricks.py +1 -1
  16. databricks/sdk/service/apps.py +264 -1
  17. databricks/sdk/service/billing.py +2 -3
  18. databricks/sdk/service/catalog.py +1026 -540
  19. databricks/sdk/service/cleanrooms.py +3 -3
  20. databricks/sdk/service/compute.py +21 -33
  21. databricks/sdk/service/dashboards.py +7 -3
  22. databricks/sdk/service/database.py +3 -2
  23. databricks/sdk/service/dataquality.py +1145 -0
  24. databricks/sdk/service/files.py +2 -1
  25. databricks/sdk/service/iam.py +2 -1
  26. databricks/sdk/service/iamv2.py +1 -1
  27. databricks/sdk/service/jobs.py +6 -9
  28. databricks/sdk/service/marketplace.py +3 -1
  29. databricks/sdk/service/ml.py +3 -1
  30. databricks/sdk/service/oauth2.py +1 -1
  31. databricks/sdk/service/pipelines.py +5 -6
  32. databricks/sdk/service/provisioning.py +544 -655
  33. databricks/sdk/service/qualitymonitorv2.py +1 -1
  34. databricks/sdk/service/serving.py +3 -1
  35. databricks/sdk/service/settings.py +5 -2
  36. databricks/sdk/service/settingsv2.py +1 -1
  37. databricks/sdk/service/sharing.py +12 -3
  38. databricks/sdk/service/sql.py +305 -70
  39. databricks/sdk/service/tags.py +1 -1
  40. databricks/sdk/service/vectorsearch.py +3 -1
  41. databricks/sdk/service/workspace.py +70 -17
  42. databricks/sdk/version.py +1 -1
  43. {databricks_sdk-0.67.0.dist-info → databricks_sdk-0.69.0.dist-info}/METADATA +4 -2
  44. databricks_sdk-0.69.0.dist-info/RECORD +84 -0
  45. databricks_sdk-0.67.0.dist-info/RECORD +0 -79
  46. {databricks_sdk-0.67.0.dist-info → databricks_sdk-0.69.0.dist-info}/WHEEL +0 -0
  47. {databricks_sdk-0.67.0.dist-info → databricks_sdk-0.69.0.dist-info}/licenses/LICENSE +0 -0
  48. {databricks_sdk-0.67.0.dist-info → databricks_sdk-0.69.0.dist-info}/licenses/NOTICE +0 -0
  49. {databricks_sdk-0.67.0.dist-info → databricks_sdk-0.69.0.dist-info}/top_level.txt +0 -0
@@ -10,8 +10,10 @@ from datetime import timedelta
  from enum import Enum
  from typing import Any, Callable, Dict, Iterator, List, Optional

+ from databricks.sdk.service._internal import (Wait, _enum, _from_dict,
+                                               _repeated_dict, _repeated_enum)
+
  from ..errors import OperationFailed
- from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum

  _LOG = logging.getLogger("databricks.sdk")

@@ -55,9 +57,9 @@ class AwsKeyInfo:
      """The AWS KMS key alias."""

      reuse_key_for_cluster_volumes: Optional[bool] = None
-     """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to `true`
+     """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to true
      or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to use this
-     key for encrypting EBS volumes, set to `false`."""
+     key for encrypting EBS volumes, set to false."""

      def as_dict(self) -> dict:
          """Serializes the AwsKeyInfo into a dictionary suitable for use as a JSON request body."""
@@ -96,6 +98,75 @@ class AwsKeyInfo:
          )


+ @dataclass
+ class AzureKeyInfo:
+     disk_encryption_set_id: Optional[str] = None
+     """The Disk Encryption Set id that is used to represent the key info used for Managed Disk BYOK use
+     case"""
+
+     key_access_configuration: Optional[KeyAccessConfiguration] = None
+     """The structure to store key access credential This is set if the Managed Identity is being used
+     to access the Azure Key Vault key."""
+
+     key_name: Optional[str] = None
+     """The name of the key in KeyVault."""
+
+     key_vault_uri: Optional[str] = None
+     """The base URI of the KeyVault."""
+
+     tenant_id: Optional[str] = None
+     """The tenant id where the KeyVault lives."""
+
+     version: Optional[str] = None
+     """The current key version."""
+
+     def as_dict(self) -> dict:
+         """Serializes the AzureKeyInfo into a dictionary suitable for use as a JSON request body."""
+         body = {}
+         if self.disk_encryption_set_id is not None:
+             body["disk_encryption_set_id"] = self.disk_encryption_set_id
+         if self.key_access_configuration:
+             body["key_access_configuration"] = self.key_access_configuration.as_dict()
+         if self.key_name is not None:
+             body["key_name"] = self.key_name
+         if self.key_vault_uri is not None:
+             body["key_vault_uri"] = self.key_vault_uri
+         if self.tenant_id is not None:
+             body["tenant_id"] = self.tenant_id
+         if self.version is not None:
+             body["version"] = self.version
+         return body
+
+     def as_shallow_dict(self) -> dict:
+         """Serializes the AzureKeyInfo into a shallow dictionary of its immediate attributes."""
+         body = {}
+         if self.disk_encryption_set_id is not None:
+             body["disk_encryption_set_id"] = self.disk_encryption_set_id
+         if self.key_access_configuration:
+             body["key_access_configuration"] = self.key_access_configuration
+         if self.key_name is not None:
+             body["key_name"] = self.key_name
+         if self.key_vault_uri is not None:
+             body["key_vault_uri"] = self.key_vault_uri
+         if self.tenant_id is not None:
+             body["tenant_id"] = self.tenant_id
+         if self.version is not None:
+             body["version"] = self.version
+         return body
+
+     @classmethod
+     def from_dict(cls, d: Dict[str, Any]) -> AzureKeyInfo:
+         """Deserializes the AzureKeyInfo from a dictionary."""
+         return cls(
+             disk_encryption_set_id=d.get("disk_encryption_set_id", None),
+             key_access_configuration=_from_dict(d, "key_access_configuration", KeyAccessConfiguration),
+             key_name=d.get("key_name", None),
+             key_vault_uri=d.get("key_vault_uri", None),
+             tenant_id=d.get("tenant_id", None),
+             version=d.get("version", None),
+         )
+
+
  @dataclass
  class AzureWorkspaceInfo:
      resource_group: Optional[str] = None
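Note: the new AzureKeyInfo type follows the same as_dict / as_shallow_dict / from_dict convention as the rest of the provisioning models, and nests KeyAccessConfiguration (also new in this release). A minimal round-trip sketch — all values below are illustrative placeholders, not real identifiers:

    from databricks.sdk.service.provisioning import AzureKeyInfo, KeyAccessConfiguration

    key_info = AzureKeyInfo(
        key_vault_uri="https://example-vault.vault.azure.net",  # placeholder vault
        key_name="example-cmk",
        tenant_id="00000000-0000-0000-0000-000000000000",  # placeholder tenant
        key_access_configuration=KeyAccessConfiguration(credential_id="example-credential-id"),
    )
    body = key_info.as_dict()  # nested dataclasses are serialized recursively
    assert AzureKeyInfo.from_dict(body).key_name == "example-cmk"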
@@ -130,8 +201,6 @@ class AzureWorkspaceInfo:

  @dataclass
  class CloudResourceContainer:
-     """The general workspace configurations that are specific to cloud providers."""
-
      gcp: Optional[CustomerFacingGcpCloudResourceContainer] = None

      def as_dict(self) -> dict:
@@ -157,16 +226,18 @@ class CloudResourceContainer:
  @dataclass
  class CreateAwsKeyInfo:
      key_arn: str
-     """The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS region is inferred from
-     the ARN."""
+     """The AWS KMS key's Amazon Resource Name (ARN)."""

      key_alias: Optional[str] = None
      """The AWS KMS key alias."""

+     key_region: Optional[str] = None
+     """The AWS KMS key region."""
+
      reuse_key_for_cluster_volumes: Optional[bool] = None
-     """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to `true`
-     or omitted, the key is also used to encrypt cluster EBS volumes. To not use this key also for
-     encrypting EBS volumes, set this to `false`."""
+     """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to true
+     or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to use this
+     key for encrypting EBS volumes, set to false."""

      def as_dict(self) -> dict:
          """Serializes the CreateAwsKeyInfo into a dictionary suitable for use as a JSON request body."""
@@ -175,6 +246,8 @@ class CreateAwsKeyInfo:
              body["key_alias"] = self.key_alias
          if self.key_arn is not None:
              body["key_arn"] = self.key_arn
+         if self.key_region is not None:
+             body["key_region"] = self.key_region
          if self.reuse_key_for_cluster_volumes is not None:
              body["reuse_key_for_cluster_volumes"] = self.reuse_key_for_cluster_volumes
          return body
@@ -186,6 +259,8 @@ class CreateAwsKeyInfo:
              body["key_alias"] = self.key_alias
          if self.key_arn is not None:
              body["key_arn"] = self.key_arn
+         if self.key_region is not None:
+             body["key_region"] = self.key_region
          if self.reuse_key_for_cluster_volumes is not None:
              body["reuse_key_for_cluster_volumes"] = self.reuse_key_for_cluster_volumes
          return body
@@ -196,6 +271,7 @@ class CreateAwsKeyInfo:
          return cls(
              key_alias=d.get("key_alias", None),
              key_arn=d.get("key_arn", None),
+             key_region=d.get("key_region", None),
              reuse_key_for_cluster_volumes=d.get("reuse_key_for_cluster_volumes", None),
          )
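Note: with the new optional key_region field, callers can state the key's region explicitly instead of relying on it being inferred from the ARN. A hedged sketch (ARN and region are placeholders):

    from databricks.sdk.service.provisioning import CreateAwsKeyInfo

    key = CreateAwsKeyInfo(
        key_arn="arn:aws:kms:us-west-2:111122223333:key/example-key-id",  # placeholder ARN
        key_region="us-west-2",  # new in 0.69.0; optional
        reuse_key_for_cluster_volumes=False,
    )
    print(key.as_dict())  # "key_region" is emitted only when set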
 
@@ -227,7 +303,7 @@ class CreateCredentialAwsCredentials:
  @dataclass
  class CreateCredentialStsRole:
      role_arn: Optional[str] = None
-     """The Amazon Resource Name (ARN) of the cross account role."""
+     """The Amazon Resource Name (ARN) of the cross account IAM role."""

      def as_dict(self) -> dict:
          """Serializes the CreateCredentialStsRole into a dictionary suitable for use as a JSON request body."""
@@ -252,7 +328,8 @@ class CreateCredentialStsRole:
  @dataclass
  class CreateGcpKeyInfo:
      kms_key_id: str
-     """The GCP KMS key's resource name"""
+     """Globally unique kms key resource id of the form
+     projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4"""

      def as_dict(self) -> dict:
          """Serializes the CreateGcpKeyInfo into a dictionary suitable for use as a JSON request body."""
@@ -332,13 +409,18 @@ class Credential:
          )


+ class CustomerFacingComputeMode(Enum):
+     """Corresponds to compute mode defined here:
+     https://src.dev.databricks.com/databricks/universe@9076536b18479afd639d1c1f9dd5a59f72215e69/-/blob/central/api/common.proto?L872
+     """
+
+     HYBRID = "HYBRID"
+     SERVERLESS = "SERVERLESS"
+
+
  @dataclass
  class CustomerFacingGcpCloudResourceContainer:
-     """The general workspace configurations that are specific to Google Cloud."""
-
      project_id: Optional[str] = None
-     """The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your
-     workspace."""

      def as_dict(self) -> dict:
          """Serializes the CustomerFacingGcpCloudResourceContainer into a dictionary suitable for use as a JSON request body."""
@@ -360,6 +442,12 @@ class CustomerFacingGcpCloudResourceContainer:
          return cls(project_id=d.get("project_id", None))


+ class CustomerFacingStorageMode(Enum):
+
+     CUSTOMER_HOSTED = "CUSTOMER_HOSTED"
+     DEFAULT_STORAGE = "DEFAULT_STORAGE"
+
+
  @dataclass
  class CustomerManagedKey:
      account_id: Optional[str] = None
@@ -367,6 +455,8 @@ class CustomerManagedKey:

      aws_key_info: Optional[AwsKeyInfo] = None

+     azure_key_info: Optional[AzureKeyInfo] = None
+
      creation_time: Optional[int] = None
      """Time in epoch milliseconds when the customer key was created."""

@@ -385,6 +475,8 @@ class CustomerManagedKey:
              body["account_id"] = self.account_id
          if self.aws_key_info:
              body["aws_key_info"] = self.aws_key_info.as_dict()
+         if self.azure_key_info:
+             body["azure_key_info"] = self.azure_key_info.as_dict()
          if self.creation_time is not None:
              body["creation_time"] = self.creation_time
          if self.customer_managed_key_id is not None:
@@ -402,6 +494,8 @@ class CustomerManagedKey:
              body["account_id"] = self.account_id
          if self.aws_key_info:
              body["aws_key_info"] = self.aws_key_info
+         if self.azure_key_info:
+             body["azure_key_info"] = self.azure_key_info
          if self.creation_time is not None:
              body["creation_time"] = self.creation_time
          if self.customer_managed_key_id is not None:
@@ -418,6 +512,7 @@ class CustomerManagedKey:
          return cls(
              account_id=d.get("account_id", None),
              aws_key_info=_from_dict(d, "aws_key_info", AwsKeyInfo),
+             azure_key_info=_from_dict(d, "azure_key_info", AzureKeyInfo),
              creation_time=d.get("creation_time", None),
              customer_managed_key_id=d.get("customer_managed_key_id", None),
              gcp_key_info=_from_dict(d, "gcp_key_info", GcpKeyInfo),
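Note: CustomerManagedKey now carries an optional azure_key_info alongside aws_key_info and gcp_key_info, and from_dict picks up whichever block is present. A sketch with an invented response payload:

    from databricks.sdk.service.provisioning import CustomerManagedKey

    payload = {
        "customer_managed_key_id": "example-key-config-id",  # placeholder
        "use_cases": ["STORAGE"],
        "azure_key_info": {"key_name": "example-cmk", "key_vault_uri": "https://example-vault.vault.azure.net"},
    }
    cmk = CustomerManagedKey.from_dict(payload)
    print(cmk.azure_key_info.key_vault_uri)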
@@ -425,37 +520,15 @@ class CustomerManagedKey:
          )


- @dataclass
- class DeleteResponse:
-     def as_dict(self) -> dict:
-         """Serializes the DeleteResponse into a dictionary suitable for use as a JSON request body."""
-         body = {}
-         return body
-
-     def as_shallow_dict(self) -> dict:
-         """Serializes the DeleteResponse into a shallow dictionary of its immediate attributes."""
-         body = {}
-         return body
-
-     @classmethod
-     def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse:
-         """Deserializes the DeleteResponse from a dictionary."""
-         return cls()
-
-
  class EndpointUseCase(Enum):
-     """This enumeration represents the type of Databricks VPC [endpoint service] that was used when
-     creating this VPC endpoint.
-
-     [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html"""

      DATAPLANE_RELAY_ACCESS = "DATAPLANE_RELAY_ACCESS"
      WORKSPACE_ACCESS = "WORKSPACE_ACCESS"


  class ErrorType(Enum):
-     """The AWS resource associated with this error: credentials, VPC, subnet, security group, or
-     network ACL."""
+     """ErrorType and WarningType are used to represent the type of error or warning by NetworkHealth
+     and NetworkWarning defined in central/api/accounts/accounts.proto"""

      CREDENTIALS = "credentials"
      NETWORK_ACL = "networkAcl"
@@ -465,52 +538,49 @@ class ErrorType(Enum):


  @dataclass
- class ExternalCustomerInfo:
-     authoritative_user_email: Optional[str] = None
-     """Email of the authoritative user."""
+ class GcpCommonNetworkConfig:
+     """The shared network config for GCP workspace. This object has common network configurations that
+     are network attributions of a workspace. DEPRECATED. Use GkeConfig instead."""

-     authoritative_user_full_name: Optional[str] = None
-     """The authoritative user full name."""
+     gke_cluster_master_ip_range: Optional[str] = None
+     """The IP range that will be used to allocate GKE cluster master resources from. This field must
+     not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER."""

-     customer_name: Optional[str] = None
-     """The legal entity name for the external workspace"""
+     gke_connectivity_type: Optional[GkeConfigConnectivityType] = None
+     """The type of network connectivity of the GKE cluster."""

      def as_dict(self) -> dict:
-         """Serializes the ExternalCustomerInfo into a dictionary suitable for use as a JSON request body."""
-         body = {}
-         if self.authoritative_user_email is not None:
-             body["authoritative_user_email"] = self.authoritative_user_email
-         if self.authoritative_user_full_name is not None:
-             body["authoritative_user_full_name"] = self.authoritative_user_full_name
-         if self.customer_name is not None:
-             body["customer_name"] = self.customer_name
+         """Serializes the GcpCommonNetworkConfig into a dictionary suitable for use as a JSON request body."""
+         body = {}
+         if self.gke_cluster_master_ip_range is not None:
+             body["gke_cluster_master_ip_range"] = self.gke_cluster_master_ip_range
+         if self.gke_connectivity_type is not None:
+             body["gke_connectivity_type"] = self.gke_connectivity_type.value
          return body

      def as_shallow_dict(self) -> dict:
-         """Serializes the ExternalCustomerInfo into a shallow dictionary of its immediate attributes."""
-         body = {}
-         if self.authoritative_user_email is not None:
-             body["authoritative_user_email"] = self.authoritative_user_email
-         if self.authoritative_user_full_name is not None:
-             body["authoritative_user_full_name"] = self.authoritative_user_full_name
-         if self.customer_name is not None:
-             body["customer_name"] = self.customer_name
+         """Serializes the GcpCommonNetworkConfig into a shallow dictionary of its immediate attributes."""
+         body = {}
+         if self.gke_cluster_master_ip_range is not None:
+             body["gke_cluster_master_ip_range"] = self.gke_cluster_master_ip_range
+         if self.gke_connectivity_type is not None:
+             body["gke_connectivity_type"] = self.gke_connectivity_type
          return body

      @classmethod
-     def from_dict(cls, d: Dict[str, Any]) -> ExternalCustomerInfo:
-         """Deserializes the ExternalCustomerInfo from a dictionary."""
+     def from_dict(cls, d: Dict[str, Any]) -> GcpCommonNetworkConfig:
+         """Deserializes the GcpCommonNetworkConfig from a dictionary."""
          return cls(
-             authoritative_user_email=d.get("authoritative_user_email", None),
-             authoritative_user_full_name=d.get("authoritative_user_full_name", None),
-             customer_name=d.get("customer_name", None),
+             gke_cluster_master_ip_range=d.get("gke_cluster_master_ip_range", None),
+             gke_connectivity_type=_enum(d, "gke_connectivity_type", GkeConfigConnectivityType),
          )


  @dataclass
  class GcpKeyInfo:
      kms_key_id: str
-     """The GCP KMS key's resource name"""
+     """Globally unique kms key resource id of the form
+     projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4"""

      def as_dict(self) -> dict:
          """Serializes the GcpKeyInfo into a dictionary suitable for use as a JSON request body."""
@@ -534,37 +604,17 @@ class GcpKeyInfo:

  @dataclass
  class GcpManagedNetworkConfig:
-     """The network settings for the workspace. The configurations are only for Databricks-managed VPCs.
-     It is ignored if you specify a customer-managed VPC in the `network_id` field.", All the IP
-     range configurations must be mutually exclusive. An attempt to create a workspace fails if
-     Databricks detects an IP range overlap.
-
-     Specify custom IP ranges in CIDR format. The IP ranges for these fields must not overlap, and
-     all IP addresses must be entirely within the following ranges: `10.0.0.0/8`, `100.64.0.0/10`,
-     `172.16.0.0/12`, `192.168.0.0/16`, and `240.0.0.0/4`.
-
-     The sizes of these IP ranges affect the maximum number of nodes for the workspace.
-
-     **Important**: Confirm the IP ranges used by your Databricks workspace before creating the
-     workspace. You cannot change them after your workspace is deployed. If the IP address ranges for
-     your Databricks are too small, IP exhaustion can occur, causing your Databricks jobs to fail. To
-     determine the address range sizes that you need, Databricks provides a calculator as a Microsoft
-     Excel spreadsheet. See [calculate subnet sizes for a new workspace].
-
-     [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
-     """
+     """The network configuration for the workspace."""

      gke_cluster_pod_ip_range: Optional[str] = None
-     """The IP range from which to allocate GKE cluster pods. No bigger than `/9` and no smaller than
-     `/21`."""
+     """The IP range that will be used to allocate GKE cluster Pods from."""

      gke_cluster_service_ip_range: Optional[str] = None
-     """The IP range from which to allocate GKE cluster services. No bigger than `/16` and no smaller
-     than `/27`."""
+     """The IP range that will be used to allocate GKE cluster Services from."""

      subnet_cidr: Optional[str] = None
-     """The IP range from which to allocate GKE cluster nodes. No bigger than `/9` and no smaller than
-     `/29`."""
+     """The IP range which will be used to allocate GKE cluster nodes from. Note: Pods, services and
+     master IP range must be mutually exclusive."""

      def as_dict(self) -> dict:
          """Serializes the GcpManagedNetworkConfig into a dictionary suitable for use as a JSON request body."""
@@ -600,29 +650,24 @@ class GcpManagedNetworkConfig:

  @dataclass
  class GcpNetworkInfo:
-     """The Google Cloud specific information for this network (for example, the VPC ID, subnet ID, and
-     secondary IP ranges)."""
-
      network_project_id: str
-     """The Google Cloud project ID of the VPC network."""
+     """The GCP project ID for network resources. This project is where the VPC and subnet resides."""

      vpc_id: str
-     """The ID of the VPC associated with this network. VPC IDs can be used in multiple network
-     configurations."""
+     """The customer-provided VPC ID."""

      subnet_id: str
-     """The ID of the subnet associated with this network."""
+     """The customer-provided Subnet ID that will be available to Clusters in Workspaces using this
+     Network."""

      subnet_region: str
-     """The Google Cloud region of the workspace data plane (for example, `us-east4`)."""

      pod_ip_range_name: str
-     """The name of the secondary IP range for pods. A Databricks-managed GKE cluster uses this IP range
-     for its pods. This secondary IP range can be used by only one workspace."""
+     """Name of the secondary range within the subnet that will be used by GKE as Pod IP range. This is
+     BYO VPC specific. DB VPC uses network.getGcpManagedNetworkConfig.getGkeClusterPodIpRange"""

      service_ip_range_name: str
-     """The name of the secondary IP range for services. A Databricks-managed GKE cluster uses this IP
-     range for its services. This secondary IP range can be used by only one workspace."""
+     """Name of the secondary range within the subnet that will be used by GKE as Service IP range."""

      def as_dict(self) -> dict:
          """Serializes the GcpNetworkInfo into a dictionary suitable for use as a JSON request body."""
@@ -673,22 +718,15 @@ class GcpNetworkInfo:

  @dataclass
  class GcpVpcEndpointInfo:
-     """The Google Cloud specific information for this Private Service Connect endpoint."""
-
      project_id: str
-     """The Google Cloud project ID of the VPC network where the PSC connection resides."""

      psc_endpoint_name: str
-     """The name of the PSC endpoint in the Google Cloud project."""

      endpoint_region: str
-     """Region of the PSC endpoint."""

      psc_connection_id: Optional[str] = None
-     """The unique ID of this PSC connection."""

      service_attachment_id: Optional[str] = None
-     """The service attachment this PSC connection connects to."""

      def as_dict(self) -> dict:
          """Serializes the GcpVpcEndpointInfo into a dictionary suitable for use as a JSON request body."""
@@ -734,22 +772,14 @@ class GcpVpcEndpointInfo:

  @dataclass
  class GkeConfig:
-     """The configurations for the GKE cluster of a Databricks workspace."""
+     """The configurations of the GKE cluster used by the GCP workspace."""

      connectivity_type: Optional[GkeConfigConnectivityType] = None
-     """Specifies the network connectivity types for the GKE nodes and the GKE master network.
-
-     Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE nodes
-     will not have public IPs.
-
-     Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE cluster
-     have public IP addresses."""
+     """The type of network connectivity of the GKE cluster."""

      master_ip_range: Optional[str] = None
-     """The IP range from which to allocate GKE cluster master resources. This field will be ignored if
-     GKE private cluster is not enabled.
-
-     It must be exactly as big as `/28`."""
+     """The IP range that will be used to allocate GKE cluster master resources from. This field must
+     not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER."""

      def as_dict(self) -> dict:
          """Serializes the GkeConfig into a dictionary suitable for use as a JSON request body."""
@@ -791,10 +821,33 @@ class GkeConfigConnectivityType(Enum):
      PUBLIC_NODE_PUBLIC_MASTER = "PUBLIC_NODE_PUBLIC_MASTER"


+ @dataclass
+ class KeyAccessConfiguration:
+     """The credential ID that is used to access the key vault."""
+
+     credential_id: Optional[str] = None
+
+     def as_dict(self) -> dict:
+         """Serializes the KeyAccessConfiguration into a dictionary suitable for use as a JSON request body."""
+         body = {}
+         if self.credential_id is not None:
+             body["credential_id"] = self.credential_id
+         return body
+
+     def as_shallow_dict(self) -> dict:
+         """Serializes the KeyAccessConfiguration into a shallow dictionary of its immediate attributes."""
+         body = {}
+         if self.credential_id is not None:
+             body["credential_id"] = self.credential_id
+         return body
+
+     @classmethod
+     def from_dict(cls, d: Dict[str, Any]) -> KeyAccessConfiguration:
+         """Deserializes the KeyAccessConfiguration from a dictionary."""
+         return cls(credential_id=d.get("credential_id", None))
+
+
  class KeyUseCase(Enum):
-     """Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data in the control
-     plane * `STORAGE`: Encrypts the workspace's root S3 bucket (root DBFS and system data) and,
-     optionally, cluster EBS volumes."""

      MANAGED_SERVICES = "MANAGED_SERVICES"
      STORAGE = "STORAGE"
@@ -820,8 +873,12 @@ class Network:
      """The human-readable name of the network configuration."""

      security_group_ids: Optional[List[str]] = None
+     """IDs of one to five security groups associated with this network. Security group IDs **cannot**
+     be used in multiple network configurations."""

      subnet_ids: Optional[List[str]] = None
+     """IDs of at least two subnets associated with this network. Subnet IDs **cannot** be used in
+     multiple network configurations."""

      vpc_endpoints: Optional[NetworkVpcEndpoints] = None

@@ -952,18 +1009,13 @@ class NetworkHealth:

  @dataclass
  class NetworkVpcEndpoints:
-     """If specified, contains the VPC endpoints used to allow cluster communication from this VPC over
-     [AWS PrivateLink].
-
-     [AWS PrivateLink]: https://aws.amazon.com/privatelink/"""
-
-     rest_api: List[str]
-     """The VPC endpoint ID used by this network to access the Databricks REST API."""
-
-     dataplane_relay: List[str]
+     dataplane_relay: Optional[List[str]] = None
      """The VPC endpoint ID used by this network to access the Databricks secure cluster connectivity
      relay."""

+     rest_api: Optional[List[str]] = None
+     """The VPC endpoint ID used by this network to access the Databricks REST API."""
+
      def as_dict(self) -> dict:
          """Serializes the NetworkVpcEndpoints into a dictionary suitable for use as a JSON request body."""
          body = {}
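Note: rest_api and dataplane_relay were previously required on NetworkVpcEndpoints; both are now optional, so either endpoint list can be supplied on its own. A minimal sketch (endpoint IDs are placeholders):

    from databricks.sdk.service.provisioning import NetworkVpcEndpoints

    endpoints = NetworkVpcEndpoints(
        rest_api=["example-workspace-endpoint-id"],      # placeholder Databricks endpoint IDs
        dataplane_relay=["example-relay-endpoint-id"],
    )
    print(endpoints.as_dict())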
@@ -1020,9 +1072,6 @@ class NetworkWarning:


  class PricingTier(Enum):
-     """The pricing tier of the workspace. For pricing tier information, see [AWS Pricing].
-
-     [AWS Pricing]: https://databricks.com/product/aws-pricing"""

      COMMUNITY_EDITION = "COMMUNITY_EDITION"
      DEDICATED = "DEDICATED"
@@ -1033,11 +1082,6 @@ class PricingTier(Enum):


  class PrivateAccessLevel(Enum):
-     """The private access level controls which VPC endpoints can connect to the UI or API of any
-     workspace that attaches this private access settings object. * `ACCOUNT` level access (the
-     default) allows only VPC endpoints that are registered in your Databricks account connect to
-     your workspace. * `ENDPOINT` level access allows only specified VPC endpoints connect to your
-     workspace. For details, see `allowed_vpc_endpoint_ids`."""

      ACCOUNT = "ACCOUNT"
      ENDPOINT = "ENDPOINT"
@@ -1045,13 +1089,26 @@ class PrivateAccessLevel(Enum):

  @dataclass
  class PrivateAccessSettings:
+     """*"""
+
      account_id: Optional[str] = None
-     """The Databricks account ID that hosts the credential."""
+     """The Databricks account ID that hosts the private access settings."""

      allowed_vpc_endpoint_ids: Optional[List[str]] = None
-     """An array of Databricks VPC endpoint IDs."""
+     """An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when
+     registering the VPC endpoint configuration in your Databricks account. This is not the ID of the
+     VPC endpoint in AWS. Only used when private_access_level is set to ENDPOINT. This is an allow
+     list of VPC endpoints that in your account that can connect to your workspace over AWS
+     PrivateLink. If hybrid access to your workspace is enabled by setting public_access_enabled to
+     true, this control only works for PrivateLink connections. To control how your workspace is
+     accessed via public internet, see IP access lists."""

      private_access_level: Optional[PrivateAccessLevel] = None
+     """The private access level controls which VPC endpoints can connect to the UI or API of any
+     workspace that attaches this private access settings object. `ACCOUNT` level access (the
+     default) allows only VPC endpoints that are registered in your Databricks account connect to
+     your workspace. `ENDPOINT` level access allows only specified VPC endpoints connect to your
+     workspace. For details, see allowed_vpc_endpoint_ids."""

      private_access_settings_id: Optional[str] = None
      """Databricks private access settings ID."""
@@ -1061,12 +1118,11 @@ class PrivateAccessSettings:

      public_access_enabled: Optional[bool] = None
      """Determines if the workspace can be accessed over public internet. For fully private workspaces,
-     you can optionally specify `false`, but only if you implement both the front-end and the
-     back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is
-     enabled."""
+     you can optionally specify false, but only if you implement both the front-end and the back-end
+     PrivateLink connections. Otherwise, specify true, which means that public access is enabled."""

      region: Optional[str] = None
-     """The cloud region for workspaces attached to this private access settings object."""
+     """The AWS region for workspaces attached to this private access settings object."""

      def as_dict(self) -> dict:
          """Serializes the PrivateAccessSettings into a dictionary suitable for use as a JSON request body."""
@@ -1120,30 +1176,10 @@ class PrivateAccessSettings:
          )


- @dataclass
- class ReplaceResponse:
-     def as_dict(self) -> dict:
-         """Serializes the ReplaceResponse into a dictionary suitable for use as a JSON request body."""
-         body = {}
-         return body
-
-     def as_shallow_dict(self) -> dict:
-         """Serializes the ReplaceResponse into a shallow dictionary of its immediate attributes."""
-         body = {}
-         return body
-
-     @classmethod
-     def from_dict(cls, d: Dict[str, Any]) -> ReplaceResponse:
-         """Deserializes the ReplaceResponse from a dictionary."""
-         return cls()
-
-
  @dataclass
  class RootBucketInfo:
-     """Root S3 bucket information."""
-
      bucket_name: Optional[str] = None
-     """The name of the S3 bucket."""
+     """Name of the S3 bucket"""

      def as_dict(self) -> dict:
          """Serializes the RootBucketInfo into a dictionary suitable for use as a JSON request body."""
@@ -1168,12 +1204,20 @@ class RootBucketInfo:
  @dataclass
  class StorageConfiguration:
      account_id: Optional[str] = None
-     """The Databricks account ID that hosts the credential."""
+     """The Databricks account ID associated with this storage configuration."""

      creation_time: Optional[int] = None
      """Time in epoch milliseconds when the storage configuration was created."""

+     role_arn: Optional[str] = None
+     """Optional IAM role that is used to access the workspace catalog which is created during workspace
+     creation for UC by Default. If a storage configuration with this field populated is used to
+     create a workspace, then a workspace catalog is created together with the workspace. The
+     workspace catalog shares the root bucket with internal workspace storage (including DBFS root)
+     but uses a dedicated bucket path prefix."""
+
      root_bucket_info: Optional[RootBucketInfo] = None
+     """The root bucket information for the storage configuration."""

      storage_configuration_id: Optional[str] = None
      """Databricks storage configuration ID."""
@@ -1188,6 +1232,8 @@ class StorageConfiguration:
          body["account_id"] = self.account_id
          if self.creation_time is not None:
              body["creation_time"] = self.creation_time
+         if self.role_arn is not None:
+             body["role_arn"] = self.role_arn
          if self.root_bucket_info:
              body["root_bucket_info"] = self.root_bucket_info.as_dict()
          if self.storage_configuration_id is not None:
@@ -1203,6 +1249,8 @@ class StorageConfiguration:
          body["account_id"] = self.account_id
          if self.creation_time is not None:
              body["creation_time"] = self.creation_time
+         if self.role_arn is not None:
+             body["role_arn"] = self.role_arn
          if self.root_bucket_info:
              body["root_bucket_info"] = self.root_bucket_info
          if self.storage_configuration_id is not None:
@@ -1217,6 +1265,7 @@ class StorageConfiguration:
          return cls(
              account_id=d.get("account_id", None),
              creation_time=d.get("creation_time", None),
+             role_arn=d.get("role_arn", None),
              root_bucket_info=_from_dict(d, "root_bucket_info", RootBucketInfo),
              storage_configuration_id=d.get("storage_configuration_id", None),
              storage_configuration_name=d.get("storage_configuration_name", None),
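Note: the new role_arn field flows through both serializers and from_dict, so it round-trips like any other optional field. A sketch of reading it back from a response dict (values are placeholders):

    from databricks.sdk.service.provisioning import StorageConfiguration

    resp = {
        "storage_configuration_id": "example-config-id",
        "root_bucket_info": {"bucket_name": "example-root-bucket"},
        "role_arn": "arn:aws:iam::111122223333:role/example-workspace-catalog-role",  # placeholder
    }
    sc = StorageConfiguration.from_dict(resp)
    print(sc.role_arn, sc.root_bucket_info.bucket_name)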
@@ -1225,18 +1274,12 @@ class StorageConfiguration:

  @dataclass
  class StsRole:
-     external_id: Optional[str] = None
-     """The external ID that needs to be trusted by the cross-account role. This is always your
-     Databricks account ID."""
-
      role_arn: Optional[str] = None
-     """The Amazon Resource Name (ARN) of the cross account role."""
+     """The Amazon Resource Name (ARN) of the cross account IAM role."""

      def as_dict(self) -> dict:
          """Serializes the StsRole into a dictionary suitable for use as a JSON request body."""
          body = {}
-         if self.external_id is not None:
-             body["external_id"] = self.external_id
          if self.role_arn is not None:
              body["role_arn"] = self.role_arn
          return body
@@ -1244,8 +1287,6 @@ class StsRole:
      def as_shallow_dict(self) -> dict:
          """Serializes the StsRole into a shallow dictionary of its immediate attributes."""
          body = {}
-         if self.external_id is not None:
-             body["external_id"] = self.external_id
          if self.role_arn is not None:
              body["role_arn"] = self.role_arn
          return body
@@ -1253,31 +1294,16 @@ class StsRole:
      @classmethod
      def from_dict(cls, d: Dict[str, Any]) -> StsRole:
          """Deserializes the StsRole from a dictionary."""
-         return cls(external_id=d.get("external_id", None), role_arn=d.get("role_arn", None))
-
-
- @dataclass
- class UpdateResponse:
-     def as_dict(self) -> dict:
-         """Serializes the UpdateResponse into a dictionary suitable for use as a JSON request body."""
-         body = {}
-         return body
-
-     def as_shallow_dict(self) -> dict:
-         """Serializes the UpdateResponse into a shallow dictionary of its immediate attributes."""
-         body = {}
-         return body
-
-     @classmethod
-     def from_dict(cls, d: Dict[str, Any]) -> UpdateResponse:
-         """Deserializes the UpdateResponse from a dictionary."""
-         return cls()
+         return cls(role_arn=d.get("role_arn", None))


  @dataclass
  class VpcEndpoint:
+     """*"""
+
      account_id: Optional[str] = None
-     """The Databricks account ID that hosts the VPC endpoint configuration."""
+     """The Databricks account ID that hosts the VPC endpoint configuration. TODO - This may signal an
+     OpenAPI diff; it does not show up in the generated spec"""

      aws_account_id: Optional[str] = None
      """The AWS Account in which the VPC endpoint object exists."""
@@ -1294,6 +1320,7 @@ class VpcEndpoint:
      """The ID of the VPC endpoint object in AWS."""

      gcp_vpc_endpoint_info: Optional[GcpVpcEndpointInfo] = None
+     """The cloud info of this vpc endpoint. Info for a GCP vpc endpoint."""

      region: Optional[str] = None
      """The AWS region in which this VPC endpoint object exists."""
@@ -1305,6 +1332,11 @@ class VpcEndpoint:
      [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html"""

      use_case: Optional[EndpointUseCase] = None
+     """This enumeration represents the type of Databricks VPC endpoint service that was used when
+     creating this VPC endpoint. If the VPC endpoint connects to the Databricks control plane for
+     either the front-end connection or the back-end REST API connection, the value is
+     WORKSPACE_ACCESS. If the VPC endpoint connects to the Databricks workspace for the back-end
+     secure cluster connectivity relay, the value is DATAPLANE_RELAY_ACCESS."""

      vpc_endpoint_id: Optional[str] = None
      """Databricks VPC endpoint ID. This is the Databricks-specific name of the VPC endpoint. Do not
@@ -1381,8 +1413,6 @@ class VpcEndpoint:


  class VpcStatus(Enum):
-     """The status of this network configuration object in terms of its use in a workspace: *
-     `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken. * `WARNED`: Warned."""

      BROKEN = "BROKEN"
      UNATTACHED = "UNATTACHED"
@@ -1391,7 +1421,6 @@ class VpcStatus(Enum):


  class WarningType(Enum):
-     """The AWS resource associated with this warning: a subnet or a security group."""

      SECURITY_GROUP = "securityGroup"
      SUBNET = "subnet"
@@ -1403,7 +1432,6 @@ class Workspace:
      """Databricks account ID."""

      aws_region: Optional[str] = None
-     """The AWS region of the workspace data plane (for example, `us-west-2`)."""

      azure_workspace_info: Optional[AzureWorkspaceInfo] = None

@@ -1412,6 +1440,9 @@ class Workspace:

      cloud_resource_container: Optional[CloudResourceContainer] = None

+     compute_mode: Optional[CustomerFacingComputeMode] = None
+     """The compute mode of the workspace."""
+
      creation_time: Optional[int] = None
      """Time in epoch milliseconds when the workspace was created."""

@@ -1424,22 +1455,15 @@ class Workspace:
      characters. The key can be of maximum length of 127 characters, and cannot be empty."""

      deployment_name: Optional[str] = None
-     """The deployment name defines part of the subdomain for the workspace. The workspace URL for web
-     application and REST APIs is `<deployment-name>.cloud.databricks.com`.
-
-     This value must be unique across all non-deleted deployments across all AWS regions."""

-     external_customer_info: Optional[ExternalCustomerInfo] = None
-     """If this workspace is for a external customer, then external_customer_info is populated. If this
-     workspace is not for a external customer, then external_customer_info is empty."""
+     expected_workspace_status: Optional[WorkspaceStatus] = None
+     """A client owned field used to indicate the workspace status that the client expects to be in. For
+     now this is only used to unblock Temporal workflow for GCP least privileged workspace."""

      gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None

      gke_config: Optional[GkeConfig] = None

-     is_no_public_ip_enabled: Optional[bool] = None
-     """Whether no public IP is enabled for the workspace."""
-
      location: Optional[str] = None
      """The Google Cloud region of the workspace data plane in your Google account (for example,
      `us-east4`)."""
@@ -1447,9 +1471,17 @@ class Workspace:
      managed_services_customer_managed_key_id: Optional[str] = None
      """ID of the key configuration for encrypting managed services."""

+     network: Optional[WorkspaceNetwork] = None
+     """The network configuration for the workspace.
+
+     DEPRECATED. Use `network_id` instead."""
+
+     network_connectivity_config_id: Optional[str] = None
+     """The object ID of network connectivity config."""
+
      network_id: Optional[str] = None
-     """The network configuration ID that is attached to the workspace. This field is available only if
-     the network is a customer-managed network."""
+     """If this workspace is BYO VPC, then the network_id will be populated. If this workspace is not
+     BYO VPC, then the network_id will be empty."""

      pricing_tier: Optional[PricingTier] = None

@@ -1469,6 +1501,9 @@ class Workspace:
      storage_customer_managed_key_id: Optional[str] = None
      """ID of the key configuration for encrypting workspace storage."""

+     storage_mode: Optional[CustomerFacingStorageMode] = None
+     """The storage mode of the workspace."""
+
      workspace_id: Optional[int] = None
      """A unique integer ID for the workspace"""

@@ -1476,6 +1511,7 @@ class Workspace:
      """The human-readable name of the workspace."""

      workspace_status: Optional[WorkspaceStatus] = None
+     """The status of a workspace"""

      workspace_status_message: Optional[str] = None
      """Message describing the current workspace status."""
@@ -1493,6 +1529,8 @@ class Workspace:
          body["cloud"] = self.cloud
          if self.cloud_resource_container:
              body["cloud_resource_container"] = self.cloud_resource_container.as_dict()
+         if self.compute_mode is not None:
+             body["compute_mode"] = self.compute_mode.value
          if self.creation_time is not None:
              body["creation_time"] = self.creation_time
          if self.credentials_id is not None:
@@ -1501,18 +1539,20 @@ class Workspace:
          body["custom_tags"] = self.custom_tags
          if self.deployment_name is not None:
              body["deployment_name"] = self.deployment_name
-         if self.external_customer_info:
-             body["external_customer_info"] = self.external_customer_info.as_dict()
+         if self.expected_workspace_status is not None:
+             body["expected_workspace_status"] = self.expected_workspace_status.value
          if self.gcp_managed_network_config:
              body["gcp_managed_network_config"] = self.gcp_managed_network_config.as_dict()
          if self.gke_config:
              body["gke_config"] = self.gke_config.as_dict()
-         if self.is_no_public_ip_enabled is not None:
-             body["is_no_public_ip_enabled"] = self.is_no_public_ip_enabled
          if self.location is not None:
              body["location"] = self.location
          if self.managed_services_customer_managed_key_id is not None:
              body["managed_services_customer_managed_key_id"] = self.managed_services_customer_managed_key_id
+         if self.network:
+             body["network"] = self.network.as_dict()
+         if self.network_connectivity_config_id is not None:
+             body["network_connectivity_config_id"] = self.network_connectivity_config_id
          if self.network_id is not None:
              body["network_id"] = self.network_id
          if self.pricing_tier is not None:
@@ -1523,6 +1563,8 @@ class Workspace:
          body["storage_configuration_id"] = self.storage_configuration_id
          if self.storage_customer_managed_key_id is not None:
              body["storage_customer_managed_key_id"] = self.storage_customer_managed_key_id
+         if self.storage_mode is not None:
+             body["storage_mode"] = self.storage_mode.value
          if self.workspace_id is not None:
              body["workspace_id"] = self.workspace_id
          if self.workspace_name is not None:
@@ -1546,6 +1588,8 @@ class Workspace:
          body["cloud"] = self.cloud
          if self.cloud_resource_container:
              body["cloud_resource_container"] = self.cloud_resource_container
+         if self.compute_mode is not None:
+             body["compute_mode"] = self.compute_mode
          if self.creation_time is not None:
              body["creation_time"] = self.creation_time
          if self.credentials_id is not None:
@@ -1554,18 +1598,20 @@ class Workspace:
          body["custom_tags"] = self.custom_tags
          if self.deployment_name is not None:
              body["deployment_name"] = self.deployment_name
-         if self.external_customer_info:
-             body["external_customer_info"] = self.external_customer_info
+         if self.expected_workspace_status is not None:
+             body["expected_workspace_status"] = self.expected_workspace_status
          if self.gcp_managed_network_config:
              body["gcp_managed_network_config"] = self.gcp_managed_network_config
          if self.gke_config:
              body["gke_config"] = self.gke_config
-         if self.is_no_public_ip_enabled is not None:
-             body["is_no_public_ip_enabled"] = self.is_no_public_ip_enabled
          if self.location is not None:
              body["location"] = self.location
          if self.managed_services_customer_managed_key_id is not None:
              body["managed_services_customer_managed_key_id"] = self.managed_services_customer_managed_key_id
+         if self.network:
+             body["network"] = self.network
+         if self.network_connectivity_config_id is not None:
+             body["network_connectivity_config_id"] = self.network_connectivity_config_id
          if self.network_id is not None:
              body["network_id"] = self.network_id
          if self.pricing_tier is not None:
@@ -1576,6 +1622,8 @@ class Workspace:
          body["storage_configuration_id"] = self.storage_configuration_id
          if self.storage_customer_managed_key_id is not None:
              body["storage_customer_managed_key_id"] = self.storage_customer_managed_key_id
+         if self.storage_mode is not None:
+             body["storage_mode"] = self.storage_mode
          if self.workspace_id is not None:
              body["workspace_id"] = self.workspace_id
          if self.workspace_name is not None:
@@ -1595,21 +1643,24 @@ class Workspace:
              azure_workspace_info=_from_dict(d, "azure_workspace_info", AzureWorkspaceInfo),
              cloud=d.get("cloud", None),
              cloud_resource_container=_from_dict(d, "cloud_resource_container", CloudResourceContainer),
+             compute_mode=_enum(d, "compute_mode", CustomerFacingComputeMode),
              creation_time=d.get("creation_time", None),
              credentials_id=d.get("credentials_id", None),
              custom_tags=d.get("custom_tags", None),
              deployment_name=d.get("deployment_name", None),
-             external_customer_info=_from_dict(d, "external_customer_info", ExternalCustomerInfo),
+             expected_workspace_status=_enum(d, "expected_workspace_status", WorkspaceStatus),
              gcp_managed_network_config=_from_dict(d, "gcp_managed_network_config", GcpManagedNetworkConfig),
              gke_config=_from_dict(d, "gke_config", GkeConfig),
-             is_no_public_ip_enabled=d.get("is_no_public_ip_enabled", None),
              location=d.get("location", None),
              managed_services_customer_managed_key_id=d.get("managed_services_customer_managed_key_id", None),
+             network=_from_dict(d, "network", WorkspaceNetwork),
+             network_connectivity_config_id=d.get("network_connectivity_config_id", None),
              network_id=d.get("network_id", None),
              pricing_tier=_enum(d, "pricing_tier", PricingTier),
              private_access_settings_id=d.get("private_access_settings_id", None),
              storage_configuration_id=d.get("storage_configuration_id", None),
              storage_customer_managed_key_id=d.get("storage_customer_managed_key_id", None),
+             storage_mode=_enum(d, "storage_mode", CustomerFacingStorageMode),
              workspace_id=d.get("workspace_id", None),
              workspace_name=d.get("workspace_name", None),
              workspace_status=_enum(d, "workspace_status", WorkspaceStatus),
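Note: the new compute_mode and storage_mode fields are parsed through _enum in from_dict and serialized via .value in as_dict, mirroring the existing enum fields. A sketch with an invented payload:

    from databricks.sdk.service.provisioning import CustomerFacingComputeMode, Workspace

    ws = Workspace.from_dict(
        {"workspace_id": 123, "compute_mode": "SERVERLESS", "storage_mode": "DEFAULT_STORAGE"}
    )
    if ws.compute_mode is CustomerFacingComputeMode.SERVERLESS:
        print("serverless workspace, storage mode:", ws.storage_mode.value)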
@@ -1617,9 +1668,65 @@ class Workspace:
          )


+ @dataclass
+ class WorkspaceNetwork:
+     """The network configuration for workspaces."""
+
+     gcp_common_network_config: Optional[GcpCommonNetworkConfig] = None
+     """The shared network config for GCP workspace. This object has common network configurations that
+     are network attributions of a workspace. This object is input-only."""
+
+     gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None
+     """The mutually exclusive network deployment modes. The option decides which network mode the
+     workspace will use. The network config for GCP workspace with Databricks managed network. This
+     object is input-only and will not be provided when listing workspaces. See
+     go/gcp-byovpc-alpha-design for interface decisions."""
+
+     network_id: Optional[str] = None
+     """The ID of the network object, if the workspace is a BYOVPC workspace. This should apply to
+     workspaces on all clouds in internal services. In accounts-rest-api, user will use
+     workspace.network_id for input and output instead. Currently (2021-06-19) the network ID is only
+     used by GCP."""
+
+     def as_dict(self) -> dict:
+         """Serializes the WorkspaceNetwork into a dictionary suitable for use as a JSON request body."""
+         body = {}
+         if self.gcp_common_network_config:
+             body["gcp_common_network_config"] = self.gcp_common_network_config.as_dict()
+         if self.gcp_managed_network_config:
+             body["gcp_managed_network_config"] = self.gcp_managed_network_config.as_dict()
+         if self.network_id is not None:
+             body["network_id"] = self.network_id
+         return body
+
+     def as_shallow_dict(self) -> dict:
+         """Serializes the WorkspaceNetwork into a shallow dictionary of its immediate attributes."""
+         body = {}
+         if self.gcp_common_network_config:
+             body["gcp_common_network_config"] = self.gcp_common_network_config
+         if self.gcp_managed_network_config:
+             body["gcp_managed_network_config"] = self.gcp_managed_network_config
+         if self.network_id is not None:
+             body["network_id"] = self.network_id
+         return body
+
+     @classmethod
+     def from_dict(cls, d: Dict[str, Any]) -> WorkspaceNetwork:
+         """Deserializes the WorkspaceNetwork from a dictionary."""
+         return cls(
+             gcp_common_network_config=_from_dict(d, "gcp_common_network_config", GcpCommonNetworkConfig),
+             gcp_managed_network_config=_from_dict(d, "gcp_managed_network_config", GcpManagedNetworkConfig),
+             network_id=d.get("network_id", None),
+         )
+
+
  class WorkspaceStatus(Enum):
-     """The status of the workspace. For workspace creation, usually it is set to `PROVISIONING`
-     initially. Continue to check the status until the status is `RUNNING`."""
+     """The different statuses of a workspace. The following represents the current set of valid
+     transitions from status to status: NOT_PROVISIONED -> PROVISIONING -> CANCELLED PROVISIONING ->
+     RUNNING -> FAILED -> CANCELLED (note that this transition is disallowed in the MultiWorkspace
+     Project) RUNNING -> PROVISIONING -> BANNED -> CANCELLED FAILED -> PROVISIONING -> CANCELLED
+     BANNED -> RUNNING -> CANCELLED Note that a transition from any state to itself is also valid.
+     TODO(PLAT-5867): add a transition from CANCELLED to some other value (e.g. RECOVERING)"""

      BANNED = "BANNED"
      CANCELLING = "CANCELLING"
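Note: WorkspaceNetwork is documented as input-only and deprecated in favor of network_id, but it serializes like any other nested config. A sketch (the range is a placeholder, and the modes are documented as mutually exclusive, so only one is set):

    from databricks.sdk.service.provisioning import GcpManagedNetworkConfig, WorkspaceNetwork

    net = WorkspaceNetwork(
        gcp_managed_network_config=GcpManagedNetworkConfig(subnet_cidr="10.0.0.0/16"),  # placeholder range
    )
    print(net.as_dict())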
@@ -1671,29 +1778,30 @@ class CredentialsAPI:
1671
1778
  res = self._api.do("POST", f"/api/2.0/accounts/{self._api.account_id}/credentials", body=body, headers=headers)
1672
1779
  return Credential.from_dict(res)
1673
1780
 
1674
- def delete(self, credentials_id: str):
1781
+ def delete(self, credentials_id: str) -> Credential:
1675
1782
  """Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot
1676
1783
  delete a credential that is associated with any workspace.
1677
1784
 
1678
1785
  :param credentials_id: str
1679
1786
  Databricks Account API credential configuration ID
1680
1787
 
1681
-
1788
+ :returns: :class:`Credential`
1682
1789
  """
1683
1790
 
1684
1791
  headers = {
1685
1792
  "Accept": "application/json",
1686
1793
  }
1687
1794
 
1688
- self._api.do(
1795
+ res = self._api.do(
1689
1796
  "DELETE", f"/api/2.0/accounts/{self._api.account_id}/credentials/{credentials_id}", headers=headers
1690
1797
  )
1798
+ return Credential.from_dict(res)
1691
1799
 
1692
1800
  def get(self, credentials_id: str) -> Credential:
1693
1801
  """Gets a Databricks credential configuration object for an account, both specified by ID.
1694
1802
 
1695
1803
  :param credentials_id: str
1696
- Databricks Account API credential configuration ID
1804
+ Credential configuration ID
1697
1805
 
1698
1806
  :returns: :class:`Credential`
1699
1807
  """
@@ -1708,7 +1816,7 @@ class CredentialsAPI:
1708
1816
  return Credential.from_dict(res)
1709
1817
 
1710
1818
  def list(self) -> Iterator[Credential]:
1711
- """Gets all Databricks credential configurations associated with an account specified by ID.
1819
+ """List Databricks credential configuration objects for an account, specified by ID.
1712
1820
 
1713
1821
 
1714
1822
  :returns: Iterator over :class:`Credential`
@@ -1785,25 +1893,26 @@ class EncryptionKeysAPI:
1785
1893
  )
1786
1894
  return CustomerManagedKey.from_dict(res)
1787
1895
 
1788
- def delete(self, customer_managed_key_id: str):
1896
+ def delete(self, customer_managed_key_id: str) -> CustomerManagedKey:
1789
1897
  """Deletes a customer-managed key configuration object for an account. You cannot delete a configuration
1790
1898
  that is associated with a running workspace.
1791
1899
 
1792
1900
  :param customer_managed_key_id: str
1793
1901
  Databricks encryption key configuration ID.
1794
1902
 
1795
-
1903
+ :returns: :class:`CustomerManagedKey`
1796
1904
  """
1797
1905
 
1798
1906
  headers = {
1799
1907
  "Accept": "application/json",
1800
1908
  }
1801
1909
 
1802
- self._api.do(
1910
+ res = self._api.do(
1803
1911
  "DELETE",
1804
1912
  f"/api/2.0/accounts/{self._api.account_id}/customer-managed-keys/{customer_managed_key_id}",
1805
1913
  headers=headers,
1806
1914
  )
1915
+ return CustomerManagedKey.from_dict(res)
1807
1916
 
1808
1917
  def get(self, customer_managed_key_id: str) -> CustomerManagedKey:
1809
1918
  """Gets a customer-managed key configuration object for an account, specified by ID. This operation
@@ -1837,16 +1946,7 @@ class EncryptionKeysAPI:
1837
1946
  return CustomerManagedKey.from_dict(res)
1838
1947
 
1839
1948
  def list(self) -> Iterator[CustomerManagedKey]:
1840
- """Gets all customer-managed key configuration objects for an account. If the key is specified as a
1841
- workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's
1842
- notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history.
1843
- If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the
1844
- workspace's root S3 bucket and optionally can encrypt cluster EBS volumes data in the data plane.
1845
-
1846
- **Important**: Customer-managed keys are supported only for some deployment types, subscription types,
1847
- and AWS regions.
1848
-
1849
- This operation is available only if your account is on the E2 version of the platform.
1949
+ """Lists Databricks customer-managed key configurations for an account.
1850
1950
 
1851
1951
 
1852
1952
  :returns: Iterator over :class:`CustomerManagedKey`
@@ -1869,9 +1969,9 @@ class NetworksAPI:
1869
1969
 
1870
1970
  def create(
1871
1971
  self,
1872
- network_name: str,
1873
1972
  *,
1874
1973
  gcp_network_info: Optional[GcpNetworkInfo] = None,
1974
+ network_name: Optional[str] = None,
1875
1975
  security_group_ids: Optional[List[str]] = None,
1876
1976
  subnet_ids: Optional[List[str]] = None,
1877
1977
  vpc_endpoints: Optional[NetworkVpcEndpoints] = None,
@@ -1880,9 +1980,9 @@ class NetworksAPI:
1880
1980
  """Creates a Databricks network configuration that represents an VPC and its resources. The VPC will be
1881
1981
  used for new Databricks clusters. This requires a pre-existing VPC and subnets.
1882
1982
 
1883
- :param network_name: str
1884
- The human-readable name of the network configuration.
1885
1983
  :param gcp_network_info: :class:`GcpNetworkInfo` (optional)
1984
+ :param network_name: str (optional)
1985
+ The human-readable name of the network configuration.
1886
1986
  :param security_group_ids: List[str] (optional)
1887
1987
  IDs of one to five security groups associated with this network. Security group IDs **cannot** be
1888
1988
  used in multiple network configurations.
@@ -1891,8 +1991,8 @@ class NetworksAPI:
1891
1991
  network configurations.
1892
1992
  :param vpc_endpoints: :class:`NetworkVpcEndpoints` (optional)
1893
1993
  :param vpc_id: str (optional)
1894
- The ID of the VPC associated with this network. VPC IDs can be used in multiple network
1895
- configurations.
1994
+ The ID of the VPC associated with this network configuration. VPC IDs can be used in multiple
1995
+ networks.
1896
1996
 
1897
1997
  :returns: :class:`Network`
1898
1998
  """
@@ -1917,7 +2017,7 @@ class NetworksAPI:
1917
2017
  res = self._api.do("POST", f"/api/2.0/accounts/{self._api.account_id}/networks", body=body, headers=headers)
1918
2018
  return Network.from_dict(res)
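A minimal sketch of registering an existing customer-managed VPC under the new keyword-only signature (all AWS IDs are hypothetical):

from databricks.sdk import AccountClient

a = AccountClient()
net = a.networks.create(
    network_name="main-vpc",
    vpc_id="vpc-0a1b2c3d",  # hypothetical AWS VPC ID
    subnet_ids=["subnet-1111", "subnet-2222"],
    security_group_ids=["sg-3333"],
)
print(net.network_id)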
1919
2019
 
1920
- def delete(self, network_id: str):
2020
+ def delete(self, network_id: str) -> Network:
1921
2021
  """Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot
1922
2022
  delete a network that is associated with a workspace.
1923
2023
 
@@ -1926,14 +2026,15 @@ class NetworksAPI:
1926
2026
  :param network_id: str
1927
2027
  Databricks Account API network configuration ID.
1928
2028
 
1929
-
2029
+ :returns: :class:`Network`
1930
2030
  """
1931
2031
 
1932
2032
  headers = {
1933
2033
  "Accept": "application/json",
1934
2034
  }
1935
2035
 
1936
- self._api.do("DELETE", f"/api/2.0/accounts/{self._api.account_id}/networks/{network_id}", headers=headers)
2036
+ res = self._api.do("DELETE", f"/api/2.0/accounts/{self._api.account_id}/networks/{network_id}", headers=headers)
2037
+ return Network.from_dict(res)
1937
2038
 
1938
2039
  def get(self, network_id: str) -> Network:
1939
2040
  """Gets a Databricks network configuration, which represents a cloud VPC and its resources.
@@ -1952,9 +2053,7 @@ class NetworksAPI:
1952
2053
  return Network.from_dict(res)
1953
2054
 
1954
2055
  def list(self) -> Iterator[Network]:
1955
- """Gets a list of all Databricks network configurations for an account, specified by ID.
1956
-
1957
- This operation is available only if your account is on the E2 version of the platform.
2056
+ """Lists Databricks network configurations for an account.
1958
2057
 
1959
2058
 
1960
2059
  :returns: Iterator over :class:`Network`
@@ -1976,48 +2075,39 @@ class PrivateAccessAPI:
1976
2075
 
1977
2076
  def create(
1978
2077
  self,
1979
- private_access_settings_name: str,
1980
- region: str,
1981
2078
  *,
1982
2079
  allowed_vpc_endpoint_ids: Optional[List[str]] = None,
1983
2080
  private_access_level: Optional[PrivateAccessLevel] = None,
2081
+ private_access_settings_name: Optional[str] = None,
1984
2082
  public_access_enabled: Optional[bool] = None,
2083
+ region: Optional[str] = None,
1985
2084
  ) -> PrivateAccessSettings:
1986
- """Creates a private access settings object, which specifies how your workspace is accessed over [AWS
1987
- PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object
1988
- referenced by ID in the workspace's `private_access_settings_id` property.
1989
-
1990
- You can share one private access settings with multiple workspaces in a single account. However,
1991
- private access settings are specific to AWS regions, so only workspaces in the same AWS region can use
1992
- a given private access settings object.
1993
-
1994
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].
1995
-
1996
- [AWS PrivateLink]: https://aws.amazon.com/privatelink
1997
- [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2085
+ """Creates a private access settings configuration, which represents network access restrictions for
2086
+ workspace resources. Private access settings configure whether workspaces can be accessed from the
2087
+ public internet or only from private endpoints.
1998
2088
 
1999
- :param private_access_settings_name: str
2000
- The human-readable name of the private access settings object.
2001
- :param region: str
2002
- The cloud region for workspaces associated with this private access settings object.
2003
2089
  :param allowed_vpc_endpoint_ids: List[str] (optional)
2004
- An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering
2005
- the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in
2006
- AWS.
2007
-
2008
- Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints
2009
- that in your account that can connect to your workspace over AWS PrivateLink.
2010
-
2011
- If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this
2012
- control only works for PrivateLink connections. To control how your workspace is accessed via public
2013
- internet, see [IP access lists].
2014
-
2015
- [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
2090
+ An array of Databricks VPC endpoint IDs. This is the Databricks ID returned when registering the VPC
2091
+ endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in AWS.
2092
+ Only used when private_access_level is set to ENDPOINT. This is an allow list of VPC endpoints
2093
+ registered in your Databricks account that can connect to your workspace over AWS PrivateLink. Note:
2094
+ If hybrid access to your workspace is enabled by setting public_access_enabled to true, this control
2095
+ only works for PrivateLink connections. To control how your workspace is accessed via public
2096
+ internet, see IP access lists.
2016
2097
  :param private_access_level: :class:`PrivateAccessLevel` (optional)
2098
+ The private access level controls which VPC endpoints can connect to the UI or API of any workspace
2099
+ that attaches this private access settings object. `ACCOUNT` level access (the default) allows only
2100
+ VPC endpoints that are registered in your Databricks account connect to your workspace. `ENDPOINT`
2101
+ level access allows only specified VPC endpoints connect to your workspace. For details, see
2102
+ allowed_vpc_endpoint_ids.
2103
+ :param private_access_settings_name: str (optional)
2104
+ The human-readable name of the private access settings object.
2017
2105
  :param public_access_enabled: bool (optional)
2018
2106
  Determines if the workspace can be accessed over public internet. For fully private workspaces, you
2019
- can optionally specify `false`, but only if you implement both the front-end and the back-end
2020
- PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled.
2107
+ can optionally specify false, but only if you implement both the front-end and the back-end
2108
+ PrivateLink connections. Otherwise, specify true, which means that public access is enabled.
2109
+ :param region: str (optional)
2110
+ The AWS region for workspaces attached to this private access settings object.
2021
2111
 
2022
2112
  :returns: :class:`PrivateAccessSettings`
2023
2113
  """
@@ -2042,42 +2132,29 @@ class PrivateAccessAPI:
2042
2132
  )
2043
2133
  return PrivateAccessSettings.from_dict(res)
2044
2134
 
2045
- def delete(self, private_access_settings_id: str):
2046
- """Deletes a private access settings object, which determines how your workspace is accessed over [AWS
2047
- PrivateLink].
2048
-
2049
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
2050
-
2051
- [AWS PrivateLink]: https://aws.amazon.com/privatelink
2052
- [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2135
+ def delete(self, private_access_settings_id: str) -> PrivateAccessSettings:
2136
+ """Deletes a Databricks private access settings configuration, both specified by ID.
2053
2137
 
2054
2138
  :param private_access_settings_id: str
2055
- Databricks Account API private access settings ID.
2056
-
2057
2139
 
2140
+ :returns: :class:`PrivateAccessSettings`
2058
2141
  """
2059
2142
 
2060
2143
  headers = {
2061
2144
  "Accept": "application/json",
2062
2145
  }
2063
2146
 
2064
- self._api.do(
2147
+ res = self._api.do(
2065
2148
  "DELETE",
2066
2149
  f"/api/2.0/accounts/{self._api.account_id}/private-access-settings/{private_access_settings_id}",
2067
2150
  headers=headers,
2068
2151
  )
2152
+ return PrivateAccessSettings.from_dict(res)
2069
2153
 
2070
2154
  def get(self, private_access_settings_id: str) -> PrivateAccessSettings:
2071
- """Gets a private access settings object, which specifies how your workspace is accessed over [AWS
2072
- PrivateLink].
2073
-
2074
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
2075
-
2076
- [AWS PrivateLink]: https://aws.amazon.com/privatelink
2077
- [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2155
+ """Gets a Databricks private access settings configuration, both specified by ID.
2078
2156
 
2079
2157
  :param private_access_settings_id: str
2080
- Databricks Account API private access settings ID.
2081
2158
 
2082
2159
  :returns: :class:`PrivateAccessSettings`
2083
2160
  """
@@ -2094,7 +2171,7 @@ class PrivateAccessAPI:
2094
2171
  return PrivateAccessSettings.from_dict(res)
2095
2172
 
2096
2173
  def list(self) -> Iterator[PrivateAccessSettings]:
2097
- """Gets a list of all private access settings objects for an account, specified by ID.
2174
+ """Lists Databricks private access settings for an account.
2098
2175
 
2099
2176
 
2100
2177
  :returns: Iterator over :class:`PrivateAccessSettings`
@@ -2108,82 +2185,39 @@ class PrivateAccessAPI:
2108
2185
  return [PrivateAccessSettings.from_dict(v) for v in res]
2109
2186
 
2110
2187
  def replace(
2111
- self,
2112
- private_access_settings_id: str,
2113
- private_access_settings_name: str,
2114
- region: str,
2115
- *,
2116
- allowed_vpc_endpoint_ids: Optional[List[str]] = None,
2117
- private_access_level: Optional[PrivateAccessLevel] = None,
2118
- public_access_enabled: Optional[bool] = None,
2119
- ):
2188
+ self, private_access_settings_id: str, customer_facing_private_access_settings: PrivateAccessSettings
2189
+ ) -> PrivateAccessSettings:
2120
2190
  """Updates an existing private access settings object, which specifies how your workspace is accessed
2121
- over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object
2122
- referenced by ID in the workspace's `private_access_settings_id` property.
2123
-
2124
- This operation completely overwrites your existing private access settings object attached to your
2125
- workspaces. All workspaces attached to the private access settings are affected by any change. If
2126
- `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of
2127
- these changes might take several minutes to propagate to the workspace API.
2128
-
2129
- You can share one private access settings object with multiple workspaces in a single account.
2130
- However, private access settings are specific to AWS regions, so only workspaces in the same AWS
2131
- region can use a given private access settings object.
2132
-
2133
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].
2134
-
2135
- [AWS PrivateLink]: https://aws.amazon.com/privatelink
2136
- [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2191
+ over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object
2192
+ referenced by ID in the workspace's private_access_settings_id property. This operation completely
2193
+ overwrites your existing private access settings object attached to your workspaces. All workspaces
2194
+ attached to the private access settings are affected by any change. If public_access_enabled,
2195
+ private_access_level, or allowed_vpc_endpoint_ids are updated, effects of these changes might take
2196
+ several minutes to propagate to the workspace API. You can share one private access settings object
2197
+ with multiple workspaces in a single account. However, private access settings are specific to AWS
2198
+ regions, so only workspaces in the same AWS region can use a given private access settings object.
2199
+ Before configuring PrivateLink, read the Databricks article about PrivateLink.
2137
2200
 
2138
2201
  :param private_access_settings_id: str
2139
- Databricks Account API private access settings ID.
2140
- :param private_access_settings_name: str
2141
- The human-readable name of the private access settings object.
2142
- :param region: str
2143
- The cloud region for workspaces associated with this private access settings object.
2144
- :param allowed_vpc_endpoint_ids: List[str] (optional)
2145
- An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering
2146
- the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in
2147
- AWS.
2148
-
2149
- Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints
2150
- that in your account that can connect to your workspace over AWS PrivateLink.
2151
-
2152
- If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this
2153
- control only works for PrivateLink connections. To control how your workspace is accessed via public
2154
- internet, see [IP access lists].
2155
-
2156
- [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
2157
- :param private_access_level: :class:`PrivateAccessLevel` (optional)
2158
- :param public_access_enabled: bool (optional)
2159
- Determines if the workspace can be accessed over public internet. For fully private workspaces, you
2160
- can optionally specify `false`, but only if you implement both the front-end and the back-end
2161
- PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled.
2162
-
2202
+ Databricks private access settings ID.
2203
+ :param customer_facing_private_access_settings: :class:`PrivateAccessSettings`
2204
+ Properties of the new private access settings object.
2163
2205
 
2206
+ :returns: :class:`PrivateAccessSettings`
2164
2207
  """
2165
- body = {}
2166
- if allowed_vpc_endpoint_ids is not None:
2167
- body["allowed_vpc_endpoint_ids"] = [v for v in allowed_vpc_endpoint_ids]
2168
- if private_access_level is not None:
2169
- body["private_access_level"] = private_access_level.value
2170
- if private_access_settings_name is not None:
2171
- body["private_access_settings_name"] = private_access_settings_name
2172
- if public_access_enabled is not None:
2173
- body["public_access_enabled"] = public_access_enabled
2174
- if region is not None:
2175
- body["region"] = region
2208
+ body = customer_facing_private_access_settings.as_dict()
2176
2209
  headers = {
2177
2210
  "Accept": "application/json",
2178
2211
  "Content-Type": "application/json",
2179
2212
  }
2180
2213
 
2181
- self._api.do(
2214
+ res = self._api.do(
2182
2215
  "PUT",
2183
2216
  f"/api/2.0/accounts/{self._api.account_id}/private-access-settings/{private_access_settings_id}",
2184
2217
  body=body,
2185
2218
  headers=headers,
2186
2219
  )
2220
+ return PrivateAccessSettings.from_dict(res)
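Because replace() now accepts a whole PrivateAccessSettings object and completely overwrites the stored one, read-modify-write is the safest pattern. A minimal sketch (the ID is hypothetical):

from databricks.sdk import AccountClient

a = AccountClient()
pas = a.private_access.get(private_access_settings_id="pas-1234")  # hypothetical ID
pas.public_access_enabled = True  # change one field, keep the rest
updated = a.private_access.replace(
    private_access_settings_id="pas-1234",
    customer_facing_private_access_settings=pas,
)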
2187
2221
 
2188
2222
 
2189
2223
  class StorageAPI:
@@ -2195,24 +2229,27 @@ class StorageAPI:
2195
2229
  def __init__(self, api_client):
2196
2230
  self._api = api_client
2197
2231
 
2198
- def create(self, storage_configuration_name: str, root_bucket_info: RootBucketInfo) -> StorageConfiguration:
2199
- """Creates new storage configuration for an account, specified by ID. Uploads a storage configuration
2200
- object that represents the root AWS S3 bucket in your account. Databricks stores related workspace
2201
- assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the
2202
- required bucket policy.
2203
-
2204
- For information about how to create a new workspace with this API, see [Create a new workspace using
2205
- the Account API]
2206
-
2207
- [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
2232
+ def create(
2233
+ self, storage_configuration_name: str, root_bucket_info: RootBucketInfo, *, role_arn: Optional[str] = None
2234
+ ) -> StorageConfiguration:
2235
+ """Creates a Databricks storage configuration for an account.
2208
2236
 
2209
2237
  :param storage_configuration_name: str
2210
2238
  The human-readable name of the storage configuration.
2211
2239
  :param root_bucket_info: :class:`RootBucketInfo`
2240
+ Root S3 bucket information.
2241
+ :param role_arn: str (optional)
2242
+ Optional IAM role that is used to access the workspace catalog which is created during workspace
2243
+ creation for UC by Default. If a storage configuration with this field populated is used to create a
2244
+ workspace, then a workspace catalog is created together with the workspace. The workspace catalog
2245
+ shares the root bucket with internal workspace storage (including DBFS root) but uses a dedicated
2246
+ bucket path prefix.
2212
2247
 
2213
2248
  :returns: :class:`StorageConfiguration`
2214
2249
  """
2215
2250
  body = {}
2251
+ if role_arn is not None:
2252
+ body["role_arn"] = role_arn
2216
2253
  if root_bucket_info is not None:
2217
2254
  body["root_bucket_info"] = root_bucket_info.as_dict()
2218
2255
  if storage_configuration_name is not None:
@@ -2227,31 +2264,30 @@ class StorageAPI:
2227
2264
  )
2228
2265
  return StorageConfiguration.from_dict(res)
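A minimal sketch of the new role_arn option described above, which makes a later workspace creation also provision a workspace catalog (bucket name and role ARN are hypothetical):

from databricks.sdk import AccountClient
from databricks.sdk.service.provisioning import RootBucketInfo

a = AccountClient()
storage = a.storage.create(
    storage_configuration_name="main-storage",
    root_bucket_info=RootBucketInfo(bucket_name="my-company-dbfs-root"),  # hypothetical bucket
    role_arn="arn:aws:iam::123456789012:role/uc-default-catalog",  # hypothetical role
)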
2229
2266
 
2230
- def delete(self, storage_configuration_id: str):
2267
+ def delete(self, storage_configuration_id: str) -> StorageConfiguration:
2231
2268
  """Deletes a Databricks storage configuration. You cannot delete a storage configuration that is
2232
2269
  associated with any workspace.
2233
2270
 
2234
2271
  :param storage_configuration_id: str
2235
- Databricks Account API storage configuration ID.
2236
-
2237
2272
 
2273
+ :returns: :class:`StorageConfiguration`
2238
2274
  """
2239
2275
 
2240
2276
  headers = {
2241
2277
  "Accept": "application/json",
2242
2278
  }
2243
2279
 
2244
- self._api.do(
2280
+ res = self._api.do(
2245
2281
  "DELETE",
2246
2282
  f"/api/2.0/accounts/{self._api.account_id}/storage-configurations/{storage_configuration_id}",
2247
2283
  headers=headers,
2248
2284
  )
2285
+ return StorageConfiguration.from_dict(res)
2249
2286
 
2250
2287
  def get(self, storage_configuration_id: str) -> StorageConfiguration:
2251
2288
  """Gets a Databricks storage configuration for an account, both specified by ID.
2252
2289
 
2253
2290
  :param storage_configuration_id: str
2254
- Databricks Account API storage configuration ID.
2255
2291
 
2256
2292
  :returns: :class:`StorageConfiguration`
2257
2293
  """
@@ -2268,7 +2304,7 @@ class StorageAPI:
2268
2304
  return StorageConfiguration.from_dict(res)
2269
2305
 
2270
2306
  def list(self) -> Iterator[StorageConfiguration]:
2271
- """Gets a list of all Databricks storage configurations for your account, specified by ID.
2307
+ """Lists Databricks storage configurations for an account, specified by ID.
2272
2308
 
2273
2309
 
2274
2310
  :returns: Iterator over :class:`StorageConfiguration`
@@ -2290,11 +2326,11 @@ class VpcEndpointsAPI:
2290
2326
 
2291
2327
  def create(
2292
2328
  self,
2293
- vpc_endpoint_name: str,
2294
2329
  *,
2295
2330
  aws_vpc_endpoint_id: Optional[str] = None,
2296
2331
  gcp_vpc_endpoint_info: Optional[GcpVpcEndpointInfo] = None,
2297
2332
  region: Optional[str] = None,
2333
+ vpc_endpoint_name: Optional[str] = None,
2298
2334
  ) -> VpcEndpoint:
2299
2335
  """Creates a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to
2300
2336
  communicate privately with Databricks over [AWS PrivateLink].
@@ -2309,13 +2345,14 @@ class VpcEndpointsAPI:
2309
2345
  [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
2310
2346
  [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html
2311
2347
 
2312
- :param vpc_endpoint_name: str
2313
- The human-readable name of the storage configuration.
2314
2348
  :param aws_vpc_endpoint_id: str (optional)
2315
2349
  The ID of the VPC endpoint object in AWS.
2316
2350
  :param gcp_vpc_endpoint_info: :class:`GcpVpcEndpointInfo` (optional)
2351
+ The cloud info of this VPC endpoint.
2317
2352
  :param region: str (optional)
2318
- The AWS region in which this VPC endpoint object exists.
2353
+ The region in which this VPC endpoint object exists.
2354
+ :param vpc_endpoint_name: str (optional)
2355
+ The human-readable name of the storage configuration.
2319
2356
 
2320
2357
  :returns: :class:`VpcEndpoint`
2321
2358
  """
@@ -2338,29 +2375,23 @@ class VpcEndpointsAPI:
2338
2375
  )
2339
2376
  return VpcEndpoint.from_dict(res)
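A minimal sketch of registering an AWS VPC endpoint under the new keyword-only signature (IDs are hypothetical):

from databricks.sdk import AccountClient

a = AccountClient()
vpce = a.vpc_endpoints.create(
    vpc_endpoint_name="workspace-vpce",
    aws_vpc_endpoint_id="vpce-0f1e2d3c",  # hypothetical AWS VPC endpoint ID
    region="us-west-2",
)
print(vpce.vpc_endpoint_id)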
2340
2377
 
2341
- def delete(self, vpc_endpoint_id: str):
2342
- """Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] that can communicate
2343
- privately with Databricks over [AWS PrivateLink].
2344
-
2345
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].
2346
-
2347
- [AWS PrivateLink]: https://aws.amazon.com/privatelink
2348
- [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
2349
- [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2378
+ def delete(self, vpc_endpoint_id: str) -> VpcEndpoint:
2379
+ """Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration that
2380
+ is associated with any workspace.
2350
2381
 
2351
2382
  :param vpc_endpoint_id: str
2352
- Databricks VPC endpoint ID.
2353
-
2354
2383
 
2384
+ :returns: :class:`VpcEndpoint`
2355
2385
  """
2356
2386
 
2357
2387
  headers = {
2358
2388
  "Accept": "application/json",
2359
2389
  }
2360
2390
 
2361
- self._api.do(
2391
+ res = self._api.do(
2362
2392
  "DELETE", f"/api/2.0/accounts/{self._api.account_id}/vpc-endpoints/{vpc_endpoint_id}", headers=headers
2363
2393
  )
2394
+ return VpcEndpoint.from_dict(res)
2364
2395
 
2365
2396
  def get(self, vpc_endpoint_id: str) -> VpcEndpoint:
2366
2397
  """Gets a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to communicate
@@ -2385,11 +2416,7 @@ class VpcEndpointsAPI:
2385
2416
  return VpcEndpoint.from_dict(res)
2386
2417
 
2387
2418
  def list(self) -> Iterator[VpcEndpoint]:
2388
- """Gets a list of all VPC endpoints for an account, specified by ID.
2389
-
2390
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].
2391
-
2392
- [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2419
+ """Lists Databricks VPC endpoint configurations for an account.
2393
2420
 
2394
2421
 
2395
2422
  :returns: Iterator over :class:`VpcEndpoint`
@@ -2448,17 +2475,16 @@ class WorkspacesAPI:
2448
2475
 
2449
2476
  def create(
2450
2477
  self,
2451
- workspace_name: str,
2452
2478
  *,
2453
2479
  aws_region: Optional[str] = None,
2454
2480
  cloud: Optional[str] = None,
2455
2481
  cloud_resource_container: Optional[CloudResourceContainer] = None,
2482
+ compute_mode: Optional[CustomerFacingComputeMode] = None,
2456
2483
  credentials_id: Optional[str] = None,
2457
2484
  custom_tags: Optional[Dict[str, str]] = None,
2458
2485
  deployment_name: Optional[str] = None,
2459
2486
  gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None,
2460
2487
  gke_config: Optional[GkeConfig] = None,
2461
- is_no_public_ip_enabled: Optional[bool] = None,
2462
2488
  location: Optional[str] = None,
2463
2489
  managed_services_customer_managed_key_id: Optional[str] = None,
2464
2490
  network_id: Optional[str] = None,
@@ -2466,24 +2492,48 @@ class WorkspacesAPI:
2466
2492
  private_access_settings_id: Optional[str] = None,
2467
2493
  storage_configuration_id: Optional[str] = None,
2468
2494
  storage_customer_managed_key_id: Optional[str] = None,
2495
+ workspace_name: Optional[str] = None,
2469
2496
  ) -> Wait[Workspace]:
2470
- """Creates a new workspace.
2497
+ """Creates a new workspace using a credential configuration and a storage configuration, an optional
2498
+ network configuration (if using a customer-managed VPC), an optional managed services key
2499
+ configuration (if using customer-managed keys for managed services), and an optional storage key
2500
+ configuration (if using customer-managed keys for storage). The key configurations used for managed
2501
+ services and storage encryption can be the same or different.
2502
+
2503
+ Important: This operation is asynchronous. A response with HTTP status code 200 means the request has
2504
+ been accepted and is in progress, but does not mean that the workspace deployed successfully and is
2505
+ running. The initial workspace status is typically PROVISIONING. Use the workspace ID (workspace_id)
2506
+ field in the response to identify the new workspace and make repeated GET requests with the workspace
2507
+ ID and check its status. The workspace becomes available when the status changes to RUNNING.
2508
+
2509
+ You can share one customer-managed VPC with multiple workspaces in a single account. It is not
2510
+ required to create a new VPC for each workspace. However, you cannot reuse subnets or Security Groups
2511
+ between workspaces. If you plan to share one VPC with multiple workspaces, make sure you size your VPC
2512
+ and subnets accordingly. Because a Databricks Account API network configuration encapsulates this
2513
+ information, you cannot reuse a Databricks Account API network configuration across workspaces.
2514
+
2515
+ For information about how to create a new workspace with this API including error handling, see
2516
+ [Create a new workspace using the Account API].
2517
+
2518
+ Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a limited
2519
+ set of deployment and subscription types. If you have questions about availability, contact your
2520
+ Databricks representative.
2471
2521
 
2472
- **Important**: This operation is asynchronous. A response with HTTP status code 200 means the request
2473
- has been accepted and is in progress, but does not mean that the workspace deployed successfully and
2474
- is running. The initial workspace status is typically `PROVISIONING`. Use the workspace ID
2475
- (`workspace_id`) field in the response to identify the new workspace and make repeated `GET` requests
2476
- with the workspace ID and check its status. The workspace becomes available when the status changes to
2477
- `RUNNING`.
2522
+ This operation is available only if your account is on the E2 version of the platform or on a select
2523
+ custom plan that allows multiple workspaces per account.
2524
+
2525
+ [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
2478
2526
 
2479
- :param workspace_name: str
2480
- The workspace's human-readable name.
2481
2527
  :param aws_region: str (optional)
2482
- The AWS region of the workspace's data plane.
2483
2528
  :param cloud: str (optional)
2484
- The cloud provider which the workspace uses. For Google Cloud workspaces, always set this field to
2485
- `gcp`.
2529
+ The cloud name. This field always has the value `gcp`.
2486
2530
  :param cloud_resource_container: :class:`CloudResourceContainer` (optional)
2531
+ :param compute_mode: :class:`CustomerFacingComputeMode` (optional)
2532
+ If the compute mode is `SERVERLESS`, a serverless workspace is created that comes pre-configured
2533
+ with serverless compute and default storage, providing a fully-managed, enterprise-ready SaaS
2534
+ experience. This means you don't need to provide any resources managed by you, such as credentials,
2535
+ storage, or network. If the compute mode is `HYBRID` (which is the default option), a classic
2536
+ workspace is created that uses customer-managed resources.
2487
2537
  :param credentials_id: str (optional)
2488
2538
  ID of the workspace's credential configuration object.
2489
2539
  :param custom_tags: Dict[str,str] (optional)
@@ -2492,55 +2542,49 @@ class WorkspacesAPI:
2492
2542
  key can be of maximum length of 127 characters, and cannot be empty.
2493
2543
  :param deployment_name: str (optional)
2494
2544
  The deployment name defines part of the subdomain for the workspace. The workspace URL for the web
2495
- application and REST APIs is `<workspace-deployment-name>.cloud.databricks.com`. For example, if the
2496
- deployment name is `abcsales`, your workspace URL will be `https://abcsales.cloud.databricks.com`.
2545
+ application and REST APIs is <workspace-deployment-name>.cloud.databricks.com. For example, if the
2546
+ deployment name is abcsales, your workspace URL will be https://abcsales.cloud.databricks.com.
2497
2547
  Hyphens are allowed. This property supports only the set of characters that are allowed in a
2498
- subdomain.
2499
-
2500
- To set this value, you must have a deployment name prefix. Contact your Databricks account team to
2501
- add an account deployment name prefix to your account.
2502
-
2503
- Workspace deployment names follow the account prefix and a hyphen. For example, if your account's
2504
- deployment prefix is `acme` and the workspace deployment name is `workspace-1`, the JSON response
2505
- for the `deployment_name` field becomes `acme-workspace-1`. The workspace URL would be
2506
- `acme-workspace-1.cloud.databricks.com`.
2507
-
2508
- You can also set the `deployment_name` to the reserved keyword `EMPTY` if you want the deployment
2509
- name to only include the deployment prefix. For example, if your account's deployment prefix is
2510
- `acme` and the workspace deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and
2511
- the workspace URL is `acme.cloud.databricks.com`.
2512
-
2513
- This value must be unique across all non-deleted deployments across all AWS regions.
2514
-
2515
- If a new workspace omits this property, the server generates a unique deployment name for you with
2516
- the pattern `dbc-xxxxxxxx-xxxx`.
2548
+ subdomain. To set this value, you must have a deployment name prefix. Contact your Databricks
2549
+ account team to add an account deployment name prefix to your account. Workspace deployment names
2550
+ follow the account prefix and a hyphen. For example, if your account's deployment prefix is acme and
2551
+ the workspace deployment name is workspace-1, the JSON response for the deployment_name field
2552
+ becomes acme-workspace-1. The workspace URL would be acme-workspace-1.cloud.databricks.com. You can
2553
+ also set the deployment_name to the reserved keyword EMPTY if you want the deployment name to only
2554
+ include the deployment prefix. For example, if your account's deployment prefix is acme and the
2555
+ workspace deployment name is EMPTY, the deployment_name becomes acme only and the workspace URL is
2556
+ acme.cloud.databricks.com. This value must be unique across all non-deleted deployments across all
2557
+ AWS regions. If a new workspace omits this property, the server generates a unique deployment name
2558
+ for you with the pattern dbc-xxxxxxxx-xxxx.
2517
2559
  :param gcp_managed_network_config: :class:`GcpManagedNetworkConfig` (optional)
2518
2560
  :param gke_config: :class:`GkeConfig` (optional)
2519
- :param is_no_public_ip_enabled: bool (optional)
2520
- Whether no public IP is enabled for the workspace.
2521
2561
  :param location: str (optional)
2522
- The Google Cloud region of the workspace data plane in your Google account. For example, `us-east4`.
2562
+ The Google Cloud region of the workspace data plane in your Google account (for example,
2563
+ `us-east4`).
2523
2564
  :param managed_services_customer_managed_key_id: str (optional)
2524
2565
  The ID of the workspace's managed services encryption key configuration object. This is used to help
2525
2566
  protect and control access to the workspace's notebooks, secrets, Databricks SQL queries, and query
2526
- history. The provided key configuration object property `use_cases` must contain `MANAGED_SERVICES`.
2567
+ history. The provided key configuration object property use_cases must contain MANAGED_SERVICES.
2527
2568
  :param network_id: str (optional)
2569
+ The ID of the workspace's network configuration object. To use AWS PrivateLink, this field is
2570
+ required.
2528
2571
  :param pricing_tier: :class:`PricingTier` (optional)
2529
2572
  :param private_access_settings_id: str (optional)
2530
- ID of the workspace's private access settings object. Only used for PrivateLink. This ID must be
2531
- specified for customers using [AWS PrivateLink] for either front-end (user-to-workspace connection),
2532
- back-end (data plane to control plane connection), or both connection types.
2533
-
2534
- Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
2573
+ ID of the workspace's private access settings object. Only used for PrivateLink. You must specify
2574
+ this ID if you are using [AWS PrivateLink] for either front-end (user-to-workspace connection),
2575
+ back-end (data plane to control plane connection), or both connection types. Before configuring
2576
+ PrivateLink, read the [Databricks article about PrivateLink].
2535
2577
 
2536
2578
  [AWS PrivateLink]: https://aws.amazon.com/privatelink/
2537
2579
  [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
2538
2580
  :param storage_configuration_id: str (optional)
2539
- The ID of the workspace's storage configuration object.
2581
+ ID of the workspace's storage configuration object.
2540
2582
  :param storage_customer_managed_key_id: str (optional)
2541
2583
  The ID of the workspace's storage encryption key configuration object. This is used to encrypt the
2542
2584
  workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes. The
2543
- provided key configuration object property `use_cases` must contain `STORAGE`.
2585
+ provided key configuration object property use_cases must contain STORAGE.
2586
+ :param workspace_name: str (optional)
2587
+ The human-readable name of the workspace.
2544
2588
 
2545
2589
  :returns:
2546
2590
  Long-running operation waiter for :class:`Workspace`.
@@ -2553,6 +2597,8 @@ class WorkspacesAPI:
2553
2597
  body["cloud"] = cloud
2554
2598
  if cloud_resource_container is not None:
2555
2599
  body["cloud_resource_container"] = cloud_resource_container.as_dict()
2600
+ if compute_mode is not None:
2601
+ body["compute_mode"] = compute_mode.value
2556
2602
  if credentials_id is not None:
2557
2603
  body["credentials_id"] = credentials_id
2558
2604
  if custom_tags is not None:
@@ -2563,8 +2609,6 @@ class WorkspacesAPI:
2563
2609
  body["gcp_managed_network_config"] = gcp_managed_network_config.as_dict()
2564
2610
  if gke_config is not None:
2565
2611
  body["gke_config"] = gke_config.as_dict()
2566
- if is_no_public_ip_enabled is not None:
2567
- body["is_no_public_ip_enabled"] = is_no_public_ip_enabled
2568
2612
  if location is not None:
2569
2613
  body["location"] = location
2570
2614
  if managed_services_customer_managed_key_id is not None:
@@ -2597,17 +2641,16 @@ class WorkspacesAPI:
2597
2641
 
2598
2642
  def create_and_wait(
2599
2643
  self,
2600
- workspace_name: str,
2601
2644
  *,
2602
2645
  aws_region: Optional[str] = None,
2603
2646
  cloud: Optional[str] = None,
2604
2647
  cloud_resource_container: Optional[CloudResourceContainer] = None,
2648
+ compute_mode: Optional[CustomerFacingComputeMode] = None,
2605
2649
  credentials_id: Optional[str] = None,
2606
2650
  custom_tags: Optional[Dict[str, str]] = None,
2607
2651
  deployment_name: Optional[str] = None,
2608
2652
  gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None,
2609
2653
  gke_config: Optional[GkeConfig] = None,
2610
- is_no_public_ip_enabled: Optional[bool] = None,
2611
2654
  location: Optional[str] = None,
2612
2655
  managed_services_customer_managed_key_id: Optional[str] = None,
2613
2656
  network_id: Optional[str] = None,
@@ -2615,18 +2658,19 @@ class WorkspacesAPI:
2615
2658
  private_access_settings_id: Optional[str] = None,
2616
2659
  storage_configuration_id: Optional[str] = None,
2617
2660
  storage_customer_managed_key_id: Optional[str] = None,
2661
+ workspace_name: Optional[str] = None,
2618
2662
  timeout=timedelta(minutes=20),
2619
2663
  ) -> Workspace:
2620
2664
  return self.create(
2621
2665
  aws_region=aws_region,
2622
2666
  cloud=cloud,
2623
2667
  cloud_resource_container=cloud_resource_container,
2668
+ compute_mode=compute_mode,
2624
2669
  credentials_id=credentials_id,
2625
2670
  custom_tags=custom_tags,
2626
2671
  deployment_name=deployment_name,
2627
2672
  gcp_managed_network_config=gcp_managed_network_config,
2628
2673
  gke_config=gke_config,
2629
- is_no_public_ip_enabled=is_no_public_ip_enabled,
2630
2674
  location=location,
2631
2675
  managed_services_customer_managed_key_id=managed_services_customer_managed_key_id,
2632
2676
  network_id=network_id,
@@ -2637,42 +2681,34 @@ class WorkspacesAPI:
2637
2681
  workspace_name=workspace_name,
2638
2682
  ).result(timeout=timeout)
2639
2683
 
2640
- def delete(self, workspace_id: int):
2641
- """Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate.
2642
- However, it might take a few minutes for all workspaces resources to be deleted, depending on the size
2643
- and number of workspace resources.
2644
-
2645
- This operation is available only if your account is on the E2 version of the platform or on a select
2646
- custom plan that allows multiple workspaces per account.
2684
+ def delete(self, workspace_id: int) -> Workspace:
2685
+ """Deletes a Databricks workspace, both specified by ID.
2647
2686
 
2648
2687
  :param workspace_id: int
2649
- Workspace ID.
2650
-
2651
2688
 
2689
+ :returns: :class:`Workspace`
2652
2690
  """
2653
2691
 
2654
2692
  headers = {
2655
2693
  "Accept": "application/json",
2656
2694
  }
2657
2695
 
2658
- self._api.do("DELETE", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", headers=headers)
2696
+ res = self._api.do(
2697
+ "DELETE", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", headers=headers
2698
+ )
2699
+ return Workspace.from_dict(res)
2659
2700
 
2660
2701
  def get(self, workspace_id: int) -> Workspace:
2661
2702
  """Gets information including status for a Databricks workspace, specified by ID. In the response, the
2662
2703
  `workspace_status` field indicates the current status. After initial workspace creation (which is
2663
2704
  asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace
2664
- becomes available when the status changes to `RUNNING`.
2665
-
2666
- For information about how to create a new workspace with this API **including error handling**, see
2667
- [Create a new workspace using the Account API].
2668
-
2669
- This operation is available only if your account is on the E2 version of the platform or on a select
2670
- custom plan that allows multiple workspaces per account.
2705
+ becomes available when the status changes to `RUNNING`. For information about how to create a new
2706
+ workspace with this API **including error handling**, see [Create a new workspace using the Account
2707
+ API].
2671
2708
 
2672
2709
  [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
2673
2710
 
2674
2711
  :param workspace_id: int
2675
- Workspace ID.
2676
2712
 
2677
2713
  :returns: :class:`Workspace`
2678
2714
  """
@@ -2687,10 +2723,7 @@ class WorkspacesAPI:
2687
2723
  return Workspace.from_dict(res)
2688
2724
 
2689
2725
  def list(self) -> Iterator[Workspace]:
2690
- """Gets a list of all workspaces associated with an account, specified by ID.
2691
-
2692
- This operation is available only if your account is on the E2 version of the platform or on a select
2693
- custom plan that allows multiple workspaces per account.
2726
+ """Lists Databricks workspaces for an account.
2694
2727
 
2695
2728
 
2696
2729
  :returns: Iterator over :class:`Workspace`
@@ -2704,202 +2737,58 @@ class WorkspacesAPI:
2704
2737
  return [Workspace.from_dict(v) for v in res]
2705
2738
 
2706
2739
  def update(
2707
- self,
2708
- workspace_id: int,
2709
- *,
2710
- aws_region: Optional[str] = None,
2711
- credentials_id: Optional[str] = None,
2712
- custom_tags: Optional[Dict[str, str]] = None,
2713
- managed_services_customer_managed_key_id: Optional[str] = None,
2714
- network_connectivity_config_id: Optional[str] = None,
2715
- network_id: Optional[str] = None,
2716
- private_access_settings_id: Optional[str] = None,
2717
- storage_configuration_id: Optional[str] = None,
2718
- storage_customer_managed_key_id: Optional[str] = None,
2740
+ self, workspace_id: int, customer_facing_workspace: Workspace, *, update_mask: Optional[str] = None
2719
2741
  ) -> Wait[Workspace]:
2720
- """Updates a workspace configuration for either a running workspace or a failed workspace. The elements
2721
- that can be updated varies between these two use cases.
2722
-
2723
- ### Update a failed workspace You can update a Databricks workspace configuration for failed workspace
2724
- deployment for some fields, but not all fields. For a failed workspace, this request supports updates
2725
- to the following fields only: - Credential configuration ID - Storage configuration ID - Network
2726
- configuration ID. Used only to add or change a network configuration for a customer-managed VPC. For a
2727
- failed workspace only, you can convert a workspace with Databricks-managed VPC to use a
2728
- customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC
2729
- to be a Databricks-managed VPC. You can update the network configuration for a failed or running
2730
- workspace to add PrivateLink support, though you must also add a private access settings object. - Key
2731
- configuration ID for managed services (control plane storage, such as notebook source and Databricks
2732
- SQL queries). Used only if you use customer-managed keys for managed services. - Key configuration ID
2733
- for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use
2734
- customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running
2735
- state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID
2736
- for workspace storage. - Private access settings ID to add PrivateLink support. You can add or update
2737
- the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both
2738
- types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink
2739
- support on a workspace. - Custom tags. Given you provide an empty custom tags, the update would not be
2740
- applied. - Network connectivity configuration ID to add serverless stable IP support. You can add or
2741
- update the network connectivity configuration ID to ensure the workspace uses the same set of stable
2742
- IP CIDR blocks to access your resources. You cannot remove a network connectivity configuration from
2743
- the workspace once attached, you can only switch to another one.
2744
-
2745
- After calling the `PATCH` operation to update the workspace configuration, make repeated `GET`
2746
- requests with the workspace ID and check the workspace status. The workspace is successful if the
2747
- status changes to `RUNNING`.
2748
-
2749
- For information about how to create a new workspace with this API **including error handling**, see
2750
- [Create a new workspace using the Account API].
2751
-
2752
- ### Update a running workspace You can update a Databricks workspace configuration for running
2753
- workspaces for some fields, but not all fields. For a running workspace, this request supports
2754
- updating the following fields only: - Credential configuration ID - Network configuration ID. Used
2755
- only if you already use a customer-managed VPC. You cannot convert a running workspace from a
2756
- Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in this
2757
- API for a failed or running workspace to add support for PrivateLink, although you also need to add a
2758
- private access settings object. - Key configuration ID for managed services (control plane storage,
2759
- such as notebook source and Databricks SQL queries). Databricks does not directly encrypt the data
2760
- with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK)
2761
- that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to
2762
- encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK
2763
- for managed services, adding this ID enables managed services encryption for new or updated data.
2764
- Existing managed services data that existed before adding the key remains not encrypted with the DEK
2765
- until it is modified. If the workspace already has customer-managed keys for managed services, this
2766
- request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. - Key
2767
- configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this
2768
- only if the workspace does not already have a customer-managed key configuration for workspace
2769
- storage. - Private access settings ID to add PrivateLink support. You can add or update the private
2770
- access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of
2771
- connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on
2772
- a workspace. - Custom tags. Given you provide an empty custom tags, the update would not be applied. -
2773
- Network connectivity configuration ID to add serverless stable IP support. You can add or update the
2774
- network connectivity configuration ID to ensure the workspace uses the same set of stable IP CIDR
2775
- blocks to access your resources. You cannot remove a network connectivity configuration from the
2776
- workspace once attached, you can only switch to another one.
2777
-
2778
- **Important**: To update a running workspace, your workspace must have no running compute resources
2779
- that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose
2780
- clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not
2781
- terminate all cluster instances in the workspace before calling this API, the request will fail.
2782
-
2783
- ### Wait until changes take effect. After calling the `PATCH` operation to update the workspace
2784
- configuration, make repeated `GET` requests with the workspace ID and check the workspace status and
2785
- the status of the fields. * For workspaces with a Databricks-managed VPC, the workspace status becomes
2786
- `PROVISIONING` temporarily (typically under 20 minutes). If the workspace update is successful, the
2787
- workspace status changes to `RUNNING`. Note that you can also check the workspace status in the
2788
- [Account Console]. However, you cannot use or create clusters for another 20 minutes after that status
2789
- change. This results in a total of up to 40 minutes in which you cannot create clusters. If you create
2790
- or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could
2791
- cause other unexpected behavior. * For workspaces with a customer-managed VPC, the workspace status
2792
- stays at status `RUNNING` and the VPC change happens immediately. A change to the storage
2793
- customer-managed key configuration ID might take a few minutes to update, so continue to check the
2794
- workspace until you observe that it has been updated. If the update fails, the workspace might revert
2795
- silently to its original configuration. After the workspace has been updated, you cannot use or create
2796
- clusters for another 20 minutes. If you create or use clusters before this time interval elapses,
2797
- clusters do not launch successfully, fail, or could cause other unexpected behavior.
2798
-
2799
- If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the changes
2800
- to fully take effect. During the 20 minute wait, it is important that you stop all REST API calls to
2801
- the DBFS API. If you are modifying _only the managed services key configuration_, you can omit the 20
2802
- minute wait.
2803
-
2804
- **Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment
2805
- types and subscription types. If you have questions about availability, contact your Databricks
2806
- representative.
2807
-
2808
- This operation is available only if your account is on the E2 version of the platform or on a select
2809
- custom plan that allows multiple workspaces per account.
2810
-
2811
- [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
2812
- [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
2742
+ """Updates a workspace.
2813
2743
 
2814
2744
  :param workspace_id: int
2815
- Workspace ID.
2816
- :param aws_region: str (optional)
2817
- The AWS region of the workspace's data plane (for example, `us-west-2`). This parameter is available
2818
- only for updating failed workspaces.
2819
- :param credentials_id: str (optional)
2820
- ID of the workspace's credential configuration object. This parameter is available for updating both
2821
- failed and running workspaces.
2822
- :param custom_tags: Dict[str,str] (optional)
2823
- The custom tags key-value pairing that is attached to this workspace. The key-value pair is a string
2824
- of utf-8 characters. The value can be an empty string, with maximum length of 255 characters. The
2825
- key can be of maximum length of 127 characters, and cannot be empty.
2826
- :param managed_services_customer_managed_key_id: str (optional)
2827
- The ID of the workspace's managed services encryption key configuration object. This parameter is
2828
- available only for updating failed workspaces.
2829
- :param network_connectivity_config_id: str (optional)
2830
- :param network_id: str (optional)
2831
- The ID of the workspace's network configuration object. Used only if you already use a
2832
- customer-managed VPC. For failed workspaces only, you can switch from a Databricks-managed VPC to a
2833
- customer-managed VPC by updating the workspace to add a network configuration ID.
2834
- :param private_access_settings_id: str (optional)
2835
- The ID of the workspace's private access settings configuration object. This parameter is available
2836
- only for updating failed workspaces.
2837
- :param storage_configuration_id: str (optional)
2838
- The ID of the workspace's storage configuration object. This parameter is available only for
2839
- updating failed workspaces.
2840
- :param storage_customer_managed_key_id: str (optional)
2841
- The ID of the key configuration object for workspace storage. This parameter is available for
2842
- updating both failed and running workspaces.
2745
+ A unique integer ID for the workspace.
2746
+ :param customer_facing_workspace: :class:`Workspace`
2747
+ :param update_mask: str (optional)
2748
+ The field mask must be a single string, with multiple fields separated by commas (no spaces). The
2749
+ field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g.,
2750
+ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only
2751
+ the entire collection field can be specified. Field names must exactly match the resource field
2752
+ names.
2753
+
2754
+ A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
2755
+ fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API
2756
+ changes in the future.
2843
2757
 
2844
2758
  :returns:
2845
2759
  Long-running operation waiter for :class:`Workspace`.
2846
2760
  See :method:wait_get_workspace_running for more details.
2847
2761
  """
2848
- body = {}
2849
- if aws_region is not None:
2850
- body["aws_region"] = aws_region
2851
- if credentials_id is not None:
2852
- body["credentials_id"] = credentials_id
2853
- if custom_tags is not None:
2854
- body["custom_tags"] = custom_tags
2855
- if managed_services_customer_managed_key_id is not None:
2856
- body["managed_services_customer_managed_key_id"] = managed_services_customer_managed_key_id
2857
- if network_connectivity_config_id is not None:
2858
- body["network_connectivity_config_id"] = network_connectivity_config_id
2859
- if network_id is not None:
2860
- body["network_id"] = network_id
2861
- if private_access_settings_id is not None:
2862
- body["private_access_settings_id"] = private_access_settings_id
2863
- if storage_configuration_id is not None:
2864
- body["storage_configuration_id"] = storage_configuration_id
2865
- if storage_customer_managed_key_id is not None:
2866
- body["storage_customer_managed_key_id"] = storage_customer_managed_key_id
2762
+ body = customer_facing_workspace.as_dict()
2763
+ query = {}
2764
+ if update_mask is not None:
2765
+ query["update_mask"] = update_mask
2867
2766
  headers = {
2868
2767
  "Accept": "application/json",
2869
2768
  "Content-Type": "application/json",
2870
2769
  }
2871
2770
 
2872
2771
  op_response = self._api.do(
2873
- "PATCH", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", body=body, headers=headers
2772
+ "PATCH",
2773
+ f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}",
2774
+ query=query,
2775
+ body=body,
2776
+ headers=headers,
2874
2777
  )
2875
2778
  return Wait(
2876
- self.wait_get_workspace_running, response=UpdateResponse.from_dict(op_response), workspace_id=workspace_id
2779
+ self.wait_get_workspace_running,
2780
+ response=Workspace.from_dict(op_response),
2781
+ workspace_id=op_response["workspace_id"],
2877
2782
  )
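A minimal sketch of the reworked update contract: pass a Workspace carrying only the new values and an update_mask naming exactly the fields to replace (the ID and tag values are hypothetical):

from databricks.sdk import AccountClient
from databricks.sdk.service.provisioning import Workspace

a = AccountClient()
ws = a.workspaces.update(
    workspace_id=1234567890,  # hypothetical ID
    customer_facing_workspace=Workspace(custom_tags={"team": "data-eng"}),
    update_mask="custom_tags",  # only this field is replaced
).result()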
2878
2783
 
2879
2784
  def update_and_wait(
2880
2785
  self,
2881
2786
  workspace_id: int,
2787
+ customer_facing_workspace: Workspace,
2882
2788
  *,
2883
- aws_region: Optional[str] = None,
2884
- credentials_id: Optional[str] = None,
2885
- custom_tags: Optional[Dict[str, str]] = None,
2886
- managed_services_customer_managed_key_id: Optional[str] = None,
2887
- network_connectivity_config_id: Optional[str] = None,
2888
- network_id: Optional[str] = None,
2889
- private_access_settings_id: Optional[str] = None,
2890
- storage_configuration_id: Optional[str] = None,
2891
- storage_customer_managed_key_id: Optional[str] = None,
2789
+ update_mask: Optional[str] = None,
2892
2790
  timeout=timedelta(minutes=20),
2893
2791
  ) -> Workspace:
2894
2792
  return self.update(
2895
- aws_region=aws_region,
2896
- credentials_id=credentials_id,
2897
- custom_tags=custom_tags,
2898
- managed_services_customer_managed_key_id=managed_services_customer_managed_key_id,
2899
- network_connectivity_config_id=network_connectivity_config_id,
2900
- network_id=network_id,
2901
- private_access_settings_id=private_access_settings_id,
2902
- storage_configuration_id=storage_configuration_id,
2903
- storage_customer_managed_key_id=storage_customer_managed_key_id,
2904
- workspace_id=workspace_id,
2793
+ customer_facing_workspace=customer_facing_workspace, update_mask=update_mask, workspace_id=workspace_id
2905
2794
  ).result(timeout=timeout)