pulumi-databricks 1.77.0a1762276204__py3-none-any.whl → 1.78.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. pulumi_databricks/_inputs.py +117 -120
  2. pulumi_databricks/account_federation_policy.py +24 -0
  3. pulumi_databricks/account_network_policy.py +50 -0
  4. pulumi_databricks/alert_v2.py +72 -0
  5. pulumi_databricks/app.py +64 -0
  6. pulumi_databricks/apps_settings_custom_template.py +66 -0
  7. pulumi_databricks/catalog.py +7 -7
  8. pulumi_databricks/cluster_policy.py +120 -0
  9. pulumi_databricks/config/__init__.pyi +4 -0
  10. pulumi_databricks/config/vars.py +8 -0
  11. pulumi_databricks/database_synced_database_table.py +212 -0
  12. pulumi_databricks/external_location.py +131 -7
  13. pulumi_databricks/file.py +2 -2
  14. pulumi_databricks/get_aws_assume_role_policy.py +14 -14
  15. pulumi_databricks/get_aws_bucket_policy.py +10 -10
  16. pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py +10 -10
  17. pulumi_databricks/get_aws_unity_catalog_policy.py +10 -10
  18. pulumi_databricks/get_cluster.py +54 -0
  19. pulumi_databricks/get_current_config.py +4 -4
  20. pulumi_databricks/get_metastore.py +6 -6
  21. pulumi_databricks/get_notebook.py +20 -1
  22. pulumi_databricks/get_service_principals.py +64 -0
  23. pulumi_databricks/instance_profile.py +0 -182
  24. pulumi_databricks/metastore.py +81 -7
  25. pulumi_databricks/metastore_data_access.py +48 -0
  26. pulumi_databricks/mlflow_webhook.py +4 -4
  27. pulumi_databricks/mws_credentials.py +10 -10
  28. pulumi_databricks/mws_customer_managed_keys.py +0 -288
  29. pulumi_databricks/mws_log_delivery.py +146 -0
  30. pulumi_databricks/mws_storage_configurations.py +16 -16
  31. pulumi_databricks/mws_vpc_endpoint.py +56 -56
  32. pulumi_databricks/mws_workspaces.py +85 -51
  33. pulumi_databricks/notebook.py +49 -0
  34. pulumi_databricks/outputs.py +99 -76
  35. pulumi_databricks/permission_assignment.py +49 -0
  36. pulumi_databricks/permissions.py +6 -6
  37. pulumi_databricks/provider.py +36 -1
  38. pulumi_databricks/pulumi-plugin.json +1 -1
  39. pulumi_databricks/recipient.py +74 -0
  40. pulumi_databricks/registered_model.py +7 -7
  41. pulumi_databricks/schema.py +7 -7
  42. pulumi_databricks/service_principal_federation_policy.py +28 -0
  43. pulumi_databricks/sql_table.py +7 -7
  44. pulumi_databricks/volume.py +7 -7
  45. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/METADATA +1 -1
  46. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/RECORD +48 -48
  47. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/WHEEL +0 -0
  48. {pulumi_databricks-1.77.0a1762276204.dist-info → pulumi_databricks-1.78.0.dist-info}/top_level.txt +0 -0
@@ -78,6 +78,38 @@ def get_service_principals(application_ids: Optional[Sequence[_builtins.str]] =
78
78
 
79
79
  > This data source can be used with an account or workspace-level provider.
80
80
 
81
+ ## Example Usage
82
+
83
+ Adding all service principals whose display name contains `my-spn` to the admin group
84
+
85
+ ```python
86
+ import pulumi
87
+ import pulumi_databricks as databricks
88
+ import pulumi_std as std
89
+
90
+ admins = databricks.get_group(display_name="admins")
91
+ spns = databricks.get_service_principals(display_name_contains="my-spn")
92
+ spn = {__key: databricks.get_service_principal(application_id=__value) for __key, __value in std.toset(input=spns.application_ids).result}
93
+ my_member_spn = []
94
+ for range in [{"key": k, "value": v} for [k, v] in enumerate(std.toset(input=spns.application_ids).result)]:
95
+ my_member_spn.append(databricks.GroupMember(f"my_member_spn-{range['key']}",
96
+ group_id=admins.id,
97
+ member_id=spn[range["value"]].sp_id))
98
+ ```
99
+
100
+ ## Related Resources
101
+
102
+ The following resources are used in the same context:
103
+
104
+ - End to end workspace management guide.
105
+ - get_current_user data to retrieve information about the User or databricks_service_principal that is calling the Databricks REST API.
106
+ - Group to manage [Account-level](https://docs.databricks.com/aws/en/admin/users-groups/groups) or [Workspace-level](https://docs.databricks.com/aws/en/admin/users-groups/workspace-local-groups) groups.
107
+ - Group data to retrieve information about Group members, entitlements and instance profiles.
108
+ - GroupInstanceProfile to attach InstanceProfile (AWS) to databricks_group.
109
+ - GroupMember to attach users and groups as group members.
110
+ - Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace.
111
+ - databricks_service_principal to manage service principals.
112
+
81
113
 
82
114
  :param Sequence[_builtins.str] application_ids: List of `application_ids` of service principals. Individual service principal can be retrieved using ServicePrincipal data source
83
115
 :param _builtins.str display_name_contains: Only return ServicePrincipal display names that match the given name string
@@ -100,6 +132,38 @@ def get_service_principals_output(application_ids: Optional[pulumi.Input[Optiona
100
132
 
101
133
  > This data source can be used with an account or workspace-level provider.
102
134
 
135
+ ## Example Usage
136
+
137
+ Adding all service principals whose display name contains `my-spn` to the admin group
138
+
139
+ ```python
140
+ import pulumi
141
+ import pulumi_databricks as databricks
142
+ import pulumi_std as std
143
+
144
+ admins = databricks.get_group(display_name="admins")
145
+ spns = databricks.get_service_principals(display_name_contains="my-spn")
146
+ spn = {__key: databricks.get_service_principal(application_id=__value) for __key, __value in std.toset(input=spns.application_ids).result}
147
+ my_member_spn = []
148
+ for range in [{"key": k, "value": v} for [k, v] in enumerate(std.toset(input=spns.application_ids).result)]:
149
+ my_member_spn.append(databricks.GroupMember(f"my_member_spn-{range['key']}",
150
+ group_id=admins.id,
151
+ member_id=spn[range["value"]].sp_id))
152
+ ```
153
+
154
+ ## Related Resources
155
+
156
+ The following resources are used in the same context:
157
+
158
+ - End to end workspace management guide.
159
+ - get_current_user data to retrieve information about User or databricks_service_principal, that is calling Databricks REST API.
160
+ - Group to manage [Account-level](https://docs.databricks.com/aws/en/admin/users-groups/groups) or [Workspace-level](https://docs.databricks.com/aws/en/admin/users-groups/workspace-local-groups) groups.
161
+ - Group data to retrieve information about Group members, entitlements and instance profiles.
162
+ - GroupInstanceProfile to attach InstanceProfile (AWS) to databricks_group.
163
+ - GroupMember to attach users and groups as group members.
164
+ - Permissions to manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace.
165
+ - databricks_service_principal to manage service principals.
166
+
103
167
 
104
168
  :param Sequence[_builtins.str] application_ids: List of `application_ids` of service principals. Individual service principal can be retrieved using ServicePrincipal data source
105
169
 :param _builtins.str display_name_contains: Only return ServicePrincipal display names that match the given name string
@@ -177,62 +177,6 @@ class InstanceProfile(pulumi.CustomResource):
177
177
 
178
178
  > Please switch to StorageCredential with Unity Catalog to manage storage credentials, which provides a better and faster way for managing credential security.
179
179
 
180
- ```python
181
- import pulumi
182
- import pulumi_aws as aws
183
- import pulumi_databricks as databricks
184
-
185
- config = pulumi.Config()
186
- # Role that you've specified on https://accounts.cloud.databricks.com/#aws
187
- crossaccount_role_name = config.require("crossaccountRoleName")
188
- assume_role_for_ec2 = aws.iam.get_policy_document(statements=[{
189
- "effect": "Allow",
190
- "actions": ["sts:AssumeRole"],
191
- "principals": [{
192
- "identifiers": ["ec2.amazonaws.com"],
193
- "type": "Service",
194
- }],
195
- }])
196
- role_for_s3_access = aws.iam.Role("role_for_s3_access",
197
- name="shared-ec2-role-for-s3",
198
- description="Role for shared access",
199
- assume_role_policy=assume_role_for_ec2.json)
200
- pass_role_for_s3_access = aws.iam.get_policy_document_output(statements=[{
201
- "effect": "Allow",
202
- "actions": ["iam:PassRole"],
203
- "resources": [role_for_s3_access.arn],
204
- }])
205
- pass_role_for_s3_access_policy = aws.iam.Policy("pass_role_for_s3_access",
206
- name="shared-pass-role-for-s3-access",
207
- path="/",
208
- policy=pass_role_for_s3_access.json)
209
- cross_account = aws.iam.RolePolicyAttachment("cross_account",
210
- policy_arn=pass_role_for_s3_access_policy.arn,
211
- role=crossaccount_role_name)
212
- shared = aws.iam.InstanceProfile("shared",
213
- name="shared-instance-profile",
214
- role=role_for_s3_access.name)
215
- shared_instance_profile = databricks.InstanceProfile("shared", instance_profile_arn=shared.arn)
216
- latest = databricks.get_spark_version()
217
- smallest = databricks.get_node_type(local_disk=True)
218
- this = databricks.Cluster("this",
219
- cluster_name="Shared Autoscaling",
220
- spark_version=latest.id,
221
- node_type_id=smallest.id,
222
- autotermination_minutes=20,
223
- autoscale={
224
- "min_workers": 1,
225
- "max_workers": 50,
226
- },
227
- aws_attributes={
228
- "instance_profile_arn": shared_instance_profile.id,
229
- "availability": "SPOT",
230
- "zone_id": "us-east-1",
231
- "first_on_demand": 1,
232
- "spot_bid_price_percent": 100,
233
- })
234
- ```
235
-
236
180
  ## Usage with Cluster Policies
237
181
 
238
182
  It is advised to keep all common configurations in Cluster Policies to maintain control of the environments launched, so `Cluster` above could be replaced with `ClusterPolicy`:
@@ -267,41 +211,6 @@ class InstanceProfile(pulumi.CustomResource):
267
211
  instance_profile_id=this.id)
268
212
  ```
269
213
 
270
- ## Usage with Databricks SQL serverless
271
-
272
- When the instance profile ARN and its associated IAM role ARN don't match and the instance profile is intended for use with Databricks SQL serverless, the `iam_role_arn` parameter can be specified.
273
-
274
- ```python
275
- import pulumi
276
- import pulumi_aws as aws
277
- import pulumi_databricks as databricks
278
-
279
- sql_serverless_assume_role = aws.iam.get_policy_document(statements=[{
280
- "actions": ["sts:AssumeRole"],
281
- "principals": [{
282
- "type": "AWS",
283
- "identifiers": ["arn:aws:iam::790110701330:role/serverless-customer-resource-role"],
284
- }],
285
- "conditions": [{
286
- "test": "StringEquals",
287
- "variable": "sts:ExternalID",
288
- "values": [
289
- "databricks-serverless-<YOUR_WORKSPACE_ID1>",
290
- "databricks-serverless-<YOUR_WORKSPACE_ID2>",
291
- ],
292
- }],
293
- }])
294
- this = aws.iam.Role("this",
295
- name="my-databricks-sql-serverless-role",
296
- assume_role_policy=sql_serverless_assume_role.json)
297
- this_instance_profile = aws.iam.InstanceProfile("this",
298
- name="my-databricks-sql-serverless-instance-profile",
299
- role=this.name)
300
- this_instance_profile2 = databricks.InstanceProfile("this",
301
- instance_profile_arn=this_instance_profile.arn,
302
- iam_role_arn=this.arn)
303
- ```
304
-
305
214
  ## Import
306
215
 
307
216
  The resource instance profile can be imported using the ARN of it
@@ -344,62 +253,6 @@ class InstanceProfile(pulumi.CustomResource):
344
253
 
345
254
  > Please switch to StorageCredential with Unity Catalog to manage storage credentials, which provides a better and faster way for managing credential security.
346
255
 
347
- ```python
348
- import pulumi
349
- import pulumi_aws as aws
350
- import pulumi_databricks as databricks
351
-
352
- config = pulumi.Config()
353
- # Role that you've specified on https://accounts.cloud.databricks.com/#aws
354
- crossaccount_role_name = config.require("crossaccountRoleName")
355
- assume_role_for_ec2 = aws.iam.get_policy_document(statements=[{
356
- "effect": "Allow",
357
- "actions": ["sts:AssumeRole"],
358
- "principals": [{
359
- "identifiers": ["ec2.amazonaws.com"],
360
- "type": "Service",
361
- }],
362
- }])
363
- role_for_s3_access = aws.iam.Role("role_for_s3_access",
364
- name="shared-ec2-role-for-s3",
365
- description="Role for shared access",
366
- assume_role_policy=assume_role_for_ec2.json)
367
- pass_role_for_s3_access = aws.iam.get_policy_document_output(statements=[{
368
- "effect": "Allow",
369
- "actions": ["iam:PassRole"],
370
- "resources": [role_for_s3_access.arn],
371
- }])
372
- pass_role_for_s3_access_policy = aws.iam.Policy("pass_role_for_s3_access",
373
- name="shared-pass-role-for-s3-access",
374
- path="/",
375
- policy=pass_role_for_s3_access.json)
376
- cross_account = aws.iam.RolePolicyAttachment("cross_account",
377
- policy_arn=pass_role_for_s3_access_policy.arn,
378
- role=crossaccount_role_name)
379
- shared = aws.iam.InstanceProfile("shared",
380
- name="shared-instance-profile",
381
- role=role_for_s3_access.name)
382
- shared_instance_profile = databricks.InstanceProfile("shared", instance_profile_arn=shared.arn)
383
- latest = databricks.get_spark_version()
384
- smallest = databricks.get_node_type(local_disk=True)
385
- this = databricks.Cluster("this",
386
- cluster_name="Shared Autoscaling",
387
- spark_version=latest.id,
388
- node_type_id=smallest.id,
389
- autotermination_minutes=20,
390
- autoscale={
391
- "min_workers": 1,
392
- "max_workers": 50,
393
- },
394
- aws_attributes={
395
- "instance_profile_arn": shared_instance_profile.id,
396
- "availability": "SPOT",
397
- "zone_id": "us-east-1",
398
- "first_on_demand": 1,
399
- "spot_bid_price_percent": 100,
400
- })
401
- ```
402
-
403
256
  ## Usage with Cluster Policies
404
257
 
405
258
  It is advised to keep all common configurations in Cluster Policies to maintain control of the environments launched, so `Cluster` above could be replaced with `ClusterPolicy`:
@@ -434,41 +287,6 @@ class InstanceProfile(pulumi.CustomResource):
434
287
  instance_profile_id=this.id)
435
288
  ```
436
289
 
437
- ## Usage with Databricks SQL serverless
438
-
439
- When the instance profile ARN and its associated IAM role ARN don't match and the instance profile is intended for use with Databricks SQL serverless, the `iam_role_arn` parameter can be specified.
440
-
441
- ```python
442
- import pulumi
443
- import pulumi_aws as aws
444
- import pulumi_databricks as databricks
445
-
446
- sql_serverless_assume_role = aws.iam.get_policy_document(statements=[{
447
- "actions": ["sts:AssumeRole"],
448
- "principals": [{
449
- "type": "AWS",
450
- "identifiers": ["arn:aws:iam::790110701330:role/serverless-customer-resource-role"],
451
- }],
452
- "conditions": [{
453
- "test": "StringEquals",
454
- "variable": "sts:ExternalID",
455
- "values": [
456
- "databricks-serverless-<YOUR_WORKSPACE_ID1>",
457
- "databricks-serverless-<YOUR_WORKSPACE_ID2>",
458
- ],
459
- }],
460
- }])
461
- this = aws.iam.Role("this",
462
- name="my-databricks-sql-serverless-role",
463
- assume_role_policy=sql_serverless_assume_role.json)
464
- this_instance_profile = aws.iam.InstanceProfile("this",
465
- name="my-databricks-sql-serverless-instance-profile",
466
- role=this.name)
467
- this_instance_profile2 = databricks.InstanceProfile("this",
468
- instance_profile_arn=this_instance_profile.arn,
469
- iam_role_arn=this.arn)
470
- ```
471
-
472
290
  ## Import
473
291
 
474
292
  The resource instance profile can be imported using the ARN of it
@@ -45,7 +45,7 @@ class MetastoreArgs:
45
45
  :param pulumi.Input[_builtins.str] name: Name of metastore.
46
46
  :param pulumi.Input[_builtins.str] owner: Username/groupname/sp application_id of the metastore owner.
47
47
  :param pulumi.Input[_builtins.str] region: The region of the metastore
48
- :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
48
+ :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
49
49
  """
50
50
  if cloud is not None:
51
51
  pulumi.set(__self__, "cloud", cloud)
@@ -224,7 +224,7 @@ class MetastoreArgs:
224
224
  @pulumi.getter(name="storageRoot")
225
225
  def storage_root(self) -> Optional[pulumi.Input[_builtins.str]]:
226
226
  """
227
- Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
227
+ Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
228
228
  """
229
229
  return pulumi.get(self, "storage_root")
230
230
 
@@ -289,7 +289,7 @@ class _MetastoreState:
289
289
  :param pulumi.Input[_builtins.str] name: Name of metastore.
290
290
  :param pulumi.Input[_builtins.str] owner: Username/groupname/sp application_id of the metastore owner.
291
291
  :param pulumi.Input[_builtins.str] region: The region of the metastore
292
- :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
292
+ :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
293
293
  """
294
294
  if cloud is not None:
295
295
  pulumi.set(__self__, "cloud", cloud)
@@ -468,7 +468,7 @@ class _MetastoreState:
468
468
  @pulumi.getter(name="storageRoot")
469
469
  def storage_root(self) -> Optional[pulumi.Input[_builtins.str]]:
470
470
  """
471
- Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
471
+ Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
472
472
  """
473
473
  return pulumi.get(self, "storage_root")
474
474
 
@@ -558,6 +558,43 @@ class Metastore(pulumi.CustomResource):
558
558
 
559
559
  For Azure
560
560
 
561
+ ```python
562
+ import pulumi
563
+ import pulumi_databricks as databricks
564
+ import pulumi_std as std
565
+
566
+ this = databricks.Metastore("this",
567
+ name="primary",
568
+ storage_root=std.format(input="abfss://%s@%s.dfs.core.windows.net/",
569
+ args=[
570
+ unity_catalog["name"],
571
+ unity_catalog_azurerm_storage_account["name"],
572
+ ]).result,
573
+ owner="uc admins",
574
+ region="eastus",
575
+ force_destroy=True)
576
+ this_metastore_assignment = databricks.MetastoreAssignment("this",
577
+ metastore_id=this.id,
578
+ workspace_id=workspace_id)
579
+ ```
580
+
581
+ For GCP
582
+
583
+ ```python
584
+ import pulumi
585
+ import pulumi_databricks as databricks
586
+
587
+ this = databricks.Metastore("this",
588
+ name="primary",
589
+ storage_root=f"gs://{unity_metastore['name']}",
590
+ owner="uc admins",
591
+ region=us_east1,
592
+ force_destroy=True)
593
+ this_metastore_assignment = databricks.MetastoreAssignment("this",
594
+ metastore_id=this.id,
595
+ workspace_id=workspace_id)
596
+ ```
597
+
561
598
  ## Import
562
599
 
563
600
  This resource can be imported by ID:
@@ -589,7 +626,7 @@ class Metastore(pulumi.CustomResource):
589
626
  :param pulumi.Input[_builtins.str] name: Name of metastore.
590
627
  :param pulumi.Input[_builtins.str] owner: Username/groupname/sp application_id of the metastore owner.
591
628
  :param pulumi.Input[_builtins.str] region: The region of the metastore
592
- :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
629
+ :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
593
630
  """
594
631
  ...
595
632
  @overload
@@ -627,6 +664,43 @@ class Metastore(pulumi.CustomResource):
627
664
 
628
665
  For Azure
629
666
 
667
+ ```python
668
+ import pulumi
669
+ import pulumi_databricks as databricks
670
+ import pulumi_std as std
671
+
672
+ this = databricks.Metastore("this",
673
+ name="primary",
674
+ storage_root=std.format(input="abfss://%s@%s.dfs.core.windows.net/",
675
+ args=[
676
+ unity_catalog["name"],
677
+ unity_catalog_azurerm_storage_account["name"],
678
+ ]).result,
679
+ owner="uc admins",
680
+ region="eastus",
681
+ force_destroy=True)
682
+ this_metastore_assignment = databricks.MetastoreAssignment("this",
683
+ metastore_id=this.id,
684
+ workspace_id=workspace_id)
685
+ ```
686
+
687
+ For GCP
688
+
689
+ ```python
690
+ import pulumi
691
+ import pulumi_databricks as databricks
692
+
693
+ this = databricks.Metastore("this",
694
+ name="primary",
695
+ storage_root=f"gs://{unity_metastore['name']}",
696
+ owner="uc admins",
697
+ region=us_east1,
698
+ force_destroy=True)
699
+ this_metastore_assignment = databricks.MetastoreAssignment("this",
700
+ metastore_id=this.id,
701
+ workspace_id=workspace_id)
702
+ ```
703
+
630
704
  ## Import
631
705
 
632
706
  This resource can be imported by ID:
@@ -748,7 +822,7 @@ class Metastore(pulumi.CustomResource):
748
822
  :param pulumi.Input[_builtins.str] name: Name of metastore.
749
823
  :param pulumi.Input[_builtins.str] owner: Username/groupname/sp application_id of the metastore owner.
750
824
  :param pulumi.Input[_builtins.str] region: The region of the metastore
751
- :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
825
+ :param pulumi.Input[_builtins.str] storage_root: Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
752
826
  """
753
827
  opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
754
828
 
@@ -863,7 +937,7 @@ class Metastore(pulumi.CustomResource):
863
937
  @pulumi.getter(name="storageRoot")
864
938
  def storage_root(self) -> pulumi.Output[Optional[_builtins.str]]:
865
939
  """
866
- Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
940
+ Path on cloud storage account, where managed `Table` are stored. If the URL contains special characters, such as space, `&`, etc., they should be percent-encoded (space > `%20`, etc.). Change forces creation of a new resource. If no `storage_root` is defined for the metastore, each catalog must have a `storage_root` defined.
867
941
  """
868
942
  return pulumi.get(self, "storage_root")
869
943
 
@@ -479,6 +479,30 @@ class MetastoreDataAccess(pulumi.CustomResource):
479
479
 
480
480
  For Azure using managed identity as credential (recommended)
481
481
 
482
+ ```python
483
+ import pulumi
484
+ import pulumi_databricks as databricks
485
+ import pulumi_std as std
486
+
487
+ this = databricks.Metastore("this",
488
+ name="primary",
489
+ storage_root=std.format(input="abfss://%s@%s.dfs.core.windows.net/",
490
+ args=[
491
+ unity_catalog["name"],
492
+ unity_catalog_azurerm_storage_account["name"],
493
+ ]).result,
494
+ owner="uc admins",
495
+ region="eastus",
496
+ force_destroy=True)
497
+ this_metastore_data_access = databricks.MetastoreDataAccess("this",
498
+ metastore_id=this.id,
499
+ name="mi_dac",
500
+ azure_managed_identity={
501
+ "access_connector_id": access_connector_id,
502
+ },
503
+ is_default=True)
504
+ ```
505
+
482
506
  ## Import
483
507
 
484
508
  This resource can be imported by combination of metastore id and the data access name.
@@ -541,6 +565,30 @@ class MetastoreDataAccess(pulumi.CustomResource):
541
565
 
542
566
  For Azure using managed identity as credential (recommended)
543
567
 
568
+ ```python
569
+ import pulumi
570
+ import pulumi_databricks as databricks
571
+ import pulumi_std as std
572
+
573
+ this = databricks.Metastore("this",
574
+ name="primary",
575
+ storage_root=std.format(input="abfss://%s@%s.dfs.core.windows.net/",
576
+ args=[
577
+ unity_catalog["name"],
578
+ unity_catalog_azurerm_storage_account["name"],
579
+ ]).result,
580
+ owner="uc admins",
581
+ region="eastus",
582
+ force_destroy=True)
583
+ this_metastore_data_access = databricks.MetastoreDataAccess("this",
584
+ metastore_id=this.id,
585
+ name="mi_dac",
586
+ azure_managed_identity={
587
+ "access_connector_id": access_connector_id,
588
+ },
589
+ is_default=True)
590
+ ```
591
+
544
592
  ## Import
545
593
 
546
594
  This resource can be imported by combination of metastore id and the data access name.
@@ -252,9 +252,9 @@ class MlflowWebhook(pulumi.CustomResource):
252
252
  language="PYTHON",
253
253
  content_base64=std.base64encode(input=\"\"\"import json
254
254
 
255
- event_message = dbutils.widgets.get("event_message")
255
+ event_message = dbutils.widgets.get(\\"event_message\\")
256
256
  event_message_dict = json.loads(event_message)
257
- print(f"event data={event_message_dict}")
257
+ print(f\\"event data={event_message_dict}\\")
258
258
  \"\"\").result)
259
259
  this_job = databricks.Job("this",
260
260
  name=f"Pulumi MLflowWebhook Demo ({me.alphanumeric})",
@@ -354,9 +354,9 @@ class MlflowWebhook(pulumi.CustomResource):
354
354
  language="PYTHON",
355
355
  content_base64=std.base64encode(input=\"\"\"import json
356
356
 
357
- event_message = dbutils.widgets.get("event_message")
357
+ event_message = dbutils.widgets.get(\\"event_message\\")
358
358
  event_message_dict = json.loads(event_message)
359
- print(f"event data={event_message_dict}")
359
+ print(f\\"event data={event_message_dict}\\")
360
360
  \"\"\").result)
361
361
  this_job = databricks.Job("this",
362
362
  name=f"Pulumi MLflowWebhook Demo ({me.alphanumeric})",
@@ -249,18 +249,18 @@ class MwsCredentials(pulumi.CustomResource):
249
249
  # Names of created resources will be prefixed with this value
250
250
  prefix = config.require_object("prefix")
251
251
  this = databricks.get_aws_assume_role_policy(external_id=databricks_account_id)
252
- cross_account_role = aws.iam.Role("cross_account_role",
253
- name=f"{prefix}-crossaccount",
252
+ cross_account_role = aws.index.IamRole("cross_account_role",
253
+ name=f{prefix}-crossaccount,
254
254
  assume_role_policy=this.json,
255
255
  tags=tags)
256
256
  this_get_aws_cross_account_policy = databricks.get_aws_cross_account_policy()
257
- this_role_policy = aws.iam.RolePolicy("this",
258
- name=f"{prefix}-policy",
257
+ this_iam_role_policy = aws.index.IamRolePolicy("this",
258
+ name=f{prefix}-policy,
259
259
  role=cross_account_role.id,
260
260
  policy=this_get_aws_cross_account_policy.json)
261
261
  this_mws_credentials = databricks.MwsCredentials("this",
262
262
  credentials_name=f"{prefix}-creds",
263
- role_arn=cross_account_role.arn)
263
+ role_arn=cross_account_role["arn"])
264
264
  ```
265
265
 
266
266
  ## Related Resources
@@ -324,18 +324,18 @@ class MwsCredentials(pulumi.CustomResource):
324
324
  # Names of created resources will be prefixed with this value
325
325
  prefix = config.require_object("prefix")
326
326
  this = databricks.get_aws_assume_role_policy(external_id=databricks_account_id)
327
- cross_account_role = aws.iam.Role("cross_account_role",
328
- name=f"{prefix}-crossaccount",
327
+ cross_account_role = aws.index.IamRole("cross_account_role",
328
+ name=f{prefix}-crossaccount,
329
329
  assume_role_policy=this.json,
330
330
  tags=tags)
331
331
  this_get_aws_cross_account_policy = databricks.get_aws_cross_account_policy()
332
- this_role_policy = aws.iam.RolePolicy("this",
333
- name=f"{prefix}-policy",
332
+ this_iam_role_policy = aws.index.IamRolePolicy("this",
333
+ name=f{prefix}-policy,
334
334
  role=cross_account_role.id,
335
335
  policy=this_get_aws_cross_account_policy.json)
336
336
  this_mws_credentials = databricks.MwsCredentials("this",
337
337
  credentials_name=f"{prefix}-creds",
338
- role_arn=cross_account_role.arn)
338
+ role_arn=cross_account_role["arn"])
339
339
  ```
340
340
 
341
341
  ## Related Resources