pulumi-databricks 1.78.0a1762407761__py3-none-any.whl → 1.79.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_databricks/__init__.py +23 -0
- pulumi_databricks/_inputs.py +1630 -181
- pulumi_databricks/access_control_rule_set.py +81 -0
- pulumi_databricks/account_federation_policy.py +24 -0
- pulumi_databricks/account_network_policy.py +50 -0
- pulumi_databricks/account_setting_v2.py +181 -12
- pulumi_databricks/alert_v2.py +74 -2
- pulumi_databricks/app.py +71 -7
- pulumi_databricks/apps_settings_custom_template.py +102 -0
- pulumi_databricks/catalog.py +8 -8
- pulumi_databricks/cluster_policy.py +120 -0
- pulumi_databricks/config/__init__.pyi +4 -0
- pulumi_databricks/config/vars.py +8 -0
- pulumi_databricks/credential.py +7 -7
- pulumi_databricks/dashboard.py +94 -0
- pulumi_databricks/data_quality_monitor.py +2 -2
- pulumi_databricks/data_quality_refresh.py +78 -2
- pulumi_databricks/database_synced_database_table.py +212 -0
- pulumi_databricks/external_location.py +131 -7
- pulumi_databricks/feature_engineering_feature.py +52 -19
- pulumi_databricks/feature_engineering_kafka_config.py +463 -0
- pulumi_databricks/feature_engineering_materialized_feature.py +47 -0
- pulumi_databricks/file.py +2 -2
- pulumi_databricks/get_account_setting_v2.py +16 -16
- pulumi_databricks/get_alert_v2.py +2 -2
- pulumi_databricks/get_alerts_v2.py +2 -2
- pulumi_databricks/get_aws_assume_role_policy.py +14 -14
- pulumi_databricks/get_aws_bucket_policy.py +10 -10
- pulumi_databricks/get_aws_unity_catalog_assume_role_policy.py +10 -10
- pulumi_databricks/get_aws_unity_catalog_policy.py +10 -10
- pulumi_databricks/get_cluster.py +54 -0
- pulumi_databricks/get_current_config.py +4 -4
- pulumi_databricks/get_dashboards.py +32 -0
- pulumi_databricks/get_data_quality_monitor.py +2 -2
- pulumi_databricks/get_data_quality_monitors.py +2 -2
- pulumi_databricks/get_data_quality_refresh.py +2 -2
- pulumi_databricks/get_data_quality_refreshes.py +2 -2
- pulumi_databricks/get_feature_engineering_feature.py +12 -1
- pulumi_databricks/get_feature_engineering_kafka_config.py +182 -0
- pulumi_databricks/get_feature_engineering_kafka_configs.py +103 -0
- pulumi_databricks/get_feature_engineering_materialized_feature.py +16 -2
- pulumi_databricks/get_metastore.py +6 -6
- pulumi_databricks/get_notebook.py +20 -1
- pulumi_databricks/get_policy_info.py +36 -2
- pulumi_databricks/get_policy_infos.py +34 -2
- pulumi_databricks/get_service_principals.py +93 -7
- pulumi_databricks/get_spark_version.py +2 -2
- pulumi_databricks/get_tag_policies.py +2 -2
- pulumi_databricks/get_tag_policy.py +2 -2
- pulumi_databricks/get_users.py +194 -0
- pulumi_databricks/get_workspace_entity_tag_assignment.py +180 -0
- pulumi_databricks/get_workspace_entity_tag_assignments.py +171 -0
- pulumi_databricks/get_workspace_setting_v2.py +16 -16
- pulumi_databricks/instance_profile.py +0 -182
- pulumi_databricks/lakehouse_monitor.py +2 -2
- pulumi_databricks/metastore.py +81 -7
- pulumi_databricks/metastore_data_access.py +48 -0
- pulumi_databricks/mlflow_webhook.py +4 -4
- pulumi_databricks/mws_credentials.py +10 -10
- pulumi_databricks/mws_customer_managed_keys.py +0 -288
- pulumi_databricks/mws_log_delivery.py +146 -0
- pulumi_databricks/mws_storage_configurations.py +16 -16
- pulumi_databricks/mws_vpc_endpoint.py +56 -56
- pulumi_databricks/mws_workspaces.py +115 -55
- pulumi_databricks/notebook.py +49 -0
- pulumi_databricks/outputs.py +2017 -240
- pulumi_databricks/permission_assignment.py +49 -0
- pulumi_databricks/permissions.py +6 -6
- pulumi_databricks/pipeline.py +7 -7
- pulumi_databricks/policy_info.py +122 -2
- pulumi_databricks/provider.py +36 -1
- pulumi_databricks/pulumi-plugin.json +1 -1
- pulumi_databricks/recipient.py +74 -0
- pulumi_databricks/registered_model.py +7 -7
- pulumi_databricks/rfa_access_request_destinations.py +86 -19
- pulumi_databricks/schema.py +7 -7
- pulumi_databricks/service_principal_federation_policy.py +28 -0
- pulumi_databricks/sql_table.py +7 -7
- pulumi_databricks/tag_policy.py +2 -2
- pulumi_databricks/volume.py +7 -7
- pulumi_databricks/workspace_entity_tag_assignment.py +375 -0
- pulumi_databricks/workspace_setting_v2.py +181 -12
- {pulumi_databricks-1.78.0a1762407761.dist-info → pulumi_databricks-1.79.0.dist-info}/METADATA +1 -1
- {pulumi_databricks-1.78.0a1762407761.dist-info → pulumi_databricks-1.79.0.dist-info}/RECORD +86 -79
- {pulumi_databricks-1.78.0a1762407761.dist-info → pulumi_databricks-1.79.0.dist-info}/WHEEL +0 -0
- {pulumi_databricks-1.78.0a1762407761.dist-info → pulumi_databricks-1.79.0.dist-info}/top_level.txt +0 -0
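Before reading the per-file changes below, it can help to confirm which of the two versions is actually installed in a given environment. A minimal sketch using only the standard library (the distribution name is taken from the wheel filenames above):

```python
from importlib.metadata import version, PackageNotFoundError

# Reports the installed pulumi-databricks version, e.g. "1.79.0" after upgrading
# from the 1.78.0a1762407761 pre-release listed above.
try:
    print(version("pulumi-databricks"))
except PackageNotFoundError:
    print("pulumi-databricks is not installed in this environment")
```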
@@ -13,6 +13,8 @@ if sys.version_info >= (3, 11):
 else:
     from typing_extensions import NotRequired, TypedDict, TypeAlias
 from . import _utilities
+from . import outputs
+from ._inputs import *
 
 __all__ = ['PermissionAssignmentArgs', 'PermissionAssignment']
 
@@ -22,6 +24,7 @@ class PermissionAssignmentArgs:
                  permissions: pulumi.Input[Sequence[pulumi.Input[_builtins.str]]],
                  group_name: Optional[pulumi.Input[_builtins.str]] = None,
                  principal_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 provider_config: Optional[pulumi.Input['PermissionAssignmentProviderConfigArgs']] = None,
                  service_principal_name: Optional[pulumi.Input[_builtins.str]] = None,
                  user_name: Optional[pulumi.Input[_builtins.str]] = None):
         """
@@ -31,6 +34,7 @@ class PermissionAssignmentArgs:
                * `"ADMIN"` - Adds principal to the workspace `admins` group. This gives workspace admin privileges to manage users and groups, workspace configurations, and more.
         :param pulumi.Input[_builtins.str] group_name: the group name to assign to a workspace.
         :param pulumi.Input[_builtins.str] principal_id: Databricks ID of the user, service principal, or group. The principal ID can be retrieved using the account-level SCIM API, or using databricks_user, ServicePrincipal or Group data sources with account API (and has to be an account admin). A more sensible approach is to retrieve the list of `principal_id` as outputs from another Pulumi stack.
+        :param pulumi.Input['PermissionAssignmentProviderConfigArgs'] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
         :param pulumi.Input[_builtins.str] service_principal_name: the application ID of service principal to assign to a workspace.
         :param pulumi.Input[_builtins.str] user_name: the user name (email) to assign to a workspace.
         """
@@ -39,6 +43,8 @@ class PermissionAssignmentArgs:
             pulumi.set(__self__, "group_name", group_name)
         if principal_id is not None:
             pulumi.set(__self__, "principal_id", principal_id)
+        if provider_config is not None:
+            pulumi.set(__self__, "provider_config", provider_config)
         if service_principal_name is not None:
             pulumi.set(__self__, "service_principal_name", service_principal_name)
         if user_name is not None:
@@ -82,6 +88,18 @@ class PermissionAssignmentArgs:
     def principal_id(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "principal_id", value)
 
+    @_builtins.property
+    @pulumi.getter(name="providerConfig")
+    def provider_config(self) -> Optional[pulumi.Input['PermissionAssignmentProviderConfigArgs']]:
+        """
+        Configure the provider for management through account provider. This block consists of the following fields:
+        """
+        return pulumi.get(self, "provider_config")
+
+    @provider_config.setter
+    def provider_config(self, value: Optional[pulumi.Input['PermissionAssignmentProviderConfigArgs']]):
+        pulumi.set(self, "provider_config", value)
+
     @_builtins.property
     @pulumi.getter(name="servicePrincipalName")
     def service_principal_name(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -114,6 +132,7 @@ class _PermissionAssignmentState:
                  group_name: Optional[pulumi.Input[_builtins.str]] = None,
                  permissions: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  principal_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 provider_config: Optional[pulumi.Input['PermissionAssignmentProviderConfigArgs']] = None,
                  service_principal_name: Optional[pulumi.Input[_builtins.str]] = None,
                  user_name: Optional[pulumi.Input[_builtins.str]] = None):
         """
@@ -124,6 +143,7 @@ class _PermissionAssignmentState:
                * `"USER"` - Adds principal to the workspace `users` group. This gives basic workspace access.
                * `"ADMIN"` - Adds principal to the workspace `admins` group. This gives workspace admin privileges to manage users and groups, workspace configurations, and more.
         :param pulumi.Input[_builtins.str] principal_id: Databricks ID of the user, service principal, or group. The principal ID can be retrieved using the account-level SCIM API, or using databricks_user, ServicePrincipal or Group data sources with account API (and has to be an account admin). A more sensible approach is to retrieve the list of `principal_id` as outputs from another Pulumi stack.
+        :param pulumi.Input['PermissionAssignmentProviderConfigArgs'] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
         :param pulumi.Input[_builtins.str] service_principal_name: the application ID of service principal to assign to a workspace.
         :param pulumi.Input[_builtins.str] user_name: the user name (email) to assign to a workspace.
         """
@@ -135,6 +155,8 @@ class _PermissionAssignmentState:
             pulumi.set(__self__, "permissions", permissions)
         if principal_id is not None:
             pulumi.set(__self__, "principal_id", principal_id)
+        if provider_config is not None:
+            pulumi.set(__self__, "provider_config", provider_config)
         if service_principal_name is not None:
             pulumi.set(__self__, "service_principal_name", service_principal_name)
         if user_name is not None:
@@ -190,6 +212,18 @@ class _PermissionAssignmentState:
     def principal_id(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "principal_id", value)
 
+    @_builtins.property
+    @pulumi.getter(name="providerConfig")
+    def provider_config(self) -> Optional[pulumi.Input['PermissionAssignmentProviderConfigArgs']]:
+        """
+        Configure the provider for management through account provider. This block consists of the following fields:
+        """
+        return pulumi.get(self, "provider_config")
+
+    @provider_config.setter
+    def provider_config(self, value: Optional[pulumi.Input['PermissionAssignmentProviderConfigArgs']]):
+        pulumi.set(self, "provider_config", value)
+
     @_builtins.property
     @pulumi.getter(name="servicePrincipalName")
     def service_principal_name(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -224,6 +258,7 @@ class PermissionAssignment(pulumi.CustomResource):
                  group_name: Optional[pulumi.Input[_builtins.str]] = None,
                  permissions: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  principal_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 provider_config: Optional[pulumi.Input[Union['PermissionAssignmentProviderConfigArgs', 'PermissionAssignmentProviderConfigArgsDict']]] = None,
                  service_principal_name: Optional[pulumi.Input[_builtins.str]] = None,
                  user_name: Optional[pulumi.Input[_builtins.str]] = None,
                  __props__=None):
@@ -351,6 +386,7 @@ class PermissionAssignment(pulumi.CustomResource):
                * `"USER"` - Adds principal to the workspace `users` group. This gives basic workspace access.
                * `"ADMIN"` - Adds principal to the workspace `admins` group. This gives workspace admin privileges to manage users and groups, workspace configurations, and more.
         :param pulumi.Input[_builtins.str] principal_id: Databricks ID of the user, service principal, or group. The principal ID can be retrieved using the account-level SCIM API, or using databricks_user, ServicePrincipal or Group data sources with account API (and has to be an account admin). A more sensible approach is to retrieve the list of `principal_id` as outputs from another Pulumi stack.
+        :param pulumi.Input[Union['PermissionAssignmentProviderConfigArgs', 'PermissionAssignmentProviderConfigArgsDict']] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
         :param pulumi.Input[_builtins.str] service_principal_name: the application ID of service principal to assign to a workspace.
         :param pulumi.Input[_builtins.str] user_name: the user name (email) to assign to a workspace.
         """
@@ -495,6 +531,7 @@ class PermissionAssignment(pulumi.CustomResource):
                  group_name: Optional[pulumi.Input[_builtins.str]] = None,
                  permissions: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  principal_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 provider_config: Optional[pulumi.Input[Union['PermissionAssignmentProviderConfigArgs', 'PermissionAssignmentProviderConfigArgsDict']]] = None,
                  service_principal_name: Optional[pulumi.Input[_builtins.str]] = None,
                  user_name: Optional[pulumi.Input[_builtins.str]] = None,
                  __props__=None):
@@ -511,6 +548,7 @@ class PermissionAssignment(pulumi.CustomResource):
                 raise TypeError("Missing required property 'permissions'")
             __props__.__dict__["permissions"] = permissions
             __props__.__dict__["principal_id"] = principal_id
+            __props__.__dict__["provider_config"] = provider_config
             __props__.__dict__["service_principal_name"] = service_principal_name
             __props__.__dict__["user_name"] = user_name
             __props__.__dict__["display_name"] = None
@@ -528,6 +566,7 @@ class PermissionAssignment(pulumi.CustomResource):
             group_name: Optional[pulumi.Input[_builtins.str]] = None,
             permissions: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
             principal_id: Optional[pulumi.Input[_builtins.str]] = None,
+            provider_config: Optional[pulumi.Input[Union['PermissionAssignmentProviderConfigArgs', 'PermissionAssignmentProviderConfigArgsDict']]] = None,
             service_principal_name: Optional[pulumi.Input[_builtins.str]] = None,
             user_name: Optional[pulumi.Input[_builtins.str]] = None) -> 'PermissionAssignment':
         """
@@ -543,6 +582,7 @@ class PermissionAssignment(pulumi.CustomResource):
                * `"USER"` - Adds principal to the workspace `users` group. This gives basic workspace access.
                * `"ADMIN"` - Adds principal to the workspace `admins` group. This gives workspace admin privileges to manage users and groups, workspace configurations, and more.
         :param pulumi.Input[_builtins.str] principal_id: Databricks ID of the user, service principal, or group. The principal ID can be retrieved using the account-level SCIM API, or using databricks_user, ServicePrincipal or Group data sources with account API (and has to be an account admin). A more sensible approach is to retrieve the list of `principal_id` as outputs from another Pulumi stack.
+        :param pulumi.Input[Union['PermissionAssignmentProviderConfigArgs', 'PermissionAssignmentProviderConfigArgsDict']] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
         :param pulumi.Input[_builtins.str] service_principal_name: the application ID of service principal to assign to a workspace.
         :param pulumi.Input[_builtins.str] user_name: the user name (email) to assign to a workspace.
         """
@@ -554,6 +594,7 @@ class PermissionAssignment(pulumi.CustomResource):
         __props__.__dict__["group_name"] = group_name
         __props__.__dict__["permissions"] = permissions
         __props__.__dict__["principal_id"] = principal_id
+        __props__.__dict__["provider_config"] = provider_config
         __props__.__dict__["service_principal_name"] = service_principal_name
         __props__.__dict__["user_name"] = user_name
         return PermissionAssignment(resource_name, opts=opts, __props__=__props__)
@@ -592,6 +633,14 @@ class PermissionAssignment(pulumi.CustomResource):
         """
         return pulumi.get(self, "principal_id")
 
+    @_builtins.property
+    @pulumi.getter(name="providerConfig")
+    def provider_config(self) -> pulumi.Output[Optional['outputs.PermissionAssignmentProviderConfig']]:
+        """
+        Configure the provider for management through account provider. This block consists of the following fields:
+        """
+        return pulumi.get(self, "provider_config")
+
     @_builtins.property
     @pulumi.getter(name="servicePrincipalName")
     def service_principal_name(self) -> pulumi.Output[_builtins.str]:
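The hunks above thread a new optional `provider_config` input through `PermissionAssignmentArgs`, the state class, and the resource itself. A minimal sketch of how the resource is used from a Pulumi program follows; the `principal_id` value is a placeholder, and the fields inside `PermissionAssignmentProviderConfigArgs` live in `_inputs.py` rather than in this file, so `provider_config` is left out here.

```python
import pulumi
import pulumi_databricks as databricks

# Grant workspace admin rights to an account-level principal.
# The principal ID below is a placeholder; per the docstring it is usually
# retrieved from the account SCIM API or from another stack's outputs.
assignment = databricks.PermissionAssignment(
    "admin-assignment",
    principal_id="1234567890",
    permissions=["ADMIN"],
)

pulumi.export("assigned_principal_id", assignment.principal_id)
```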
pulumi_databricks/permissions.py
CHANGED
@@ -941,12 +941,12 @@ class Permissions(pulumi.CustomResource):
         eng = databricks.Group("eng", display_name="Engineering")
         ldp_demo = databricks.Notebook("ldp_demo",
             content_base64=std.base64encode(input=\"\"\"import dlt
-        json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
+        json_path = \\"/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json\\"
         @dlt.table(
-          comment
+          comment=\\"The raw wikipedia clickstream dataset, ingested from /databricks-datasets.\\"
         )
         def clickstream_raw():
-          return (spark.read.format("json").load(json_path))
+          return (spark.read.format(\\"json\\").load(json_path))
         \"\"\").result,
             language="PYTHON",
             path=f"{me.home}/ldp_demo")
@@ -1778,12 +1778,12 @@ class Permissions(pulumi.CustomResource):
         eng = databricks.Group("eng", display_name="Engineering")
         ldp_demo = databricks.Notebook("ldp_demo",
             content_base64=std.base64encode(input=\"\"\"import dlt
-        json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"
+        json_path = \\"/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json\\"
         @dlt.table(
-          comment
+          comment=\\"The raw wikipedia clickstream dataset, ingested from /databricks-datasets.\\"
         )
         def clickstream_raw():
-          return (spark.read.format("json").load(json_path))
+          return (spark.read.format(\\"json\\").load(json_path))
         \"\"\").result,
             language="PYTHON",
             path=f"{me.home}/ldp_demo")
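Both hunks repair the same embedded docstring example: the quotes inside the notebook source are now escaped as `\\"` so the surrounding `\"\"\"` string stays balanced, and the previously truncated `comment` argument is completed. Unescaped, the notebook body the example encodes is roughly the following (note that `spark` is only defined inside the Databricks runtime that executes the notebook):

```python
import dlt

json_path = "/databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed-json/2015_2_clickstream.json"

@dlt.table(
    comment="The raw wikipedia clickstream dataset, ingested from /databricks-datasets."
)
def clickstream_raw():
    # `spark` is provided by the Databricks notebook/pipeline runtime.
    return (spark.read.format("json").load(json_path))
```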
pulumi_databricks/pipeline.py
CHANGED
@@ -64,7 +64,7 @@ class PipelineArgs:
         The set of arguments for constructing a Pipeline resource.
         :param pulumi.Input[_builtins.bool] allow_duplicate_names: Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is `false`.
         :param pulumi.Input[_builtins.str] budget_policy_id: optional string specifying ID of the budget policy for this Lakeflow Declarative Pipeline.
-        :param pulumi.Input[_builtins.str] catalog: The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        :param pulumi.Input[_builtins.str] catalog: The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         :param pulumi.Input[_builtins.str] channel: optional name of the release channel for Spark version used by Lakeflow Declarative Pipeline. Supported values are: `CURRENT` (default) and `PREVIEW`.
         :param pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]] clusters: blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. *Please note that Lakeflow Declarative Pipeline clusters are supporting only subset of attributes as described in [documentation](https://docs.databricks.com/api/workspace/pipelines/create#clusters).* Also, note that `autoscale` block is extended with the `mode` parameter that controls the autoscaling algorithm (possible values are `ENHANCED` for new, enhanced autoscaling algorithm, or `LEGACY` for old algorithm).
         :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] configuration: An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
@@ -193,7 +193,7 @@ class PipelineArgs:
     @pulumi.getter
     def catalog(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
-        The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         """
         return pulumi.get(self, "catalog")
 
@@ -632,7 +632,7 @@ class _PipelineState:
         Input properties used for looking up and filtering Pipeline resources.
         :param pulumi.Input[_builtins.bool] allow_duplicate_names: Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is `false`.
         :param pulumi.Input[_builtins.str] budget_policy_id: optional string specifying ID of the budget policy for this Lakeflow Declarative Pipeline.
-        :param pulumi.Input[_builtins.str] catalog: The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        :param pulumi.Input[_builtins.str] catalog: The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         :param pulumi.Input[_builtins.str] channel: optional name of the release channel for Spark version used by Lakeflow Declarative Pipeline. Supported values are: `CURRENT` (default) and `PREVIEW`.
         :param pulumi.Input[Sequence[pulumi.Input['PipelineClusterArgs']]] clusters: blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. *Please note that Lakeflow Declarative Pipeline clusters are supporting only subset of attributes as described in [documentation](https://docs.databricks.com/api/workspace/pipelines/create#clusters).* Also, note that `autoscale` block is extended with the `mode` parameter that controls the autoscaling algorithm (possible values are `ENHANCED` for new, enhanced autoscaling algorithm, or `LEGACY` for old algorithm).
         :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] configuration: An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
@@ -761,7 +761,7 @@ class _PipelineState:
     @pulumi.getter
     def catalog(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
-        The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         """
         return pulumi.get(self, "catalog")
 
@@ -1305,7 +1305,7 @@ class Pipeline(pulumi.CustomResource):
         :param pulumi.ResourceOptions opts: Options for the resource.
         :param pulumi.Input[_builtins.bool] allow_duplicate_names: Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is `false`.
         :param pulumi.Input[_builtins.str] budget_policy_id: optional string specifying ID of the budget policy for this Lakeflow Declarative Pipeline.
-        :param pulumi.Input[_builtins.str] catalog: The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        :param pulumi.Input[_builtins.str] catalog: The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         :param pulumi.Input[_builtins.str] channel: optional name of the release channel for Spark version used by Lakeflow Declarative Pipeline. Supported values are: `CURRENT` (default) and `PREVIEW`.
         :param pulumi.Input[Sequence[pulumi.Input[Union['PipelineClusterArgs', 'PipelineClusterArgsDict']]]] clusters: blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. *Please note that Lakeflow Declarative Pipeline clusters are supporting only subset of attributes as described in [documentation](https://docs.databricks.com/api/workspace/pipelines/create#clusters).* Also, note that `autoscale` block is extended with the `mode` parameter that controls the autoscaling algorithm (possible values are `ENHANCED` for new, enhanced autoscaling algorithm, or `LEGACY` for old algorithm).
         :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] configuration: An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
@@ -1594,7 +1594,7 @@ class Pipeline(pulumi.CustomResource):
         :param pulumi.ResourceOptions opts: Options for the resource.
         :param pulumi.Input[_builtins.bool] allow_duplicate_names: Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is `false`.
         :param pulumi.Input[_builtins.str] budget_policy_id: optional string specifying ID of the budget policy for this Lakeflow Declarative Pipeline.
-        :param pulumi.Input[_builtins.str] catalog: The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        :param pulumi.Input[_builtins.str] catalog: The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         :param pulumi.Input[_builtins.str] channel: optional name of the release channel for Spark version used by Lakeflow Declarative Pipeline. Supported values are: `CURRENT` (default) and `PREVIEW`.
         :param pulumi.Input[Sequence[pulumi.Input[Union['PipelineClusterArgs', 'PipelineClusterArgsDict']]]] clusters: blocks - Clusters to run the pipeline. If none is specified, pipelines will automatically select a default cluster configuration for the pipeline. *Please note that Lakeflow Declarative Pipeline clusters are supporting only subset of attributes as described in [documentation](https://docs.databricks.com/api/workspace/pipelines/create#clusters).* Also, note that `autoscale` block is extended with the `mode` parameter that controls the autoscaling algorithm (possible values are `ENHANCED` for new, enhanced autoscaling algorithm, or `LEGACY` for old algorithm).
         :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] configuration: An optional list of values to apply to the entire pipeline. Elements must be formatted as key:value pairs.
@@ -1681,7 +1681,7 @@ class Pipeline(pulumi.CustomResource):
     @pulumi.getter
     def catalog(self) -> pulumi.Output[Optional[_builtins.str]]:
         """
-        The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`).
+        The name of default catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline if you switch from `storage` to `catalog` or vice versa. If pipeline was already created with `catalog` set, the value could be changed.* (Conflicts with `storage`).
         """
         return pulumi.get(self, "catalog")
 
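The only change here is the documented contract for `catalog`: recreation is now forced only when switching between `storage` and `catalog`, while the catalog value itself may change on a pipeline that already uses Unity Catalog. A rough sketch of a catalog-backed pipeline follows; the resource name, catalog name, and configuration values are illustrative placeholders.

```python
import pulumi
import pulumi_databricks as databricks

# Unity Catalog-backed pipeline: `catalog` conflicts with `storage`, and per the
# updated docstring the catalog value can later be changed in place.
pipeline = databricks.Pipeline(
    "clickstream",
    name="clickstream-pipeline",
    catalog="main",
    channel="CURRENT",
    configuration={"pipelines.environment": "dev"},
)
```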
pulumi_databricks/policy_info.py
CHANGED
@@ -551,7 +551,67 @@ class PolicyInfo(pulumi.CustomResource):
                  when_condition: Optional[pulumi.Input[_builtins.str]] = None,
                  __props__=None):
         """
-        [](https://docs.databricks.com/aws/en/release-notes/release-types)
+
+        Attribute-Based Access Control (ABAC) policies in Unity Catalog provide high leverage governance for enforcing compliance policies. With ABAC policies, access is controlled in a hierarchical and scalable manner, based on data attributes rather than specific resources, enabling more flexible and comprehensive access control.
+
+        ABAC policies in Unity Catalog support conditions on governance tags and the user identity. Callers must have the `MANAGE` privilege on a securable to view, create, update, or delete ABAC policies.
+
+        ## Example Usage
+
+        ### Row Filter Policy
+
+        ```python
+        import pulumi
+        import pulumi_databricks as databricks
+
+        pii_row_filter = databricks.PolicyInfo("pii_row_filter",
+            on_securable_type="catalog",
+            on_securable_fullname="main",
+            name="pii_data_policy",
+            policy_type="POLICY_TYPE_ROW_FILTER",
+            for_securable_type="table",
+            to_principals=["account users"],
+            when_condition="hasTag('pii')",
+            match_columns=[{
+                "condition": "hasTag('pii')",
+                "alias": "pii_col",
+            }],
+            row_filter={
+                "function_name": "main.filters.mask_pii_rows",
+                "usings": [{
+                    "alias": "pii_col",
+                }],
+            })
+        ```
+
+        ### Column Mask Policy
+
+        ```python
+        import pulumi
+        import pulumi_databricks as databricks
+
+        sensitive_column_mask = databricks.PolicyInfo("sensitive_column_mask",
+            on_securable_type="schema",
+            on_securable_fullname="main.finance",
+            name="sensitive_data_mask",
+            policy_type="POLICY_TYPE_COLUMN_MASK",
+            for_securable_type="table",
+            to_principals=["account users"],
+            except_principals=["finance_admins"],
+            when_condition="hasTag('pii')",
+            match_columns=[{
+                "condition": "hasTag('pii')",
+                "alias": "sensitive_col",
+            }],
+            column_mask={
+                "function_name": "main.masks.redact_sensitive",
+                "on_column": "sensitive_col",
+                "usings": [{
+                    "constant": "4",
+                }],
+            })
+        ```
 
         ## Import
 
@@ -608,7 +668,67 @@ class PolicyInfo(pulumi.CustomResource):
                  args: PolicyInfoArgs,
                  opts: Optional[pulumi.ResourceOptions] = None):
         """
-        [](https://docs.databricks.com/aws/en/release-notes/release-types)
+
+        Attribute-Based Access Control (ABAC) policies in Unity Catalog provide high leverage governance for enforcing compliance policies. With ABAC policies, access is controlled in a hierarchical and scalable manner, based on data attributes rather than specific resources, enabling more flexible and comprehensive access control.
+
+        ABAC policies in Unity Catalog support conditions on governance tags and the user identity. Callers must have the `MANAGE` privilege on a securable to view, create, update, or delete ABAC policies.
+
+        ## Example Usage
+
+        ### Row Filter Policy
+
+        ```python
+        import pulumi
+        import pulumi_databricks as databricks
+
+        pii_row_filter = databricks.PolicyInfo("pii_row_filter",
+            on_securable_type="catalog",
+            on_securable_fullname="main",
+            name="pii_data_policy",
+            policy_type="POLICY_TYPE_ROW_FILTER",
+            for_securable_type="table",
+            to_principals=["account users"],
+            when_condition="hasTag('pii')",
+            match_columns=[{
+                "condition": "hasTag('pii')",
+                "alias": "pii_col",
+            }],
+            row_filter={
+                "function_name": "main.filters.mask_pii_rows",
+                "usings": [{
+                    "alias": "pii_col",
+                }],
+            })
+        ```
+
+        ### Column Mask Policy
+
+        ```python
+        import pulumi
+        import pulumi_databricks as databricks
+
+        sensitive_column_mask = databricks.PolicyInfo("sensitive_column_mask",
+            on_securable_type="schema",
+            on_securable_fullname="main.finance",
+            name="sensitive_data_mask",
+            policy_type="POLICY_TYPE_COLUMN_MASK",
+            for_securable_type="table",
+            to_principals=["account users"],
+            except_principals=["finance_admins"],
+            when_condition="hasTag('pii')",
+            match_columns=[{
+                "condition": "hasTag('pii')",
+                "alias": "sensitive_col",
+            }],
+            column_mask={
+                "function_name": "main.masks.redact_sensitive",
+                "on_column": "sensitive_col",
+                "usings": [{
+                    "constant": "4",
+                }],
+            })
+        ```
 
         ## Import
 
pulumi_databricks/provider.py
CHANGED
@@ -39,6 +39,7 @@ class ProviderArgs:
                  databricks_id_token_filepath: Optional[pulumi.Input[_builtins.str]] = None,
                  debug_headers: Optional[pulumi.Input[_builtins.bool]] = None,
                  debug_truncate_bytes: Optional[pulumi.Input[_builtins.int]] = None,
+                 experimental_is_unified_host: Optional[pulumi.Input[_builtins.bool]] = None,
                  google_credentials: Optional[pulumi.Input[_builtins.str]] = None,
                  google_service_account: Optional[pulumi.Input[_builtins.str]] = None,
                  host: Optional[pulumi.Input[_builtins.str]] = None,
@@ -54,7 +55,8 @@ class ProviderArgs:
                  skip_verify: Optional[pulumi.Input[_builtins.bool]] = None,
                  token: Optional[pulumi.Input[_builtins.str]] = None,
                  username: Optional[pulumi.Input[_builtins.str]] = None,
-                 warehouse_id: Optional[pulumi.Input[_builtins.str]] = None):
+                 warehouse_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 workspace_id: Optional[pulumi.Input[_builtins.str]] = None):
         """
         The set of arguments for constructing a Provider resource.
         """
@@ -98,6 +100,8 @@ class ProviderArgs:
             pulumi.set(__self__, "debug_headers", debug_headers)
         if debug_truncate_bytes is not None:
             pulumi.set(__self__, "debug_truncate_bytes", debug_truncate_bytes)
+        if experimental_is_unified_host is not None:
+            pulumi.set(__self__, "experimental_is_unified_host", experimental_is_unified_host)
         if google_credentials is not None:
             pulumi.set(__self__, "google_credentials", google_credentials)
         if google_service_account is not None:
@@ -130,6 +134,8 @@ class ProviderArgs:
             pulumi.set(__self__, "username", username)
         if warehouse_id is not None:
             pulumi.set(__self__, "warehouse_id", warehouse_id)
+        if workspace_id is not None:
+            pulumi.set(__self__, "workspace_id", workspace_id)
 
     @_builtins.property
     @pulumi.getter(name="accountId")
@@ -311,6 +317,15 @@ class ProviderArgs:
     def debug_truncate_bytes(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "debug_truncate_bytes", value)
 
+    @_builtins.property
+    @pulumi.getter(name="experimentalIsUnifiedHost")
+    def experimental_is_unified_host(self) -> Optional[pulumi.Input[_builtins.bool]]:
+        return pulumi.get(self, "experimental_is_unified_host")
+
+    @experimental_is_unified_host.setter
+    def experimental_is_unified_host(self, value: Optional[pulumi.Input[_builtins.bool]]):
+        pulumi.set(self, "experimental_is_unified_host", value)
+
     @_builtins.property
     @pulumi.getter(name="googleCredentials")
     def google_credentials(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -455,6 +470,15 @@ class ProviderArgs:
     def warehouse_id(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "warehouse_id", value)
 
+    @_builtins.property
+    @pulumi.getter(name="workspaceId")
+    def workspace_id(self) -> Optional[pulumi.Input[_builtins.str]]:
+        return pulumi.get(self, "workspace_id")
+
+    @workspace_id.setter
+    def workspace_id(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "workspace_id", value)
+
 
 @pulumi.type_token("pulumi:providers:databricks")
 class Provider(pulumi.ProviderResource):
@@ -482,6 +506,7 @@ class Provider(pulumi.ProviderResource):
                  databricks_id_token_filepath: Optional[pulumi.Input[_builtins.str]] = None,
                  debug_headers: Optional[pulumi.Input[_builtins.bool]] = None,
                  debug_truncate_bytes: Optional[pulumi.Input[_builtins.int]] = None,
+                 experimental_is_unified_host: Optional[pulumi.Input[_builtins.bool]] = None,
                  google_credentials: Optional[pulumi.Input[_builtins.str]] = None,
                  google_service_account: Optional[pulumi.Input[_builtins.str]] = None,
                  host: Optional[pulumi.Input[_builtins.str]] = None,
@@ -498,6 +523,7 @@ class Provider(pulumi.ProviderResource):
                  token: Optional[pulumi.Input[_builtins.str]] = None,
                  username: Optional[pulumi.Input[_builtins.str]] = None,
                  warehouse_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 workspace_id: Optional[pulumi.Input[_builtins.str]] = None,
                  __props__=None):
         """
         The provider type for the databricks package. By default, resources use package-wide configuration
@@ -555,6 +581,7 @@ class Provider(pulumi.ProviderResource):
                  databricks_id_token_filepath: Optional[pulumi.Input[_builtins.str]] = None,
                  debug_headers: Optional[pulumi.Input[_builtins.bool]] = None,
                  debug_truncate_bytes: Optional[pulumi.Input[_builtins.int]] = None,
+                 experimental_is_unified_host: Optional[pulumi.Input[_builtins.bool]] = None,
                  google_credentials: Optional[pulumi.Input[_builtins.str]] = None,
                  google_service_account: Optional[pulumi.Input[_builtins.str]] = None,
                  host: Optional[pulumi.Input[_builtins.str]] = None,
@@ -571,6 +598,7 @@ class Provider(pulumi.ProviderResource):
                  token: Optional[pulumi.Input[_builtins.str]] = None,
                  username: Optional[pulumi.Input[_builtins.str]] = None,
                  warehouse_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 workspace_id: Optional[pulumi.Input[_builtins.str]] = None,
                  __props__=None):
         opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
         if not isinstance(opts, pulumi.ResourceOptions):
@@ -600,6 +628,7 @@ class Provider(pulumi.ProviderResource):
             __props__.__dict__["databricks_id_token_filepath"] = databricks_id_token_filepath
             __props__.__dict__["debug_headers"] = pulumi.Output.from_input(debug_headers).apply(pulumi.runtime.to_json) if debug_headers is not None else None
             __props__.__dict__["debug_truncate_bytes"] = pulumi.Output.from_input(debug_truncate_bytes).apply(pulumi.runtime.to_json) if debug_truncate_bytes is not None else None
+            __props__.__dict__["experimental_is_unified_host"] = pulumi.Output.from_input(experimental_is_unified_host).apply(pulumi.runtime.to_json) if experimental_is_unified_host is not None else None
             __props__.__dict__["google_credentials"] = None if google_credentials is None else pulumi.Output.secret(google_credentials)
             __props__.__dict__["google_service_account"] = google_service_account
             __props__.__dict__["host"] = host
@@ -616,6 +645,7 @@ class Provider(pulumi.ProviderResource):
             __props__.__dict__["token"] = None if token is None else pulumi.Output.secret(token)
             __props__.__dict__["username"] = username
             __props__.__dict__["warehouse_id"] = warehouse_id
+            __props__.__dict__["workspace_id"] = workspace_id
         secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["azureClientSecret", "clientSecret", "googleCredentials", "metadataServiceUrl", "password", "token"])
         opts = pulumi.ResourceOptions.merge(opts, secret_opts)
         super(Provider, __self__).__init__(
@@ -764,6 +794,11 @@ class Provider(pulumi.ProviderResource):
     def warehouse_id(self) -> pulumi.Output[Optional[_builtins.str]]:
         return pulumi.get(self, "warehouse_id")
 
+    @_builtins.property
+    @pulumi.getter(name="workspaceId")
+    def workspace_id(self) -> pulumi.Output[Optional[_builtins.str]]:
+        return pulumi.get(self, "workspace_id")
+
 
 @pulumi.output_type
 class TerraformConfigResult:
     def __init__(__self__, result=None):
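This release adds two provider-level inputs, `experimental_is_unified_host` and `workspace_id`, wired through `ProviderArgs` and the `Provider` resource (the matching config vars appear in `config/vars.py` above). Neither input is documented in these hunks, so the sketch below treats their values as placeholders and only shows how an explicit provider instance is constructed:

```python
import pulumi
import pulumi_databricks as databricks

cfg = pulumi.Config()

# Explicit provider instance; the host URL and workspace_id are placeholders.
dbx = databricks.Provider(
    "dbx",
    host="https://dbc-xxxxxxxx-xxxx.cloud.databricks.com",
    token=cfg.require_secret("databricksToken"),
    workspace_id="1234567890123456",
)

# Resources opt in to this provider instance explicitly, e.g.
# databricks.Notebook("nb", ..., opts=pulumi.ResourceOptions(provider=dbx))
```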
pulumi_databricks/recipient.py
CHANGED
@@ -545,6 +545,43 @@ class Recipient(pulumi.CustomResource):
             })
         ```
 
+        ### Databricks to Databricks Sharing
+
+        Setting `authentication_type` type to `DATABRICKS` allows you to automatically create a provider for a recipient who
+        is using Databricks. To do this they would need to provide the global metastore id that you will be sharing with. The
+        global metastore id follows the format: `<cloud>:<region>:<guid>`
+
+        ```python
+        import pulumi
+        import pulumi_databricks as databricks
+        import pulumi_std as std
+
+        current = databricks.get_current_user()
+        recipient_metastore = databricks.Metastore("recipient_metastore",
+            name="recipient",
+            storage_root=std.format(input="abfss://%s@%s.dfs.core.windows.net/",
+                args=[
+                    unity_catalog["name"],
+                    unity_catalog_azurerm_storage_account["name"],
+                ]).result,
+            delta_sharing_scope="INTERNAL",
+            delta_sharing_recipient_token_lifetime_in_seconds=60000000,
+            force_destroy=True)
+        db2db = databricks.Recipient("db2db",
+            name=f"{current.alphanumeric}-recipient",
+            comment="Made by Pulumi",
+            authentication_type="DATABRICKS",
+            data_recipient_global_metastore_id=recipient_metastore.global_metastore_id)
+        ```
+
+        ## Related Resources
+
+        The following resources are often used in the same context:
+
+        * Share to create Delta Sharing shares.
+        * Grants to manage Delta Sharing permissions.
+        * get_shares to read existing Delta Sharing shares.
+
         ## Import
 
         The recipient resource can be imported using the name of the recipient:
@@ -621,6 +658,43 @@ class Recipient(pulumi.CustomResource):
             })
         ```
 
+        ### Databricks to Databricks Sharing
+
+        Setting `authentication_type` type to `DATABRICKS` allows you to automatically create a provider for a recipient who
+        is using Databricks. To do this they would need to provide the global metastore id that you will be sharing with. The
+        global metastore id follows the format: `<cloud>:<region>:<guid>`
+
+        ```python
+        import pulumi
+        import pulumi_databricks as databricks
+        import pulumi_std as std
+
+        current = databricks.get_current_user()
+        recipient_metastore = databricks.Metastore("recipient_metastore",
+            name="recipient",
+            storage_root=std.format(input="abfss://%s@%s.dfs.core.windows.net/",
+                args=[
+                    unity_catalog["name"],
+                    unity_catalog_azurerm_storage_account["name"],
+                ]).result,
+            delta_sharing_scope="INTERNAL",
+            delta_sharing_recipient_token_lifetime_in_seconds=60000000,
+            force_destroy=True)
+        db2db = databricks.Recipient("db2db",
+            name=f"{current.alphanumeric}-recipient",
+            comment="Made by Pulumi",
+            authentication_type="DATABRICKS",
+            data_recipient_global_metastore_id=recipient_metastore.global_metastore_id)
+        ```
+
+        ## Related Resources
+
+        The following resources are often used in the same context:
+
+        * Share to create Delta Sharing shares.
+        * Grants to manage Delta Sharing permissions.
+        * get_shares to read existing Delta Sharing shares.
+
         ## Import
 
         The recipient resource can be imported using the name of the recipient: