pulumi-databricks 1.75.0a1756323569__py3-none-any.whl → 1.77.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (159)
  1. pulumi_databricks/__init__.py +129 -46
  2. pulumi_databricks/_inputs.py +13377 -13798
  3. pulumi_databricks/account_federation_policy.py +473 -0
  4. pulumi_databricks/account_network_policy.py +12 -4
  5. pulumi_databricks/account_setting_v2.py +759 -0
  6. pulumi_databricks/alert_v2.py +223 -90
  7. pulumi_databricks/app.py +111 -4
  8. pulumi_databricks/apps_settings_custom_template.py +531 -0
  9. pulumi_databricks/budget_policy.py +28 -5
  10. pulumi_databricks/catalog.py +322 -2
  11. pulumi_databricks/cluster.py +47 -168
  12. pulumi_databricks/config/__init__.pyi +2 -0
  13. pulumi_databricks/config/vars.py +4 -0
  14. pulumi_databricks/connection.py +0 -34
  15. pulumi_databricks/data_quality_monitor.py +453 -0
  16. pulumi_databricks/data_quality_refresh.py +492 -0
  17. pulumi_databricks/database_database_catalog.py +8 -4
  18. pulumi_databricks/database_instance.py +354 -73
  19. pulumi_databricks/database_synced_database_table.py +52 -4
  20. pulumi_databricks/entitlements.py +21 -21
  21. pulumi_databricks/entity_tag_assignment.py +409 -0
  22. pulumi_databricks/external_metadata.py +15 -11
  23. pulumi_databricks/feature_engineering_feature.py +480 -0
  24. pulumi_databricks/feature_engineering_materialized_feature.py +397 -0
  25. pulumi_databricks/get_account_federation_policies.py +127 -0
  26. pulumi_databricks/get_account_federation_policy.py +214 -0
  27. pulumi_databricks/get_account_network_policies.py +4 -0
  28. pulumi_databricks/get_account_network_policy.py +17 -18
  29. pulumi_databricks/get_account_setting_v2.py +331 -0
  30. pulumi_databricks/get_alert_v2.py +78 -70
  31. pulumi_databricks/get_alerts_v2.py +31 -12
  32. pulumi_databricks/get_app.py +21 -9
  33. pulumi_databricks/get_apps.py +22 -10
  34. pulumi_databricks/get_apps_settings_custom_template.py +207 -0
  35. pulumi_databricks/get_apps_settings_custom_templates.py +133 -0
  36. pulumi_databricks/get_aws_bucket_policy.py +6 -6
  37. pulumi_databricks/get_budget_policies.py +70 -6
  38. pulumi_databricks/get_budget_policy.py +35 -30
  39. pulumi_databricks/get_catalog.py +21 -4
  40. pulumi_databricks/get_catalogs.py +23 -4
  41. pulumi_databricks/get_cluster.py +21 -4
  42. pulumi_databricks/get_cluster_policy.py +23 -4
  43. pulumi_databricks/get_clusters.py +21 -4
  44. pulumi_databricks/get_current_metastore.py +21 -4
  45. pulumi_databricks/get_dashboards.py +24 -4
  46. pulumi_databricks/get_data_quality_monitor.py +210 -0
  47. pulumi_databricks/get_data_quality_monitors.py +143 -0
  48. pulumi_databricks/get_data_quality_refresh.py +270 -0
  49. pulumi_databricks/get_data_quality_refreshes.py +207 -0
  50. pulumi_databricks/get_database_database_catalog.py +6 -20
  51. pulumi_databricks/get_database_database_catalogs.py +46 -8
  52. pulumi_databricks/get_database_instance.py +122 -75
  53. pulumi_databricks/get_database_instances.py +31 -6
  54. pulumi_databricks/get_database_synced_database_table.py +7 -28
  55. pulumi_databricks/get_database_synced_database_tables.py +43 -5
  56. pulumi_databricks/get_directory.py +20 -1
  57. pulumi_databricks/get_entity_tag_assignment.py +202 -0
  58. pulumi_databricks/get_entity_tag_assignments.py +187 -0
  59. pulumi_databricks/get_external_location.py +21 -4
  60. pulumi_databricks/get_external_locations.py +23 -4
  61. pulumi_databricks/get_external_metadata.py +12 -50
  62. pulumi_databricks/get_external_metadatas.py +55 -6
  63. pulumi_databricks/get_feature_engineering_feature.py +179 -0
  64. pulumi_databricks/get_feature_engineering_features.py +103 -0
  65. pulumi_databricks/get_feature_engineering_materialized_feature.py +180 -0
  66. pulumi_databricks/get_feature_engineering_materialized_features.py +123 -0
  67. pulumi_databricks/get_functions.py +16 -1
  68. pulumi_databricks/get_instance_profiles.py +21 -4
  69. pulumi_databricks/get_jobs.py +23 -4
  70. pulumi_databricks/get_materialized_features_feature_tag.py +3 -11
  71. pulumi_databricks/get_materialized_features_feature_tags.py +59 -8
  72. pulumi_databricks/get_metastore.py +2 -2
  73. pulumi_databricks/get_mlflow_experiment.py +18 -1
  74. pulumi_databricks/get_mlflow_model.py +18 -1
  75. pulumi_databricks/get_mlflow_models.py +23 -4
  76. pulumi_databricks/get_node_type.py +42 -5
  77. pulumi_databricks/get_notification_destinations.py +17 -1
  78. pulumi_databricks/get_online_store.py +7 -17
  79. pulumi_databricks/get_online_stores.py +29 -8
  80. pulumi_databricks/get_pipelines.py +23 -4
  81. pulumi_databricks/get_policy_info.py +28 -99
  82. pulumi_databricks/get_policy_infos.py +92 -5
  83. pulumi_databricks/get_quality_monitor_v2.py +4 -0
  84. pulumi_databricks/get_quality_monitors_v2.py +22 -3
  85. pulumi_databricks/get_registered_model.py +19 -4
  86. pulumi_databricks/get_registered_model_versions.py +19 -4
  87. pulumi_databricks/get_rfa_access_request_destinations.py +126 -0
  88. pulumi_databricks/get_schema.py +18 -1
  89. pulumi_databricks/get_schemas.py +23 -4
  90. pulumi_databricks/get_service_principal_federation_policies.py +151 -0
  91. pulumi_databricks/get_service_principal_federation_policy.py +220 -0
  92. pulumi_databricks/get_serving_endpoints.py +19 -4
  93. pulumi_databricks/get_share.py +117 -18
  94. pulumi_databricks/get_shares.py +22 -3
  95. pulumi_databricks/get_spark_version.py +20 -1
  96. pulumi_databricks/get_sql_warehouse.py +16 -1
  97. pulumi_databricks/get_sql_warehouses.py +20 -1
  98. pulumi_databricks/get_storage_credential.py +18 -1
  99. pulumi_databricks/get_storage_credentials.py +23 -4
  100. pulumi_databricks/get_table.py +18 -1
  101. pulumi_databricks/get_tables.py +20 -1
  102. pulumi_databricks/get_tag_policies.py +139 -0
  103. pulumi_databricks/get_tag_policy.py +175 -0
  104. pulumi_databricks/get_views.py +20 -1
  105. pulumi_databricks/get_volume.py +18 -1
  106. pulumi_databricks/get_volumes.py +20 -1
  107. pulumi_databricks/get_workspace_network_option.py +8 -16
  108. pulumi_databricks/get_workspace_setting_v2.py +331 -0
  109. pulumi_databricks/get_zones.py +20 -1
  110. pulumi_databricks/git_credential.py +54 -7
  111. pulumi_databricks/grant.py +2 -2
  112. pulumi_databricks/group.py +21 -21
  113. pulumi_databricks/job.py +47 -0
  114. pulumi_databricks/library.py +165 -0
  115. pulumi_databricks/materialized_features_feature_tag.py +8 -4
  116. pulumi_databricks/mws_ncc_private_endpoint_rule.py +7 -7
  117. pulumi_databricks/mws_permission_assignment.py +16 -16
  118. pulumi_databricks/mws_storage_configurations.py +6 -6
  119. pulumi_databricks/mws_workspaces.py +76 -29
  120. pulumi_databricks/online_store.py +8 -4
  121. pulumi_databricks/outputs.py +26397 -22382
  122. pulumi_databricks/permission_assignment.py +266 -24
  123. pulumi_databricks/pipeline.py +37 -3
  124. pulumi_databricks/policy_info.py +43 -39
  125. pulumi_databricks/provider.py +15 -0
  126. pulumi_databricks/pulumi-plugin.json +1 -1
  127. pulumi_databricks/quality_monitor.py +47 -0
  128. pulumi_databricks/quality_monitor_v2.py +8 -4
  129. pulumi_databricks/registered_model.py +301 -29
  130. pulumi_databricks/rfa_access_request_destinations.py +286 -0
  131. pulumi_databricks/service_principal_federation_policy.py +469 -0
  132. pulumi_databricks/share.py +71 -84
  133. pulumi_databricks/sql_endpoint.py +47 -0
  134. pulumi_databricks/sql_table.py +35 -7
  135. pulumi_databricks/storage_credential.py +59 -6
  136. pulumi_databricks/tag_policy.py +357 -0
  137. pulumi_databricks/user.py +21 -21
  138. pulumi_databricks/workspace_binding.py +0 -48
  139. pulumi_databricks/workspace_network_option.py +8 -4
  140. pulumi_databricks/workspace_setting_v2.py +759 -0
  141. {pulumi_databricks-1.75.0a1756323569.dist-info → pulumi_databricks-1.77.0.dist-info}/METADATA +1 -1
  142. pulumi_databricks-1.77.0.dist-info/RECORD +250 -0
  143. pulumi_databricks/clean_room_asset.py +0 -891
  144. pulumi_databricks/clean_room_auto_approval_rule.py +0 -426
  145. pulumi_databricks/clean_rooms_clean_room.py +0 -518
  146. pulumi_databricks/get_clean_room_asset.py +0 -399
  147. pulumi_databricks/get_clean_room_asset_revisions_clean_room_asset.py +0 -375
  148. pulumi_databricks/get_clean_room_asset_revisions_clean_room_assets.py +0 -82
  149. pulumi_databricks/get_clean_room_assets.py +0 -104
  150. pulumi_databricks/get_clean_room_auto_approval_rule.py +0 -200
  151. pulumi_databricks/get_clean_room_auto_approval_rules.py +0 -82
  152. pulumi_databricks/get_clean_rooms_clean_room.py +0 -272
  153. pulumi_databricks/get_clean_rooms_clean_rooms.py +0 -104
  154. pulumi_databricks/get_recipient_federation_policies.py +0 -82
  155. pulumi_databricks/get_recipient_federation_policy.py +0 -165
  156. pulumi_databricks/recipient_federation_policy.py +0 -346
  157. pulumi_databricks-1.75.0a1756323569.dist-info/RECORD +0 -231
  158. {pulumi_databricks-1.75.0a1756323569.dist-info → pulumi_databricks-1.77.0.dist-info}/WHEEL +0 -0
  159. {pulumi_databricks-1.75.0a1756323569.dist-info → pulumi_databricks-1.77.0.dist-info}/top_level.txt +0 -0
pulumi_databricks/cluster.py

@@ -49,6 +49,7 @@ class ClusterArgs:
  node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
  num_workers: Optional[pulumi.Input[_builtins.int]] = None,
  policy_id: Optional[pulumi.Input[_builtins.str]] = None,
+ provider_config: Optional[pulumi.Input['ClusterProviderConfigArgs']] = None,
  remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
  runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
  single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -99,33 +100,10 @@ class ClusterArgs:
  :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
  :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
  :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
  :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
  :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
+ :param pulumi.Input['ClusterProviderConfigArgs'] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
  :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
  :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have following items:
@@ -190,6 +168,8 @@ class ClusterArgs:
  pulumi.set(__self__, "num_workers", num_workers)
  if policy_id is not None:
  pulumi.set(__self__, "policy_id", policy_id)
+ if provider_config is not None:
+ pulumi.set(__self__, "provider_config", provider_config)
  if remote_disk_throughput is not None:
  pulumi.set(__self__, "remote_disk_throughput", remote_disk_throughput)
  if runtime_engine is not None:
@@ -498,30 +478,6 @@ class ClusterArgs:
  def no_wait(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  """
  return pulumi.get(self, "no_wait")

@@ -565,6 +521,18 @@ class ClusterArgs:
  def policy_id(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "policy_id", value)

+ @_builtins.property
+ @pulumi.getter(name="providerConfig")
+ def provider_config(self) -> Optional[pulumi.Input['ClusterProviderConfigArgs']]:
+ """
+ Configure the provider for management through account provider. This block consists of the following fields:
+ """
+ return pulumi.get(self, "provider_config")
+
+ @provider_config.setter
+ def provider_config(self, value: Optional[pulumi.Input['ClusterProviderConfigArgs']]):
+ pulumi.set(self, "provider_config", value)
+
  @_builtins.property
  @pulumi.getter(name="remoteDiskThroughput")
  def remote_disk_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -699,6 +667,7 @@ class _ClusterState:
  node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
  num_workers: Optional[pulumi.Input[_builtins.int]] = None,
  policy_id: Optional[pulumi.Input[_builtins.str]] = None,
+ provider_config: Optional[pulumi.Input['ClusterProviderConfigArgs']] = None,
  remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
  runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
  single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -752,33 +721,10 @@ class _ClusterState:
  :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
  :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
  :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
  :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
  :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
+ :param pulumi.Input['ClusterProviderConfigArgs'] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
  :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
  :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have following items:
@@ -848,6 +794,8 @@ class _ClusterState:
  pulumi.set(__self__, "num_workers", num_workers)
  if policy_id is not None:
  pulumi.set(__self__, "policy_id", policy_id)
+ if provider_config is not None:
+ pulumi.set(__self__, "provider_config", provider_config)
  if remote_disk_throughput is not None:
  pulumi.set(__self__, "remote_disk_throughput", remote_disk_throughput)
  if runtime_engine is not None:
@@ -1171,30 +1119,6 @@ class _ClusterState:
  def no_wait(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  """
  return pulumi.get(self, "no_wait")

@@ -1238,6 +1162,18 @@ class _ClusterState:
  def policy_id(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "policy_id", value)

+ @_builtins.property
+ @pulumi.getter(name="providerConfig")
+ def provider_config(self) -> Optional[pulumi.Input['ClusterProviderConfigArgs']]:
+ """
+ Configure the provider for management through account provider. This block consists of the following fields:
+ """
+ return pulumi.get(self, "provider_config")
+
+ @provider_config.setter
+ def provider_config(self, value: Optional[pulumi.Input['ClusterProviderConfigArgs']]):
+ pulumi.set(self, "provider_config", value)
+
  @_builtins.property
  @pulumi.getter(name="remoteDiskThroughput")
  def remote_disk_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -1406,6 +1342,7 @@ class Cluster(pulumi.CustomResource):
  node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
  num_workers: Optional[pulumi.Input[_builtins.int]] = None,
  policy_id: Optional[pulumi.Input[_builtins.str]] = None,
+ provider_config: Optional[pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']]] = None,
  remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
  runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
  single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -1468,33 +1405,10 @@ class Cluster(pulumi.CustomResource):
  :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
  :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
  :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
  :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
  :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
+ :param pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
  :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
  :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have following items:
@@ -1564,6 +1478,7 @@ class Cluster(pulumi.CustomResource):
  node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
  num_workers: Optional[pulumi.Input[_builtins.int]] = None,
  policy_id: Optional[pulumi.Input[_builtins.str]] = None,
+ provider_config: Optional[pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']]] = None,
  remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
  runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
  single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -1610,6 +1525,7 @@ class Cluster(pulumi.CustomResource):
  __props__.__dict__["node_type_id"] = node_type_id
  __props__.__dict__["num_workers"] = num_workers
  __props__.__dict__["policy_id"] = policy_id
+ __props__.__dict__["provider_config"] = provider_config
  __props__.__dict__["remote_disk_throughput"] = remote_disk_throughput
  __props__.__dict__["runtime_engine"] = runtime_engine
  __props__.__dict__["single_user_name"] = single_user_name
@@ -1665,6 +1581,7 @@ class Cluster(pulumi.CustomResource):
  node_type_id: Optional[pulumi.Input[_builtins.str]] = None,
  num_workers: Optional[pulumi.Input[_builtins.int]] = None,
  policy_id: Optional[pulumi.Input[_builtins.str]] = None,
+ provider_config: Optional[pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']]] = None,
  remote_disk_throughput: Optional[pulumi.Input[_builtins.int]] = None,
  runtime_engine: Optional[pulumi.Input[_builtins.str]] = None,
  single_user_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -1723,33 +1640,10 @@ class Cluster(pulumi.CustomResource):
  :param pulumi.Input[_builtins.bool] is_single_node: When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers`.
  :param pulumi.Input[_builtins.str] kind: The kind of compute described by this compute specification. Possible values (see [API docs](https://docs.databricks.com/api/workspace/clusters/create#kind) for full list): `CLASSIC_PREVIEW` (if corresponding public preview is enabled).
  :param pulumi.Input[_builtins.bool] no_wait: If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  :param pulumi.Input[_builtins.str] node_type_id: Any supported get_node_type id. If `instance_pool_id` is specified, this field is not needed.
  :param pulumi.Input[_builtins.int] num_workers: Number of worker nodes that this cluster should have. A cluster has one Spark driver and `num_workers` executors for a total of `num_workers` + 1 Spark nodes.
  :param pulumi.Input[_builtins.str] policy_id: Identifier of Cluster Policy to validate cluster and preset certain defaults. *The primary use for cluster policies is to allow users to create policy-scoped clusters via UI rather than sharing configuration for API-created clusters.* For example, when you specify `policy_id` of [external metastore](https://docs.databricks.com/administration-guide/clusters/policies.html#external-metastore-policy) policy, you still have to fill in relevant keys for `spark_conf`. If relevant fields aren't filled in, then it will cause the configuration drift detected on each plan/apply, and Pulumi will try to apply the detected changes.
+ :param pulumi.Input[Union['ClusterProviderConfigArgs', 'ClusterProviderConfigArgsDict']] provider_config: Configure the provider for management through account provider. This block consists of the following fields:
  :param pulumi.Input[_builtins.str] runtime_engine: The type of runtime engine to use. If not specified, the runtime engine type is inferred based on the spark_version value. Allowed values include: `PHOTON`, `STANDARD`.
  :param pulumi.Input[_builtins.str] single_user_name: The optional user name of the user (or group name if `kind` if specified) to assign to an interactive cluster. This field is required when using `data_security_mode` set to `SINGLE_USER` or AAD Passthrough for Azure Data Lake Storage (ADLS) with a single-user cluster (i.e., not high-concurrency clusters).
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] spark_conf: should have following items:
@@ -1794,6 +1688,7 @@ class Cluster(pulumi.CustomResource):
  __props__.__dict__["node_type_id"] = node_type_id
  __props__.__dict__["num_workers"] = num_workers
  __props__.__dict__["policy_id"] = policy_id
+ __props__.__dict__["provider_config"] = provider_config
  __props__.__dict__["remote_disk_throughput"] = remote_disk_throughput
  __props__.__dict__["runtime_engine"] = runtime_engine
  __props__.__dict__["single_user_name"] = single_user_name
@@ -2006,30 +1901,6 @@ class Cluster(pulumi.CustomResource):
  def no_wait(self) -> pulumi.Output[Optional[_builtins.bool]]:
  """
  If true, the provider will not wait for the cluster to reach `RUNNING` state when creating the cluster, allowing cluster creation and library installation to continue asynchronously. Defaults to false (the provider will wait for cluster creation and library installation to succeed).
-
- The following example demonstrates how to create an autoscaling cluster with [Delta Cache](https://docs.databricks.com/delta/optimizations/delta-cache.html) enabled:
-
- ```python
- import pulumi
- import pulumi_databricks as databricks
-
- smallest = databricks.get_node_type(local_disk=True)
- latest_lts = databricks.get_spark_version(long_term_support=True)
- shared_autoscaling = databricks.Cluster("shared_autoscaling",
- cluster_name="Shared Autoscaling",
- spark_version=latest_lts.id,
- node_type_id=smallest.id,
- autotermination_minutes=20,
- autoscale={
- "min_workers": 1,
- "max_workers": 50,
- },
- spark_conf={
- "spark.databricks.io.cache.enabled": "true",
- "spark.databricks.io.cache.maxDiskUsage": "50g",
- "spark.databricks.io.cache.maxMetaDataCache": "1g",
- })
- ```
  """
  return pulumi.get(self, "no_wait")

@@ -2057,6 +1928,14 @@ class Cluster(pulumi.CustomResource):
  """
  return pulumi.get(self, "policy_id")

+ @_builtins.property
+ @pulumi.getter(name="providerConfig")
+ def provider_config(self) -> pulumi.Output[Optional['outputs.ClusterProviderConfig']]:
+ """
+ Configure the provider for management through account provider. This block consists of the following fields:
+ """
+ return pulumi.get(self, "provider_config")
+
  @_builtins.property
  @pulumi.getter(name="remoteDiskThroughput")
  def remote_disk_throughput(self) -> pulumi.Output[Optional[_builtins.int]]:
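
The cluster.py hunks add one new argument, `provider_config`, to `ClusterArgs`, `_ClusterState`, and the `Cluster` resource, and drop the Delta Cache example from several docstrings. A minimal sketch of passing the new argument is below; the diff truncates the field list of `ClusterProviderConfigArgs`, so the `workspace_id` field shown here is an assumption based on the upstream Databricks Terraform provider's `provider_config` block, not something confirmed by this release.

```python
import pulumi
import pulumi_databricks as databricks

# Reuses the lookup pattern from the (now removed) docstring example.
smallest = databricks.get_node_type(local_disk=True)
latest_lts = databricks.get_spark_version(long_term_support=True)

example = databricks.Cluster(
    "example",
    cluster_name="provider-config-example",
    spark_version=latest_lts.id,
    node_type_id=smallest.id,
    autotermination_minutes=20,
    num_workers=1,
    # New in 1.77.0. The `workspace_id` key is an assumed field name;
    # check the generated ClusterProviderConfigArgs for the actual fields.
    provider_config={
        "workspace_id": "1234567890123456",
    },
)
```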
pulumi_databricks/config/__init__.pyi

@@ -64,6 +64,8 @@ httpTimeoutSeconds: Optional[int]

  metadataServiceUrl: Optional[str]

+ oauthCallbackPort: Optional[int]
+
  oidcTokenEnv: Optional[str]

  password: Optional[str]
pulumi_databricks/config/vars.py

@@ -120,6 +120,10 @@ class _ExportableConfig(types.ModuleType):
  def metadata_service_url(self) -> Optional[str]:
  return __config__.get('metadataServiceUrl')

+ @_builtins.property
+ def oauth_callback_port(self) -> Optional[int]:
+ return __config__.get_int('oauthCallbackPort')
+
  @_builtins.property
  def oidc_token_env(self) -> Optional[str]:
  return __config__.get('oidcTokenEnv')
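
Both config hunks wire up a new provider setting, `oauthCallbackPort`. A minimal sketch of setting and reading it is below; the interpretation of the value (the local port used for the OAuth callback during browser-based login) and the keyword name on the explicit `Provider` resource are assumptions, since the diff only shows the config plumbing.

```python
# Sketch only. Set the stack config first, e.g.:
#   pulumi config set databricks:oauthCallbackPort 8020
import pulumi_databricks as databricks

# Reads the key through the accessor added in config/vars.py; None if unset.
port = databricks.config.oauth_callback_port

# Assumed keyword name on an explicit provider instance, inferred from the
# provider.py change listed in this diff (not confirmed by the hunks shown).
provider = databricks.Provider("databricks", oauth_callback_port=8020)
```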
pulumi_databricks/connection.py

@@ -23,7 +23,6 @@ class ConnectionArgs:
  def __init__(__self__, *,
  comment: Optional[pulumi.Input[_builtins.str]] = None,
  connection_type: Optional[pulumi.Input[_builtins.str]] = None,
- environment_settings: Optional[pulumi.Input['ConnectionEnvironmentSettingsArgs']] = None,
  name: Optional[pulumi.Input[_builtins.str]] = None,
  options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
  owner: Optional[pulumi.Input[_builtins.str]] = None,
@@ -43,8 +42,6 @@ class ConnectionArgs:
  pulumi.set(__self__, "comment", comment)
  if connection_type is not None:
  pulumi.set(__self__, "connection_type", connection_type)
- if environment_settings is not None:
- pulumi.set(__self__, "environment_settings", environment_settings)
  if name is not None:
  pulumi.set(__self__, "name", name)
  if options is not None:
@@ -80,15 +77,6 @@ class ConnectionArgs:
  def connection_type(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "connection_type", value)

- @_builtins.property
- @pulumi.getter(name="environmentSettings")
- def environment_settings(self) -> Optional[pulumi.Input['ConnectionEnvironmentSettingsArgs']]:
- return pulumi.get(self, "environment_settings")
-
- @environment_settings.setter
- def environment_settings(self, value: Optional[pulumi.Input['ConnectionEnvironmentSettingsArgs']]):
- pulumi.set(self, "environment_settings", value)
-
  @_builtins.property
  @pulumi.getter
  def name(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -159,7 +147,6 @@ class _ConnectionState:
  created_at: Optional[pulumi.Input[_builtins.int]] = None,
  created_by: Optional[pulumi.Input[_builtins.str]] = None,
  credential_type: Optional[pulumi.Input[_builtins.str]] = None,
- environment_settings: Optional[pulumi.Input['ConnectionEnvironmentSettingsArgs']] = None,
  full_name: Optional[pulumi.Input[_builtins.str]] = None,
  metastore_id: Optional[pulumi.Input[_builtins.str]] = None,
  name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -204,8 +191,6 @@ class _ConnectionState:
  pulumi.set(__self__, "created_by", created_by)
  if credential_type is not None:
  pulumi.set(__self__, "credential_type", credential_type)
- if environment_settings is not None:
- pulumi.set(__self__, "environment_settings", environment_settings)
  if full_name is not None:
  pulumi.set(__self__, "full_name", full_name)
  if metastore_id is not None:
@@ -303,15 +288,6 @@ class _ConnectionState:
  def credential_type(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "credential_type", value)

- @_builtins.property
- @pulumi.getter(name="environmentSettings")
- def environment_settings(self) -> Optional[pulumi.Input['ConnectionEnvironmentSettingsArgs']]:
- return pulumi.get(self, "environment_settings")
-
- @environment_settings.setter
- def environment_settings(self, value: Optional[pulumi.Input['ConnectionEnvironmentSettingsArgs']]):
- pulumi.set(self, "environment_settings", value)
-
  @_builtins.property
  @pulumi.getter(name="fullName")
  def full_name(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -462,7 +438,6 @@ class Connection(pulumi.CustomResource):
  opts: Optional[pulumi.ResourceOptions] = None,
  comment: Optional[pulumi.Input[_builtins.str]] = None,
  connection_type: Optional[pulumi.Input[_builtins.str]] = None,
- environment_settings: Optional[pulumi.Input[Union['ConnectionEnvironmentSettingsArgs', 'ConnectionEnvironmentSettingsArgsDict']]] = None,
  name: Optional[pulumi.Input[_builtins.str]] = None,
  options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
  owner: Optional[pulumi.Input[_builtins.str]] = None,
@@ -820,7 +795,6 @@ class Connection(pulumi.CustomResource):
  opts: Optional[pulumi.ResourceOptions] = None,
  comment: Optional[pulumi.Input[_builtins.str]] = None,
  connection_type: Optional[pulumi.Input[_builtins.str]] = None,
- environment_settings: Optional[pulumi.Input[Union['ConnectionEnvironmentSettingsArgs', 'ConnectionEnvironmentSettingsArgsDict']]] = None,
  name: Optional[pulumi.Input[_builtins.str]] = None,
  options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
  owner: Optional[pulumi.Input[_builtins.str]] = None,
@@ -837,7 +811,6 @@ class Connection(pulumi.CustomResource):

  __props__.__dict__["comment"] = comment
  __props__.__dict__["connection_type"] = connection_type
- __props__.__dict__["environment_settings"] = environment_settings
  __props__.__dict__["name"] = name
  __props__.__dict__["options"] = None if options is None else pulumi.Output.secret(options)
  __props__.__dict__["owner"] = owner
@@ -872,7 +845,6 @@ class Connection(pulumi.CustomResource):
  created_at: Optional[pulumi.Input[_builtins.int]] = None,
  created_by: Optional[pulumi.Input[_builtins.str]] = None,
  credential_type: Optional[pulumi.Input[_builtins.str]] = None,
- environment_settings: Optional[pulumi.Input[Union['ConnectionEnvironmentSettingsArgs', 'ConnectionEnvironmentSettingsArgsDict']]] = None,
  full_name: Optional[pulumi.Input[_builtins.str]] = None,
  metastore_id: Optional[pulumi.Input[_builtins.str]] = None,
  name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -920,7 +892,6 @@ class Connection(pulumi.CustomResource):
  __props__.__dict__["created_at"] = created_at
  __props__.__dict__["created_by"] = created_by
  __props__.__dict__["credential_type"] = credential_type
- __props__.__dict__["environment_settings"] = environment_settings
  __props__.__dict__["full_name"] = full_name
  __props__.__dict__["metastore_id"] = metastore_id
  __props__.__dict__["name"] = name
@@ -983,11 +954,6 @@ class Connection(pulumi.CustomResource):
  """
  return pulumi.get(self, "credential_type")

- @_builtins.property
- @pulumi.getter(name="environmentSettings")
- def environment_settings(self) -> pulumi.Output[Optional['outputs.ConnectionEnvironmentSettings']]:
- return pulumi.get(self, "environment_settings")
-
  @_builtins.property
  @pulumi.getter(name="fullName")
  def full_name(self) -> pulumi.Output[_builtins.str]:
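
The connection.py hunks are purely a removal: the `environment_settings` argument, state field, and output property are gone in 1.77.0, so programs that passed it will need to drop that argument. A minimal post-upgrade sketch is below; the connection type and options are illustrative placeholder values, not anything taken from this diff.

```python
import pulumi
import pulumi_databricks as databricks

# Sketch only: a Connection after upgrading to 1.77.0, with no
# `environment_settings` argument. Host, port, and credentials are
# placeholder values for illustration.
mysql = databricks.Connection(
    "mysql",
    name="mysql_connection",
    connection_type="MYSQL",
    comment="Managed by Pulumi",
    options={
        "host": "example.mysql.database.azure.com",
        "port": "3306",
        "user": "example_user",
        "password": "example_password",
    },
)
```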